summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_bus.h14
-rw-r--r--include/acpi/cppc_acpi.h23
-rw-r--r--include/acpi/ghes.h11
-rw-r--r--include/acpi/processor.h2
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h2
-rw-r--r--include/asm-generic/bitsperlong.h9
-rw-r--r--include/asm-generic/codetag.lds.h2
-rw-r--r--include/asm-generic/futex.h4
-rw-r--r--include/asm-generic/thread_info_tif.h5
-rw-r--r--include/asm-generic/tlb.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h25
-rw-r--r--include/asm-generic/xor.h738
-rw-r--r--include/crypto/acompress.h5
-rw-r--r--include/crypto/aes-cbc-macs.h154
-rw-r--r--include/crypto/aes.h66
-rw-r--r--include/crypto/chacha20poly1305.h2
-rw-r--r--include/crypto/cryptd.h33
-rw-r--r--include/crypto/des.h8
-rw-r--r--include/crypto/gcm.h4
-rw-r--r--include/crypto/gf128hash.h (renamed from include/crypto/polyval.h)126
-rw-r--r--include/crypto/gf128mul.h17
-rw-r--r--include/crypto/ghash.h12
-rw-r--r--include/crypto/hkdf.h20
-rw-r--r--include/crypto/if_alg.h5
-rw-r--r--include/crypto/internal/acompress.h1
-rw-r--r--include/crypto/internal/blockhash.h52
-rw-r--r--include/crypto/internal/ecc.h22
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/internal/scompress.h1
-rw-r--r--include/crypto/internal/simd.h19
-rw-r--r--include/crypto/rng.h25
-rw-r--r--include/crypto/skcipher.h1
-rw-r--r--include/crypto/sm3.h85
-rw-r--r--include/crypto/sm3_base.h82
-rw-r--r--include/cxl/cxl.h226
-rw-r--r--include/drm/bridge/dw_dp.h7
-rw-r--r--include/drm/bridge/dw_hdmi_qp.h2
-rw-r--r--include/drm/display/drm_hdmi_helper.h3
-rw-r--r--include/drm/drm_atomic.h20
-rw-r--r--include/drm/drm_atomic_state_helper.h3
-rw-r--r--include/drm/drm_blend.h4
-rw-r--r--include/drm/drm_bridge.h6
-rw-r--r--include/drm/drm_buddy.h163
-rw-r--r--include/drm/drm_client.h3
-rw-r--r--include/drm/drm_colorop.h32
-rw-r--r--include/drm/drm_connector.h49
-rw-r--r--include/drm/drm_crtc.h12
-rw-r--r--include/drm/drm_fb_helper.h107
-rw-r--r--include/drm/drm_mipi_dbi.h145
-rw-r--r--include/drm/drm_mipi_dsi.h4
-rw-r--r--include/drm/drm_mode_config.h9
-rw-r--r--include/drm/drm_pagemap.h21
-rw-r--r--include/drm/drm_ras.h75
-rw-r--r--include/drm/drm_ras_genl_family.h17
-rw-r--r--include/drm/drm_simple_kms_helper.h216
-rw-r--r--include/drm/drm_suballoc.h6
-rw-r--r--include/drm/intel/display_parent_interface.h108
-rw-r--r--include/drm/intel/i915_drm.h82
-rw-r--r--include/drm/intel/intel_gmd_interrupt_regs.h92
-rw-r--r--include/drm/intel/intel_gmd_misc_regs.h21
-rw-r--r--include/drm/intel/intel_pcode_regs.h108
-rw-r--r--include/drm/intel/pciids.h12
-rw-r--r--include/drm/intel/pick.h51
-rw-r--r--include/drm/intel/reg_bits.h139
-rw-r--r--include/drm/intel/step.h62
-rw-r--r--include/drm/intel/xe_sriov_vfio.h11
-rw-r--r--include/drm/ttm/ttm_backup.h2
-rw-r--r--include/drm/ttm/ttm_bo.h28
-rw-r--r--include/drm/ttm/ttm_pool.h7
-rw-r--r--include/dt-bindings/arm/qcom,ids.h10
-rw-r--r--include/dt-bindings/clock/axis,artpec9-clk.h195
-rw-r--r--include/dt-bindings/clock/mobileye,eyeq6lplus-clk.h37
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6125.h6
-rw-r--r--include/dt-bindings/clock/qcom,eliza-gcc.h210
-rw-r--r--include/dt-bindings/clock/qcom,eliza-tcsr.h17
-rw-r--r--include/dt-bindings/clock/qcom,ipq5210-gcc.h126
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-dispcc.h7
-rw-r--r--include/dt-bindings/clock/renesas,r9a08g046-cpg.h342
-rw-r--r--include/dt-bindings/clock/rockchip,rv1103b-cru.h220
-rw-r--r--include/dt-bindings/interconnect/qcom,eliza-rpmh.h136
-rw-r--r--include/dt-bindings/power/allwinner,sun60i-a733-pck-600.h18
-rw-r--r--include/dt-bindings/power/marvell,pxa1908-power.h1
-rw-r--r--include/dt-bindings/power/mediatek,mt8189-power.h38
-rw-r--r--include/dt-bindings/power/mt7622-power.h1
-rw-r--r--include/dt-bindings/power/qcom,rpmhpd.h12
-rw-r--r--include/dt-bindings/reset/cix,sky1-s5-system-control.h163
-rw-r--r--include/dt-bindings/reset/cix,sky1-system-control.h41
-rw-r--r--include/dt-bindings/reset/qcom,ipq5210-gcc.h127
-rw-r--r--include/dt-bindings/reset/spacemit,k3-resets.h48
-rw-r--r--include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h12
-rw-r--r--include/hyperv/hvgdk_mini.h6
-rw-r--r--include/hyperv/hvhdk.h4
-rw-r--r--include/kvm/arm_arch_timer.h8
-rw-r--r--include/kvm/arm_pmu.h5
-rw-r--r--include/kvm/arm_vgic.h191
-rw-r--r--include/linux/acpi.h21
-rw-r--r--include/linux/arm_mpam.h32
-rw-r--r--include/linux/atmdev.h6
-rw-r--r--include/linux/audit.h11
-rw-r--r--include/linux/auxiliary_bus.h2
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/backing-dev.h13
-rw-r--r--include/linux/backing-file.h4
-rw-r--r--include/linux/bio-integrity.h12
-rw-r--r--include/linux/bio.h7
-rw-r--r--include/linux/bitmap.h48
-rw-r--r--include/linux/bitops.h12
-rw-r--r--include/linux/blk-integrity.h29
-rw-r--r--include/linux/blkdev.h50
-rw-r--r--include/linux/bnxt/ulp.h144
-rw-r--r--include/linux/bootconfig.h6
-rw-r--r--include/linux/bpf-cgroup.h2
-rw-r--r--include/linux/bpf.h16
-rw-r--r--include/linux/bpf_local_storage.h15
-rw-r--r--include/linux/bpf_verifier.h432
-rw-r--r--include/linux/brcmphy.h3
-rw-r--r--include/linux/bsg.h6
-rw-r--r--include/linux/btf_ids.h2
-rw-r--r--include/linux/buffer_head.h29
-rw-r--r--include/linux/bus/stm32_firewall.h83
-rw-r--r--include/linux/bus/stm32_firewall_device.h26
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/cgroup-defs.h5
-rw-r--r--include/linux/cgroup.h65
-rw-r--r--include/linux/cleanup.h19
-rw-r--r--include/linux/clockchips.h12
-rw-r--r--include/linux/clocksource.h33
-rw-r--r--include/linux/cma.h10
-rw-r--r--include/linux/compiler-context-analysis.h32
-rw-r--r--include/linux/compiler.h5
-rw-r--r--include/linux/compiler_types.h9
-rw-r--r--include/linux/console_struct.h33
-rw-r--r--include/linux/coreboot.h90
-rw-r--r--include/linux/count_zeros.h13
-rw-r--r--include/linux/cpu.h6
-rw-r--r--include/linux/cpufreq.h11
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/crash_core.h14
-rw-r--r--include/linux/cred.h10
-rw-r--r--include/linux/damon.h18
-rw-r--r--include/linux/dax.h4
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/device-mapper.h7
-rw-r--r--include/linux/device.h51
-rw-r--r--include/linux/device/class.h10
-rw-r--r--include/linux/dma-buf.h17
-rw-r--r--include/linux/dma-buf/heaps/cma.h16
-rw-r--r--include/linux/dma-fence-array.h1
-rw-r--r--include/linux/dma-fence-chain.h1
-rw-r--r--include/linux/dma-fence.h97
-rw-r--r--include/linux/dma-map-ops.h23
-rw-r--r--include/linux/dma-mapping.h12
-rw-r--r--include/linux/dma/edma.h7
-rw-r--r--include/linux/dma/qcom-gpi-dma.h5
-rw-r--r--include/linux/dma/ti-cppi5.h53
-rw-r--r--include/linux/dmaengine.h3
-rw-r--r--include/linux/dmi.h23
-rw-r--r--include/linux/dpll.h10
-rw-r--r--include/linux/drbd_genl.h208
-rw-r--r--include/linux/dsa/loop.h42
-rw-r--r--include/linux/edac.h23
-rw-r--r--include/linux/efi.h21
-rw-r--r--include/linux/energy_model.h4
-rw-r--r--include/linux/entry-common.h5
-rw-r--r--include/linux/ethtool.h21
-rw-r--r--include/linux/evm.h8
-rw-r--r--include/linux/export-internal.h28
-rw-r--r--include/linux/fb.h19
-rw-r--r--include/linux/filter.h66
-rw-r--r--include/linux/find.h2
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h3
-rw-r--r--include/linux/firmware/samsung/exynos-acpm-protocol.h40
-rw-r--r--include/linux/firmware/thead/thead,th1520-aon.h74
-rw-r--r--include/linux/folio_batch.h (renamed from include/linux/pagevec.h)16
-rw-r--r--include/linux/folio_queue.h8
-rw-r--r--include/linux/font.h166
-rw-r--r--include/linux/fs.h133
-rw-r--r--include/linux/fs_parser.h8
-rw-r--r--include/linux/fscrypt.h37
-rw-r--r--include/linux/fsl/mc.h4
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/fwnode.h44
-rw-r--r--include/linux/generic_pt/common.h16
-rw-r--r--include/linux/generic_pt/iommu.h80
-rw-r--r--include/linux/genl_magic_func.h7
-rw-r--r--include/linux/genl_magic_struct.h15
-rw-r--r--include/linux/gpio/consumer.h2
-rw-r--r--include/linux/gpio/defs.h9
-rw-r--r--include/linux/gpio/driver.h15
-rw-r--r--include/linux/gpio/generic.h8
-rw-r--r--include/linux/gpio/gpio-nomadik.h12
-rw-r--r--include/linux/gpio/machine.h33
-rw-r--r--include/linux/gpu_buddy.h241
-rw-r--r--include/linux/hfs_common.h9
-rw-r--r--include/linux/hid.h58
-rw-r--r--include/linux/hisi_acc_qm.h14
-rw-r--r--include/linux/host1x.h10
-rw-r--r--include/linux/hrtimer.h64
-rw-r--r--include/linux/hrtimer_defs.h83
-rw-r--r--include/linux/hrtimer_rearm.h83
-rw-r--r--include/linux/hrtimer_types.h19
-rw-r--r--include/linux/hsi/hsi.h2
-rw-r--r--include/linux/huge_mm.h13
-rw-r--r--include/linux/hugetlb.h32
-rw-r--r--include/linux/hugetlb_inline.h4
-rw-r--r--include/linux/hw_random.h2
-rw-r--r--include/linux/hwspinlock.h28
-rw-r--r--include/linux/hyperv.h4
-rw-r--r--include/linux/icmpv6.h29
-rw-r--r--include/linux/ieee80211-eht.h4
-rw-r--r--include/linux/ieee80211-ht.h3
-rw-r--r--include/linux/ieee80211-nan.h44
-rw-r--r--include/linux/ieee80211-uhr.h275
-rw-r--r--include/linux/ieee80211.h100
-rw-r--r--include/linux/if_pppox.h13
-rw-r--r--include/linux/if_team.h66
-rw-r--r--include/linux/iio/iio.h12
-rw-r--r--include/linux/ima.h7
-rw-r--r--include/linux/indirect_call_wrapper.h2
-rw-r--r--include/linux/intel_rapl.h52
-rw-r--r--include/linux/io_uring_types.h47
-rw-r--r--include/linux/iomap.h22
-rw-r--r--include/linux/iommu.h6
-rw-r--r--include/linux/iopoll.h8
-rw-r--r--include/linux/ioport.h22
-rw-r--r--include/linux/irq-entry-common.h283
-rw-r--r--include/linux/irq.h4
-rw-r--r--include/linux/irqchip/arm-gic-v5.h27
-rw-r--r--include/linux/jbd2.h38
-rw-r--r--include/linux/jiffies.h6
-rw-r--r--include/linux/jump_label.h22
-rw-r--r--include/linux/kasan.h8
-rw-r--r--include/linux/kernfs.h42
-rw-r--r--include/linux/kho/abi/kexec_handover.h144
-rw-r--r--include/linux/kho/abi/memfd.h18
-rw-r--r--include/linux/kho_radix_tree.h70
-rw-r--r--include/linux/kobject.h4
-rw-r--r--include/linux/kobject_ns.h13
-rw-r--r--include/linux/ksm.h10
-rw-r--r--include/linux/ksysfs.h8
-rw-r--r--include/linux/kvm_host.h27
-rw-r--r--include/linux/leafops.h39
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/lis3lv02d.h4
-rw-r--r--include/linux/lsm_audit.h2
-rw-r--r--include/linux/lsm_hook_defs.h10
-rw-r--r--include/linux/lsm_hooks.h1
-rw-r--r--include/linux/maple_tree.h42
-rw-r--r--include/linux/math.h18
-rw-r--r--include/linux/mdio-gpio.h9
-rw-r--r--include/linux/mdio.h2
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h2
-rw-r--r--include/linux/memfd.h12
-rw-r--r--include/linux/memory-tiers.h2
-rw-r--r--include/linux/memory.h3
-rw-r--r--include/linux/memory_hotplug.h18
-rw-r--r--include/linux/mfd/arizona/pdata.h10
-rw-r--r--include/linux/mfd/cs42l43-regs.h76
-rw-r--r--include/linux/mfd/cs42l43.h1
-rw-r--r--include/linux/mfd/kempld.h1
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/microchipphy.h5
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/mlx5/fs.h10
-rw-r--r--include/linux/mlx5/lag.h21
-rw-r--r--include/linux/mlx5/mlx5_ifc.h80
-rw-r--r--include/linux/mm.h716
-rw-r--r--include/linux/mm_inline.h16
-rw-r--r--include/linux/mm_types.h91
-rw-r--r--include/linux/mman.h49
-rw-r--r--include/linux/mmap_lock.h6
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmc/sdio_ids.h4
-rw-r--r--include/linux/mmu_notifier.h172
-rw-r--r--include/linux/mmzone.h84
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h5
-rw-r--r--include/linux/module_signature.h30
-rw-r--r--include/linux/module_symbol.h5
-rw-r--r--include/linux/moduleparam.h100
-rw-r--r--include/linux/mroute_base.h9
-rw-r--r--include/linux/mtd/concat.h63
-rw-r--r--include/linux/mtd/spinand.h5
-rw-r--r--include/linux/mutex.h13
-rw-r--r--include/linux/mutex_types.h2
-rw-r--r--include/linux/mux/consumer.h108
-rw-r--r--include/linux/namei.h6
-rw-r--r--include/linux/net.h26
-rw-r--r--include/linux/netdevice.h33
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_amanda.h15
-rw-r--r--include/linux/netfilter/nf_conntrack_ftp.h17
-rw-r--r--include/linux/netfilter/nf_conntrack_irc.h15
-rw-r--r--include/linux/netfilter/nf_conntrack_snmp.h11
-rw-r--r--include/linux/netfilter/nf_conntrack_tftp.h9
-rw-r--r--include/linux/netfilter_ipv6.h102
-rw-r--r--include/linux/nmi.h1
-rw-r--r--include/linux/ns/ns_common_types.h44
-rw-r--r--include/linux/ntb.h24
-rw-r--r--include/linux/nvme-auth.h41
-rw-r--r--include/linux/nvme.h24
-rw-r--r--include/linux/of.h25
-rw-r--r--include/linux/of_dma.h29
-rw-r--r--include/linux/of_gpio.h38
-rw-r--r--include/linux/of_reserved_mem.h16
-rw-r--r--include/linux/padata.h8
-rw-r--r--include/linux/page-flags.h163
-rw-r--r--include/linux/page_ref.h18
-rw-r--r--include/linux/page_reporting.h1
-rw-r--r--include/linux/pagewalk.h8
-rw-r--r--include/linux/pci-epc.h50
-rw-r--r--include/linux/pci-tph.h4
-rw-r--r--include/linux/pci.h37
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pgtable.h139
-rw-r--r--include/linux/phy.h19
-rw-r--r--include/linux/pinctrl/consumer.h9
-rw-r--r--include/linux/platform_data/dma-mcf-edma.h5
-rw-r--r--include/linux/platform_data/dsa.h40
-rw-r--r--include/linux/platform_data/ina2xx.h16
-rw-r--r--include/linux/platform_data/mdio-gpio.h14
-rw-r--r--include/linux/platform_data/voltage-omap.h4
-rw-r--r--include/linux/platform_device.h58
-rw-r--r--include/linux/pm_domain.h5
-rw-r--r--include/linux/power/max17042_battery.h25
-rw-r--r--include/linux/powercap.h4
-rw-r--r--include/linux/ppp_channel.h6
-rw-r--r--include/linux/printk.h13
-rw-r--r--include/linux/psp-sev.h5
-rw-r--r--include/linux/ptr_ring.h8
-rw-r--r--include/linux/quotaops.h9
-rw-r--r--include/linux/raid/xor.h27
-rw-r--r--include/linux/randomize_kstack.h54
-rw-r--r--include/linux/rbtree.h81
-rw-r--r--include/linux/rbtree_types.h16
-rw-r--r--include/linux/rculist_bl.h49
-rw-r--r--include/linux/rcupdate.h12
-rw-r--r--include/linux/refcount.h10
-rw-r--r--include/linux/regmap.h32
-rw-r--r--include/linux/remoteproc/mtk_scp.h2
-rw-r--r--include/linux/reset-controller.h21
-rw-r--r--include/linux/reset.h43
-rw-r--r--include/linux/rhashtable.h13
-rw-r--r--include/linux/ring_buffer.h58
-rw-r--r--include/linux/ring_buffer_types.h41
-rw-r--r--include/linux/rpmsg.h17
-rw-r--r--include/linux/rpmsg/mtk_rpmsg.h2
-rw-r--r--include/linux/rseq_entry.h16
-rw-r--r--include/linux/rtmutex.h8
-rw-r--r--include/linux/rv.h39
-rw-r--r--include/linux/rwlock.h18
-rw-r--r--include/linux/rwlock_api_smp.h6
-rw-r--r--include/linux/rwsem.h8
-rw-r--r--include/linux/sched.h104
-rw-r--r--include/linux/sched/deadline.h27
-rw-r--r--include/linux/sched/ext.h109
-rw-r--r--include/linux/sched/signal.h3
-rw-r--r--include/linux/sched/topology.h26
-rw-r--r--include/linux/secure_boot.h23
-rw-r--r--include/linux/security.h33
-rw-r--r--include/linux/sed-opal.h5
-rw-r--r--include/linux/semaphore.h4
-rw-r--r--include/linux/serdev.h24
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/simple_ring_buffer.h65
-rw-r--r--include/linux/skbuff.h71
-rw-r--r--include/linux/smp.h38
-rw-r--r--include/linux/soc/qcom/apr.h4
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h8
-rw-r--r--include/linux/soc/qcom/pdr.h1
-rw-r--r--include/linux/soc/qcom/qmi.h12
-rw-r--r--include/linux/soc/qcom/ubwc.h25
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/soundwire/sdw.h18
-rw-r--r--include/linux/soundwire/sdw_amd.h4
-rw-r--r--include/linux/spi/spi-mem.h16
-rw-r--r--include/linux/spi/spi.h1
-rw-r--r--include/linux/spinlock.h3
-rw-r--r--include/linux/spinlock_up.h20
-rw-r--r--include/linux/srcu.h4
-rw-r--r--include/linux/srcutree.h2
-rw-r--r--include/linux/stmmac.h122
-rw-r--r--include/linux/stop_machine.h4
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/swap.h30
-rw-r--r--include/linux/sys_soc.h10
-rw-r--r--include/linux/syscalls.h16
-rw-r--r--include/linux/sysfs.h40
-rw-r--r--include/linux/tcp.h14
-rw-r--r--include/linux/tee_core.h30
-rw-r--r--include/linux/tegra-mipi-cal.h57
-rw-r--r--include/linux/thermal.h6
-rw-r--r--include/linux/timb_gpio.h6
-rw-r--r--include/linux/time_namespace.h39
-rw-r--r--include/linux/timekeeper_internal.h8
-rw-r--r--include/linux/timerqueue.h56
-rw-r--r--include/linux/timerqueue_types.h15
-rw-r--r--include/linux/trace_events.h13
-rw-r--r--include/linux/trace_printk.h1
-rw-r--r--include/linux/trace_remote.h48
-rw-r--r--include/linux/trace_remote_event.h33
-rw-r--r--include/linux/tracepoint.h31
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/uaccess.h11
-rw-r--r--include/linux/udp.h10
-rw-r--r--include/linux/uio_driver.h4
-rw-r--r--include/linux/units.h3
-rw-r--r--include/linux/usb.h10
-rw-r--r--include/linux/usb/uvc.h3
-rw-r--r--include/linux/userfaultfd_k.h3
-rw-r--r--include/linux/vdpa.h4
-rw-r--r--include/linux/vdso_datastore.h6
-rw-r--r--include/linux/vfio.h42
-rw-r--r--include/linux/vm_event_item.h13
-rw-r--r--include/linux/vmalloc.h3
-rw-r--r--include/linux/wait_bit.h4
-rw-r--r--include/linux/wmi.h4
-rw-r--r--include/linux/workqueue.h47
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/linux/ww_mutex.h4
-rw-r--r--include/linux/xattr.h47
-rw-r--r--include/media/rc-core.h4
-rw-r--r--include/media/v4l2-fwnode.h6
-rw-r--r--include/media/v4l2-subdev.h56
-rw-r--r--include/net/af_vsock.h9
-rw-r--r--include/net/bluetooth/hci.h16
-rw-r--r--include/net/bonding.h13
-rw-r--r--include/net/cfg80211.h328
-rw-r--r--include/net/codel_impl.h45
-rw-r--r--include/net/devlink.h20
-rw-r--r--include/net/dropreason-core.h66
-rw-r--r--include/net/dropreason-qdisc.h114
-rw-r--r--include/net/dropreason.h6
-rw-r--r--include/net/dsa.h16
-rw-r--r--include/net/hotdata.h5
-rw-r--r--include/net/inet6_connection_sock.h4
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_common.h3
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/inet_hashtables.h1
-rw-r--r--include/net/ip.h10
-rw-r--r--include/net/ip6_checksum.h2
-rw-r--r--include/net/ip6_fib.h35
-rw-r--r--include/net/ip6_route.h41
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/ip_vs.h396
-rw-r--r--include/net/ipv6.h20
-rw-r--r--include/net/ipv6_stubs.h102
-rw-r--r--include/net/mac80211.h176
-rw-r--r--include/net/mana/gdma.h20
-rw-r--r--include/net/mana/mana.h18
-rw-r--r--include/net/mctp.h1
-rw-r--r--include/net/ndisc.h31
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/netdev_queues.h23
-rw-r--r--include/net/netdev_rx_queue.h27
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h7
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h1
-rw-r--r--include/net/netfilter/nf_queue.h1
-rw-r--r--include/net/netfilter/nf_tables.h36
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h17
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h16
-rw-r--r--include/net/netfilter/nf_tables_offload.h10
-rw-r--r--include/net/netfilter/nft_fib.h2
-rw-r--r--include/net/netfilter/nft_meta.h3
-rw-r--r--include/net/netlink.h19
-rw-r--r--include/net/netmem.h52
-rw-r--r--include/net/netns/ipv4.h9
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/mib.h5
-rw-r--r--include/net/netns/mpls.h1
-rw-r--r--include/net/netns/vsock.h2
-rw-r--r--include/net/page_pool/memory_provider.h8
-rw-r--r--include/net/page_pool/types.h11
-rw-r--r--include/net/ping.h5
-rw-r--r--include/net/rps-types.h24
-rw-r--r--include/net/rps.h49
-rw-r--r--include/net/sch_generic.h61
-rw-r--r--include/net/sock.h38
-rw-r--r--include/net/switchdev.h1
-rw-r--r--include/net/tc_wrapper.h47
-rw-r--r--include/net/tcp.h120
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/tso.h100
-rw-r--r--include/net/udp.h89
-rw-r--r--include/net/udp_tunnel.h3
-rw-r--r--include/net/udplite.h88
-rw-r--r--include/net/xdp_sock.h2
-rw-r--r--include/net/xdp_sock_drv.h23
-rw-r--r--include/net/xsk_buff_pool.h7
-rw-r--r--include/rv/da_monitor.h644
-rw-r--r--include/rv/ha_monitor.h478
-rw-r--r--include/soc/tegra/bpmp-abi.h4573
-rw-r--r--include/soc/tegra/bpmp.h20
-rw-r--r--include/soc/tegra/mc.h40
-rw-r--r--include/sound/core.h3
-rw-r--r--include/sound/cs35l56.h7
-rw-r--r--include/sound/gus.h8
-rw-r--r--include/sound/hda_codec.h15
-rw-r--r--include/sound/hda_verbs.h12
-rw-r--r--include/sound/pcm.h4
-rw-r--r--include/sound/sdca_asoc.h43
-rw-r--r--include/sound/sdca_interrupts.h5
-rw-r--r--include/sound/simple_card_utils.h6
-rw-r--r--include/sound/soc-component.h11
-rw-r--r--include/sound/soc-dai.h22
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/sound/soc.h33
-rw-r--r--include/sound/soc_sdw_utils.h5
-rw-r--r--include/sound/tea6330t.h1
-rw-r--r--include/sound/timer.h1
-rw-r--r--include/sound/uda1380.h19
-rw-r--r--include/trace/define_remote_events.h73
-rw-r--r--include/trace/events/btrfs.h24
-rw-r--r--include/trace/events/cachefiles.h18
-rw-r--r--include/trace/events/coredump.h45
-rw-r--r--include/trace/events/devlink.h36
-rw-r--r--include/trace/events/dma.h3
-rw-r--r--include/trace/events/dma_fence.h35
-rw-r--r--include/trace/events/erofs.h7
-rw-r--r--include/trace/events/ext4.h544
-rw-r--r--include/trace/events/f2fs.h242
-rw-r--r--include/trace/events/filelock.h34
-rw-r--r--include/trace/events/filemap.h20
-rw-r--r--include/trace/events/fs_dax.h20
-rw-r--r--include/trace/events/fsverity.h30
-rw-r--r--include/trace/events/huge_memory.h26
-rw-r--r--include/trace/events/hugetlbfs.h42
-rw-r--r--include/trace/events/ipi.h6
-rw-r--r--include/trace/events/mptcp.h2
-rw-r--r--include/trace/events/netfs.h8
-rw-r--r--include/trace/events/nilfs2.h12
-rw-r--r--include/trace/events/pci_controller.h58
-rw-r--r--include/trace/events/qdisc.h51
-rw-r--r--include/trace/events/readahead.h18
-rw-r--r--include/trace/events/rxrpc.h4
-rw-r--r--include/trace/events/sched.h26
-rw-r--r--include/trace/events/timer.h42
-rw-r--r--include/trace/events/timestamp.h16
-rw-r--r--include/trace/events/vmscan.h95
-rw-r--r--include/trace/events/writeback.h162
-rw-r--r--include/trace/stages/stage3_trace_output.h48
-rw-r--r--include/trace/stages/stage7_class_define.h19
-rw-r--r--include/uapi/drm/amdgpu_drm.h7
-rw-r--r--include/uapi/drm/amdxdna_accel.h47
-rw-r--r--include/uapi/drm/drm_fourcc.h16
-rw-r--r--include/uapi/drm/drm_mode.h84
-rw-r--r--include/uapi/drm/drm_ras.h49
-rw-r--r--include/uapi/drm/msm_drm.h1
-rw-r--r--include/uapi/drm/nouveau_drm.h66
-rw-r--r--include/uapi/drm/panthor_drm.h63
-rw-r--r--include/uapi/drm/xe_drm.h268
-rw-r--r--include/uapi/fwctl/bnxt.h26
-rw-r--r--include/uapi/fwctl/fwctl.h1
-rw-r--r--include/uapi/linux/audit.h8
-rw-r--r--include/uapi/linux/bpf.h4
-rw-r--r--include/uapi/linux/bsg.h75
-rw-r--r--include/uapi/linux/btf.h12
-rw-r--r--include/uapi/linux/btrfs_tree.h3
-rw-r--r--include/uapi/linux/const.h18
-rw-r--r--include/uapi/linux/devlink.h15
-rw-r--r--include/uapi/linux/dpll.h5
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h3
-rw-r--r--include/uapi/linux/fs.h12
-rw-r--r--include/uapi/linux/if_link.h47
-rw-r--r--include/uapi/linux/if_pppox.h14
-rw-r--r--include/uapi/linux/inet_diag.h9
-rw-r--r--include/uapi/linux/input-event-codes.h4
-rw-r--r--include/uapi/linux/io_uring.h101
-rw-r--r--include/uapi/linux/io_uring/zcrx.h115
-rw-r--r--include/uapi/linux/iommufd.h9
-rw-r--r--include/uapi/linux/kvm.h21
-rw-r--r--include/uapi/linux/landlock.h25
-rw-r--r--include/uapi/linux/map_benchmark.h13
-rw-r--r--include/uapi/linux/mii.h3
-rw-r--r--include/uapi/linux/module_signature.h41
-rw-r--r--include/uapi/linux/mount.h1
-rw-r--r--include/uapi/linux/netdev.h11
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h6
-rw-r--r--include/uapi/linux/nfc.h6
-rw-r--r--include/uapi/linux/nl80211.h272
-rw-r--r--include/uapi/linux/openvswitch.h76
-rw-r--r--include/uapi/linux/ovpn.h2
-rw-r--r--include/uapi/linux/pidfd.h5
-rw-r--r--include/uapi/linux/prctl.h37
-rw-r--r--include/uapi/linux/psp-sev.h2
-rw-r--r--include/uapi/linux/sched.h17
-rw-r--r--include/uapi/linux/sed-opal.h30
-rw-r--r--include/uapi/linux/seg6_iptunnel.h1
-rw-r--r--include/uapi/linux/trace_mmap.h9
-rw-r--r--include/uapi/linux/ublk_cmd.h80
-rw-r--r--include/uapi/linux/udp.h2
-rw-r--r--include/uapi/linux/vfio.h26
-rw-r--r--include/vdso/datapage.h27
-rw-r--r--include/vdso/helpers.h31
-rw-r--r--include/video/vga.h1
-rw-r--r--include/xen/grant_table.h12
-rw-r--r--include/xen/interface/io/console.h13
605 files changed, 20222 insertions, 7341 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index aad1a95e6863..b701b5f972cb 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -613,6 +613,8 @@ struct acpi_bus_event {
u32 data;
};
+#define ACPI_AC_CLASS "ac_adapter"
+
extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
void acpi_bus_private_data_handler(acpi_handle, void *);
@@ -625,7 +627,8 @@ int acpi_dev_install_notify_handler(struct acpi_device *adev,
void acpi_dev_remove_notify_handler(struct acpi_device *adev,
u32 handler_type,
acpi_notify_handler handler);
-extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
+extern int acpi_notifier_call_chain(const char *device_class,
+ const char *bus_id, u32 type, u32 data);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
@@ -760,8 +763,6 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_X86
bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
bool acpi_quirk_skip_acpi_ac_and_battery(void);
-int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
-void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
#else
static inline bool acpi_device_override_status(struct acpi_device *adev,
@@ -773,13 +774,6 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
{
return false;
}
-static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
-{
- return 1;
-}
-static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
-{
-}
static inline int
acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
{
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 4d644f03098e..d1f02ceec4f9 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -42,6 +42,11 @@
#define CPPC_EPP_PERFORMANCE_PREF 0x00
#define CPPC_EPP_ENERGY_EFFICIENCY_PREF 0xFF
+#define CPPC_PERF_LIMITED_DESIRED_EXCURSION BIT(0)
+#define CPPC_PERF_LIMITED_MINIMUM_EXCURSION BIT(1)
+#define CPPC_PERF_LIMITED_MASK (CPPC_PERF_LIMITED_DESIRED_EXCURSION | \
+ CPPC_PERF_LIMITED_MINIMUM_EXCURSION)
+
/* Each register has the folowing format. */
struct cpc_reg {
u8 descriptor;
@@ -116,6 +121,7 @@ struct cppc_perf_caps {
u32 guaranteed_perf;
u32 highest_perf;
u32 nominal_perf;
+ u32 reference_perf;
u32 lowest_perf;
u32 lowest_nonlinear_perf;
u32 lowest_freq;
@@ -133,7 +139,6 @@ struct cppc_perf_ctrls {
struct cppc_perf_fb_ctrs {
u64 reference;
u64 delivered;
- u64 reference_perf;
u64 wraparound_time;
};
@@ -151,11 +156,13 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
+extern int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu);
extern bool cppc_perf_ctrs_in_pcc(void);
+extern u64 cppc_get_dmi_max_khz(void);
extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
@@ -173,6 +180,8 @@ extern int cppc_get_auto_act_window(int cpu, u64 *auto_act_window);
extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
extern int cppc_get_auto_sel(int cpu, bool *enable);
extern int cppc_set_auto_sel(int cpu, bool enable);
+extern int cppc_get_perf_limited(int cpu, u64 *perf_limited);
+extern int cppc_set_perf_limited(int cpu, u64 bits_to_clear);
extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
extern int amd_detect_prefcore(bool *detected);
@@ -193,6 +202,10 @@ static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_
{
return -EOPNOTSUPP;
}
+static inline int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
+{
+ return -EOPNOTSUPP;
+}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
return -EOPNOTSUPP;
@@ -265,6 +278,14 @@ static inline int cppc_set_auto_sel(int cpu, bool enable)
{
return -EOPNOTSUPP;
}
+static inline int cppc_get_perf_limited(int cpu, u64 *perf_limited)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_perf_limited(int cpu, u64 bits_to_clear)
+{
+ return -EOPNOTSUPP;
+}
static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
{
return -ENODEV;
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 7bea522c0657..8d7e5caef3f1 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -71,6 +71,17 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
*/
void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+/**
+ * devm_ghes_register_vendor_record_notifier - device-managed vendor
+ * record notifier registration.
+ * @dev: device that owns the notifier lifetime
+ * @nb: pointer to the notifier_block structure of the vendor record handler
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int devm_ghes_register_vendor_record_notifier(struct device *dev,
+ struct notifier_block *nb);
+
struct list_head *ghes_get_devices(void);
void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 7146a8e9e9c2..554be224ce76 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -14,8 +14,6 @@
#include <asm/acpi.h>
-#define ACPI_PROCESSOR_CLASS "processor"
-#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
#define ACPI_PROCESSOR_CONTAINER_HID "ACPI0010"
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 9aff61e7b8f2..2c53a1e0b760 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -65,4 +65,3 @@ mandatory-y += vermagic.h
mandatory-y += vga.h
mandatory-y += video.h
mandatory-y += word-at-a-time.h
-mandatory-y += xor.h
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 4225a8ca9c1a..c010d54275e4 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -100,4 +100,4 @@ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long
return arch_test_and_change_bit(nr, addr);
}
-#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 1023e2a4bd37..90e8aeebfd2f 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -19,6 +19,15 @@
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
+#if __CHAR_BIT__ * __SIZEOF_LONG__ != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#ifndef __ASSEMBLER__
+_Static_assert(sizeof(long) * 8 == __BITS_PER_LONG,
+ "Inconsistent word size. Check asm/bitsperlong.h");
+#endif
+
#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG 64
#endif
diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h
index a14f4bdafdda..4948e5d4e9d9 100644
--- a/include/asm-generic/codetag.lds.h
+++ b/include/asm-generic/codetag.lds.h
@@ -18,7 +18,7 @@
IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags))
#define MOD_SEPARATE_CODETAG_SECTION(_name) \
- .codetag.##_name : { \
+ .codetag.##_name 0 : { \
SECTION_WITH_BOUNDARIES(_name) \
}
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 2a19215baae5..fbbcfd801cd0 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -25,7 +25,9 @@
* argument and comparison of the previous
* futex value with another constant.
*
- * @encoded_op: encoded operation to execute
+ * @op: operation to execute
+ * @oparg: argument of the operation
+ * @oval: previous value at @uaddr on successful return
* @uaddr: pointer to user space address
*
* Return:
diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h
index da1610a78f92..528e6fc7efe9 100644
--- a/include/asm-generic/thread_info_tif.h
+++ b/include/asm-generic/thread_info_tif.h
@@ -41,11 +41,14 @@
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
#ifdef HAVE_TIF_RESTORE_SIGMASK
-# define TIF_RESTORE_SIGMASK 10 // Restore signal mask in do_signal() */
+# define TIF_RESTORE_SIGMASK 10 // Restore signal mask in do_signal()
# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
#endif
#define TIF_RSEQ 11 // Run RSEQ fast path
#define _TIF_RSEQ BIT(TIF_RSEQ)
+#define TIF_HRTIMER_REARM 12 // re-arm the timer
+#define _TIF_HRTIMER_REARM BIT(TIF_HRTIMER_REARM)
+
#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 4aeac0c3d3f0..bdcc2778ac64 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -251,6 +251,8 @@ static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
void tlb_remove_table_sync_one(void);
+void tlb_remove_table_sync_rcu(void);
+
#else
#ifdef tlb_needs_table_invalidate
@@ -259,6 +261,8 @@ void tlb_remove_table_sync_one(void);
static inline void tlb_remove_table_sync_one(void) { }
+static inline void tlb_remove_table_sync_rcu(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1e1580febe4b..60c8c22fd3e4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -508,32 +508,25 @@
\
PRINTK_INDEX \
\
- /* Kernel symbol table: Normal symbols */ \
+ /* Kernel symbol table */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
__start___ksymtab = .; \
KEEP(*(SORT(___ksymtab+*))) \
__stop___ksymtab = .; \
} \
\
- /* Kernel symbol table: GPL-only symbols */ \
- __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
- __start___ksymtab_gpl = .; \
- KEEP(*(SORT(___ksymtab_gpl+*))) \
- __stop___ksymtab_gpl = .; \
- } \
- \
- /* Kernel symbol table: Normal symbols */ \
+ /* Kernel symbol CRC table */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
__start___kcrctab = .; \
KEEP(*(SORT(___kcrctab+*))) \
__stop___kcrctab = .; \
} \
\
- /* Kernel symbol table: GPL-only symbols */ \
- __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
- __start___kcrctab_gpl = .; \
- KEEP(*(SORT(___kcrctab_gpl+*))) \
- __stop___kcrctab_gpl = .; \
+ /* Kernel symbol flags table */ \
+ __kflagstab : AT(ADDR(__kflagstab) - LOAD_OFFSET) { \
+ __start___kflagstab = .; \
+ KEEP(*(SORT(___kflagstab+*))) \
+ __stop___kflagstab = .; \
} \
\
/* Kernel symbol table: strings */ \
@@ -975,7 +968,9 @@
RUNTIME_CONST(shift, d_hash_shift) \
RUNTIME_CONST(ptr, dentry_hashtable) \
RUNTIME_CONST(ptr, __dentry_cache) \
- RUNTIME_CONST(ptr, __names_cache)
+ RUNTIME_CONST(ptr, __names_cache) \
+ RUNTIME_CONST(ptr, __filp_cache) \
+ RUNTIME_CONST(ptr, __bfilp_cache)
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
deleted file mode 100644
index 44509d48fca2..000000000000
--- a/include/asm-generic/xor.h
+++ /dev/null
@@ -1,738 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * include/asm-generic/xor.h
- *
- * Generic optimized RAID-5 checksumming functions.
- */
-
-#include <linux/prefetch.h>
-
-static void
-xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- p1[0] ^= p2[0];
- p1[1] ^= p2[1];
- p1[2] ^= p2[2];
- p1[3] ^= p2[3];
- p1[4] ^= p2[4];
- p1[5] ^= p2[5];
- p1[6] ^= p2[6];
- p1[7] ^= p2[7];
- p1 += 8;
- p2 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- p1[0] ^= p2[0] ^ p3[0];
- p1[1] ^= p2[1] ^ p3[1];
- p1[2] ^= p2[2] ^ p3[2];
- p1[3] ^= p2[3] ^ p3[3];
- p1[4] ^= p2[4] ^ p3[4];
- p1[5] ^= p2[5] ^ p3[5];
- p1[6] ^= p2[6] ^ p3[6];
- p1[7] ^= p2[7] ^ p3[7];
- p1 += 8;
- p2 += 8;
- p3 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
- p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
- p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
- p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
- p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
- p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
- p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
- p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
- p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
- p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
- p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
- p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
- p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
- p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
- p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- p5 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- d0 ^= p3[0];
- d1 ^= p3[1];
- d2 ^= p3[2];
- d3 ^= p3[3];
- d4 ^= p3[4];
- d5 ^= p3[5];
- d6 ^= p3[6];
- d7 ^= p3[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- p3 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- d0 ^= p3[0];
- d1 ^= p3[1];
- d2 ^= p3[2];
- d3 ^= p3[3];
- d4 ^= p3[4];
- d5 ^= p3[5];
- d6 ^= p3[6];
- d7 ^= p3[7];
- d0 ^= p4[0];
- d1 ^= p4[1];
- d2 ^= p4[2];
- d3 ^= p4[3];
- d4 ^= p4[4];
- d5 ^= p4[5];
- d6 ^= p4[6];
- d7 ^= p4[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
-{
- long lines = bytes / (sizeof (long)) / 8;
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- d0 ^= p3[0];
- d1 ^= p3[1];
- d2 ^= p3[2];
- d3 ^= p3[3];
- d4 ^= p3[4];
- d5 ^= p3[5];
- d6 ^= p3[6];
- d7 ^= p3[7];
- d0 ^= p4[0];
- d1 ^= p4[1];
- d2 ^= p4[2];
- d3 ^= p4[3];
- d4 ^= p4[4];
- d5 ^= p4[5];
- d6 ^= p4[6];
- d7 ^= p4[7];
- d0 ^= p5[0];
- d1 ^= p5[1];
- d2 ^= p5[2];
- d3 ^= p5[3];
- d4 ^= p5[4];
- d5 ^= p5[5];
- d6 ^= p5[6];
- d7 ^= p5[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- p5 += 8;
- } while (--lines > 0);
-}
-
-static void
-xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
- prefetchw(p1);
- prefetch(p2);
-
- do {
- prefetchw(p1+8);
- prefetch(p2+8);
- once_more:
- p1[0] ^= p2[0];
- p1[1] ^= p2[1];
- p1[2] ^= p2[2];
- p1[3] ^= p2[3];
- p1[4] ^= p2[4];
- p1[5] ^= p2[5];
- p1[6] ^= p2[6];
- p1[7] ^= p2[7];
- p1 += 8;
- p2 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
- prefetchw(p1);
- prefetch(p2);
- prefetch(p3);
-
- do {
- prefetchw(p1+8);
- prefetch(p2+8);
- prefetch(p3+8);
- once_more:
- p1[0] ^= p2[0] ^ p3[0];
- p1[1] ^= p2[1] ^ p3[1];
- p1[2] ^= p2[2] ^ p3[2];
- p1[3] ^= p2[3] ^ p3[3];
- p1[4] ^= p2[4] ^ p3[4];
- p1[5] ^= p2[5] ^ p3[5];
- p1[6] ^= p2[6] ^ p3[6];
- p1[7] ^= p2[7] ^ p3[7];
- p1 += 8;
- p2 += 8;
- p3 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
-
- prefetchw(p1);
- prefetch(p2);
- prefetch(p3);
- prefetch(p4);
-
- do {
- prefetchw(p1+8);
- prefetch(p2+8);
- prefetch(p3+8);
- prefetch(p4+8);
- once_more:
- p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
- p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
- p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
- p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
- p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
- p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
- p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
- p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
-
- prefetchw(p1);
- prefetch(p2);
- prefetch(p3);
- prefetch(p4);
- prefetch(p5);
-
- do {
- prefetchw(p1+8);
- prefetch(p2+8);
- prefetch(p3+8);
- prefetch(p4+8);
- prefetch(p5+8);
- once_more:
- p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
- p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
- p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
- p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
- p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
- p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
- p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
- p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- p5 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
-
- prefetchw(p1);
- prefetch(p2);
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
-
- prefetchw(p1+8);
- prefetch(p2+8);
- once_more:
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
-
- prefetchw(p1);
- prefetch(p2);
- prefetch(p3);
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
-
- prefetchw(p1+8);
- prefetch(p2+8);
- prefetch(p3+8);
- once_more:
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- d0 ^= p3[0];
- d1 ^= p3[1];
- d2 ^= p3[2];
- d3 ^= p3[3];
- d4 ^= p3[4];
- d5 ^= p3[5];
- d6 ^= p3[6];
- d7 ^= p3[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- p3 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
-
- prefetchw(p1);
- prefetch(p2);
- prefetch(p3);
- prefetch(p4);
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
-
- prefetchw(p1+8);
- prefetch(p2+8);
- prefetch(p3+8);
- prefetch(p4+8);
- once_more:
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- d0 ^= p3[0];
- d1 ^= p3[1];
- d2 ^= p3[2];
- d3 ^= p3[3];
- d4 ^= p3[4];
- d5 ^= p3[5];
- d6 ^= p3[6];
- d7 ^= p3[7];
- d0 ^= p4[0];
- d1 ^= p4[1];
- d2 ^= p4[2];
- d3 ^= p4[3];
- d4 ^= p4[4];
- d5 ^= p4[5];
- d6 ^= p4[6];
- d7 ^= p4[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static void
-xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
-{
- long lines = bytes / (sizeof (long)) / 8 - 1;
-
- prefetchw(p1);
- prefetch(p2);
- prefetch(p3);
- prefetch(p4);
- prefetch(p5);
-
- do {
- register long d0, d1, d2, d3, d4, d5, d6, d7;
-
- prefetchw(p1+8);
- prefetch(p2+8);
- prefetch(p3+8);
- prefetch(p4+8);
- prefetch(p5+8);
- once_more:
- d0 = p1[0]; /* Pull the stuff into registers */
- d1 = p1[1]; /* ... in bursts, if possible. */
- d2 = p1[2];
- d3 = p1[3];
- d4 = p1[4];
- d5 = p1[5];
- d6 = p1[6];
- d7 = p1[7];
- d0 ^= p2[0];
- d1 ^= p2[1];
- d2 ^= p2[2];
- d3 ^= p2[3];
- d4 ^= p2[4];
- d5 ^= p2[5];
- d6 ^= p2[6];
- d7 ^= p2[7];
- d0 ^= p3[0];
- d1 ^= p3[1];
- d2 ^= p3[2];
- d3 ^= p3[3];
- d4 ^= p3[4];
- d5 ^= p3[5];
- d6 ^= p3[6];
- d7 ^= p3[7];
- d0 ^= p4[0];
- d1 ^= p4[1];
- d2 ^= p4[2];
- d3 ^= p4[3];
- d4 ^= p4[4];
- d5 ^= p4[5];
- d6 ^= p4[6];
- d7 ^= p4[7];
- d0 ^= p5[0];
- d1 ^= p5[1];
- d2 ^= p5[2];
- d3 ^= p5[3];
- d4 ^= p5[4];
- d5 ^= p5[5];
- d6 ^= p5[6];
- d7 ^= p5[7];
- p1[0] = d0; /* Store the result (in bursts) */
- p1[1] = d1;
- p1[2] = d2;
- p1[3] = d3;
- p1[4] = d4;
- p1[5] = d5;
- p1[6] = d6;
- p1[7] = d7;
- p1 += 8;
- p2 += 8;
- p3 += 8;
- p4 += 8;
- p5 += 8;
- } while (--lines > 0);
- if (lines == 0)
- goto once_more;
-}
-
-static struct xor_block_template xor_block_8regs = {
- .name = "8regs",
- .do_2 = xor_8regs_2,
- .do_3 = xor_8regs_3,
- .do_4 = xor_8regs_4,
- .do_5 = xor_8regs_5,
-};
-
-static struct xor_block_template xor_block_32regs = {
- .name = "32regs",
- .do_2 = xor_32regs_2,
- .do_3 = xor_32regs_3,
- .do_4 = xor_32regs_4,
- .do_5 = xor_32regs_5,
-};
-
-static struct xor_block_template xor_block_8regs_p __maybe_unused = {
- .name = "8regs_prefetch",
- .do_2 = xor_8regs_p_2,
- .do_3 = xor_8regs_p_3,
- .do_4 = xor_8regs_p_4,
- .do_5 = xor_8regs_p_5,
-};
-
-static struct xor_block_template xor_block_32regs_p __maybe_unused = {
- .name = "32regs_prefetch",
- .do_2 = xor_32regs_p_2,
- .do_3 = xor_32regs_p_3,
- .do_4 = xor_32regs_p_4,
- .do_5 = xor_32regs_p_5,
-};
-
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
- } while (0)
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 9eacb9fa375d..5d5358dfab73 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -240,9 +240,10 @@ static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
}
/**
- * acomp_request_alloc() -- allocates asynchronous (de)compression request
+ * acomp_request_alloc_extra() -- allocates asynchronous (de)compression request
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @extra: amount of extra memory
* @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL)
*
* Return: allocated handle in case of success or NULL in case of an error
@@ -318,7 +319,7 @@ static inline void acomp_request_free(struct acomp_req *req)
*
* @req: request that the callback will be set for
* @flgs: specify for instance if the operation may backlog
- * @cmlp: callback which will be called
+ * @cmpl: callback which will be called
* @data: private data used by the caller
*/
static inline void acomp_request_set_callback(struct acomp_req *req,
diff --git a/include/crypto/aes-cbc-macs.h b/include/crypto/aes-cbc-macs.h
new file mode 100644
index 000000000000..e61df108b926
--- /dev/null
+++ b/include/crypto/aes-cbc-macs.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for AES-CMAC, AES-XCBC-MAC, and AES-CBC-MAC
+ *
+ * Copyright 2026 Google LLC
+ */
+#ifndef _CRYPTO_AES_CBC_MACS_H
+#define _CRYPTO_AES_CBC_MACS_H
+
+#include <crypto/aes.h>
+
+/**
+ * struct aes_cmac_key - Prepared key for AES-CMAC or AES-XCBC-MAC
+ * @aes: The AES key for cipher block chaining
+ * @k_final: Finalization subkeys for the final block.
+ * k_final[0] (CMAC K1, XCBC-MAC K2) is used if it's a full block.
+ * k_final[1] (CMAC K2, XCBC-MAC K3) is used if it's a partial block.
+ */
+struct aes_cmac_key {
+ struct aes_enckey aes;
+ union {
+ u8 b[AES_BLOCK_SIZE];
+ __be64 w[2];
+ } k_final[2];
+};
+
+/**
+ * struct aes_cmac_ctx - Context for computing an AES-CMAC or AES-XCBC-MAC value
+ * @key: Pointer to the key struct. A pointer is used rather than a copy of the
+ * struct, since the key struct size may be large. It is assumed that the
+ * key lives at least as long as the context.
+ * @partial_len: Number of bytes that have been XOR'ed into @h since the last
+ * AES encryption. This is 0 if no data has been processed yet,
+ * or between 1 and AES_BLOCK_SIZE inclusive otherwise.
+ * @h: The current chaining value
+ */
+struct aes_cmac_ctx {
+ const struct aes_cmac_key *key;
+ size_t partial_len;
+ u8 h[AES_BLOCK_SIZE];
+};
+
+/**
+ * aes_cmac_preparekey() - Prepare a key for AES-CMAC
+ * @key: (output) The key struct to initialize
+ * @in_key: The raw AES key
+ * @key_len: Length of the raw key in bytes. The supported values are
+ * AES_KEYSIZE_128, AES_KEYSIZE_192, and AES_KEYSIZE_256.
+ *
+ * Context: Any context.
+ * Return: 0 on success or -EINVAL if the given key length is invalid. No other
+ * errors are possible, so callers that always pass a valid key length
+ * don't need to check for errors.
+ */
+int aes_cmac_preparekey(struct aes_cmac_key *key, const u8 *in_key,
+ size_t key_len);
+
+/**
+ * aes_xcbcmac_preparekey() - Prepare a key for AES-XCBC-MAC
+ * @key: (output) The key struct to initialize
+ * @in_key: The raw key. As per the AES-XCBC-MAC specification (RFC 3566), this
+ * is 128 bits, matching the internal use of AES-128.
+ *
+ * AES-XCBC-MAC and AES-CMAC are the same except for the key preparation. After
+ * that step, AES-XCBC-MAC is supported via the aes_cmac_* functions.
+ *
+ * New users should use AES-CMAC instead of AES-XCBC-MAC.
+ *
+ * Context: Any context.
+ */
+void aes_xcbcmac_preparekey(struct aes_cmac_key *key,
+ const u8 in_key[at_least AES_KEYSIZE_128]);
+
+/**
+ * aes_cmac_init() - Start computing an AES-CMAC or AES-XCBC-MAC value
+ * @ctx: (output) The context to initialize
+ * @key: The key to use. Note that a pointer to the key is saved in the
+ * context, so the key must live at least as long as the context.
+ *
+ * This supports both AES-CMAC and AES-XCBC-MAC. Which one is done depends on
+ * whether aes_cmac_preparekey() or aes_xcbcmac_preparekey() was called.
+ */
+static inline void aes_cmac_init(struct aes_cmac_ctx *ctx,
+ const struct aes_cmac_key *key)
+{
+ *ctx = (struct aes_cmac_ctx){ .key = key };
+}
+
+/**
+ * aes_cmac_update() - Update an AES-CMAC or AES-XCBC-MAC context with more data
+ * @ctx: The context to update; must have been initialized
+ * @data: The message data
+ * @data_len: The data length in bytes. Doesn't need to be block-aligned.
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void aes_cmac_update(struct aes_cmac_ctx *ctx, const u8 *data, size_t data_len);
+
+/**
+ * aes_cmac_final() - Finish computing an AES-CMAC or AES-XCBC-MAC value
+ * @ctx: The context to finalize; must have been initialized
+ * @out: (output) The resulting MAC
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void aes_cmac_final(struct aes_cmac_ctx *ctx, u8 out[at_least AES_BLOCK_SIZE]);
+
+/**
+ * aes_cmac() - Compute AES-CMAC or AES-XCBC-MAC in one shot
+ * @key: The key to use
+ * @data: The message data
+ * @data_len: The data length in bytes
+ * @out: (output) The resulting AES-CMAC or AES-XCBC-MAC value
+ *
+ * This supports both AES-CMAC and AES-XCBC-MAC. Which one is done depends on
+ * whether aes_cmac_preparekey() or aes_xcbcmac_preparekey() was called.
+ *
+ * Context: Any context.
+ */
+static inline void aes_cmac(const struct aes_cmac_key *key, const u8 *data,
+ size_t data_len, u8 out[at_least AES_BLOCK_SIZE])
+{
+ struct aes_cmac_ctx ctx;
+
+ aes_cmac_init(&ctx, key);
+ aes_cmac_update(&ctx, data, data_len);
+ aes_cmac_final(&ctx, out);
+}
+
+/*
+ * AES-CBC-MAC support. This is provided only for use by the implementation of
+ * AES-CCM. It should have no other users. Warning: unlike AES-CMAC and
+ * AES-XCBC-MAC, AES-CBC-MAC isn't a secure MAC for variable-length messages.
+ */
+struct aes_cbcmac_ctx {
+ const struct aes_enckey *key;
+ size_t partial_len;
+ u8 h[AES_BLOCK_SIZE];
+};
+static inline void aes_cbcmac_init(struct aes_cbcmac_ctx *ctx,
+ const struct aes_enckey *key)
+{
+ *ctx = (struct aes_cbcmac_ctx){ .key = key };
+}
+void aes_cbcmac_update(struct aes_cbcmac_ctx *ctx, const u8 *data,
+ size_t data_len);
+void aes_cbcmac_final(struct aes_cbcmac_ctx *ctx,
+ u8 out[at_least AES_BLOCK_SIZE]);
+
+#endif /* _CRYPTO_AES_CBC_MACS_H */
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index cbf1cc96db52..16fbfd93e2bd 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -167,6 +167,72 @@ int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
#ifdef CONFIG_ARM64
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
+asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks);
+asmlinkage void neon_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks);
+asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks, u8 iv[]);
+asmlinkage void neon_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks, u8 iv[]);
+asmlinkage void neon_aes_cbc_cts_encrypt(u8 out[], u8 const in[],
+ u32 const rk[], int rounds, int bytes,
+ u8 const iv[]);
+asmlinkage void neon_aes_cbc_cts_decrypt(u8 out[], u8 const in[],
+ u32 const rk[], int rounds, int bytes,
+ u8 const iv[]);
+asmlinkage void neon_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 ctr[]);
+asmlinkage void neon_aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 ctr[],
+ int byte_ctr);
+asmlinkage void neon_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int bytes, u32 const rk2[],
+ u8 iv[], int first);
+asmlinkage void neon_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int bytes, u32 const rk2[],
+ u8 iv[], int first);
+asmlinkage void neon_aes_essiv_cbc_encrypt(u8 out[], u8 const in[],
+ u32 const rk1[], int rounds,
+ int blocks, u8 iv[],
+ u32 const rk2[]);
+asmlinkage void neon_aes_essiv_cbc_decrypt(u8 out[], u8 const in[],
+ u32 const rk1[], int rounds,
+ int blocks, u8 iv[],
+ u32 const rk2[]);
+
+asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks);
+asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks);
+asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks, u8 iv[]);
+asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks, u8 iv[]);
+asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 const iv[]);
+asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 const iv[]);
+asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 ctr[]);
+asmlinkage void ce_aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 ctr[],
+ int byte_ctr);
+asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int bytes, u32 const rk2[],
+ u8 iv[], int first);
+asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int bytes, u32 const rk2[],
+ u8 iv[], int first);
+asmlinkage void ce_aes_essiv_cbc_encrypt(u8 out[], u8 const in[],
+ u32 const rk1[], int rounds,
+ int blocks, u8 iv[], u32 const rk2[]);
+asmlinkage void ce_aes_essiv_cbc_decrypt(u8 out[], u8 const in[],
+ u32 const rk1[], int rounds,
+ int blocks, u8 iv[], u32 const rk2[]);
+asmlinkage void ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ size_t blocks, u8 dg[], int enc_before,
+ int enc_after);
#elif defined(CONFIG_PPC)
void ppc_expand_key_128(u32 *key_enc, const u8 *key);
void ppc_expand_key_192(u32 *key_enc, const u8 *key);
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
index 0f71b037702d..0f6d99170aaf 100644
--- a/include/crypto/chacha20poly1305.h
+++ b/include/crypto/chacha20poly1305.h
@@ -46,6 +46,4 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
const u64 nonce,
const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
-bool chacha20poly1305_selftest(void);
-
#endif /* __CHACHA20POLY1305_H */
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 796d986e58e1..29c5878a3609 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -16,39 +16,6 @@
#include <linux/types.h>
#include <crypto/aead.h>
-#include <crypto/hash.h>
-#include <crypto/skcipher.h>
-
-struct cryptd_skcipher {
- struct crypto_skcipher base;
-};
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
-/* Must be called without moving CPUs. */
-bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
-void cryptd_free_skcipher(struct cryptd_skcipher *tfm);
-
-struct cryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct cryptd_ahash *__cryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct cryptd_ahash *)tfm;
-}
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
-struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
-/* Must be called without moving CPUs. */
-bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
-void cryptd_free_ahash(struct cryptd_ahash *tfm);
struct cryptd_aead {
struct crypto_aead base;
diff --git a/include/crypto/des.h b/include/crypto/des.h
index 7812b4331ae4..73eec617f480 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -34,9 +34,9 @@ void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src);
* des_expand_key - Expand a DES input key into a key schedule
* @ctx: the key schedule
* @key: buffer containing the input key
- * @len: size of the buffer contents
+ * @keylen: size of the buffer contents
*
- * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
+ * Returns: 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
* the key is accepted but has been found to be weak.
*/
int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen);
@@ -45,9 +45,9 @@ int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen);
* des3_ede_expand_key - Expand a triple DES input key into a key schedule
* @ctx: the key schedule
* @key: buffer containing the input key
- * @len: size of the buffer contents
+ * @keylen: size of the buffer contents
*
- * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
+ * Returns: 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
* the key is accepted but has been found to be weak. Note that weak keys will
* be rejected (and -EINVAL will be returned) when running in FIPS mode.
*/
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index b524e47bd4d0..1d5f39ff1dc4 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -4,7 +4,7 @@
#include <linux/errno.h>
#include <crypto/aes.h>
-#include <crypto/gf128mul.h>
+#include <crypto/gf128hash.h>
#define GCM_AES_IV_SIZE 12
#define GCM_RFC4106_IV_SIZE 8
@@ -65,7 +65,7 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
}
struct aesgcm_ctx {
- be128 ghash_key;
+ struct ghash_key ghash_key;
struct aes_enckey aes_key;
unsigned int authsize;
};
diff --git a/include/crypto/polyval.h b/include/crypto/gf128hash.h
index b28b8ef11353..41c557d55965 100644
--- a/include/crypto/polyval.h
+++ b/include/crypto/gf128hash.h
@@ -1,13 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * POLYVAL library API
+ * GF(2^128) polynomial hashing: GHASH and POLYVAL
*
* Copyright 2025 Google LLC
*/
-#ifndef _CRYPTO_POLYVAL_H
-#define _CRYPTO_POLYVAL_H
+#ifndef _CRYPTO_GF128HASH_H
+#define _CRYPTO_GF128HASH_H
+#include <crypto/ghash.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -34,6 +35,24 @@ struct polyval_elem {
};
/**
+ * struct ghash_key - Prepared key for GHASH
+ *
+ * Use ghash_preparekey() to initialize this.
+ */
+struct ghash_key {
+#if defined(CONFIG_CRYPTO_LIB_GF128HASH_ARCH) && defined(CONFIG_PPC64)
+ /** @htable: GHASH key format used by the POWER8 assembly code */
+ u64 htable[4][2];
+#elif defined(CONFIG_CRYPTO_LIB_GF128HASH_ARCH) && \
+ (defined(CONFIG_RISCV) || defined(CONFIG_S390))
+ /** @h_raw: The hash key H, in GHASH format */
+ u8 h_raw[GHASH_BLOCK_SIZE];
+#endif
+ /** @h: The hash key H, in POLYVAL format */
+ struct polyval_elem h;
+};
+
+/**
* struct polyval_key - Prepared key for POLYVAL
*
* This may contain just the raw key H, or it may contain precomputed key
@@ -44,20 +63,28 @@ struct polyval_elem {
* exponentiation repeats the POLYVAL dot operation, with its "extra" x^-128.
*/
struct polyval_key {
-#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH
-#ifdef CONFIG_ARM64
- /** @h_powers: Powers of the hash key H^8 through H^1 */
- struct polyval_elem h_powers[8];
-#elif defined(CONFIG_X86)
+#if defined(CONFIG_CRYPTO_LIB_GF128HASH_ARCH) && \
+ (defined(CONFIG_ARM64) || defined(CONFIG_X86))
/** @h_powers: Powers of the hash key H^8 through H^1 */
struct polyval_elem h_powers[8];
#else
-#error "Unhandled arch"
-#endif
-#else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */
/** @h: The hash key H */
struct polyval_elem h;
-#endif /* !CONFIG_CRYPTO_LIB_POLYVAL_ARCH */
+#endif
+};
+
+/**
+ * struct ghash_ctx - Context for computing a GHASH value
+ * @key: Pointer to the prepared GHASH key. The user of the API is
+ * responsible for ensuring that the key lives as long as the context.
+ * @acc: The accumulator. It is stored in POLYVAL format rather than GHASH
+ * format, since most implementations want it in POLYVAL format.
+ * @partial: Number of data bytes processed so far modulo GHASH_BLOCK_SIZE
+ */
+struct ghash_ctx {
+ const struct ghash_key *key;
+ struct polyval_elem acc;
+ size_t partial;
};
/**
@@ -74,6 +101,18 @@ struct polyval_ctx {
};
/**
+ * ghash_preparekey() - Prepare a GHASH key
+ * @key: (output) The key structure to initialize
+ * @raw_key: The raw hash key
+ *
+ * Initialize a GHASH key structure from a raw key.
+ *
+ * Context: Any context.
+ */
+void ghash_preparekey(struct ghash_key *key,
+ const u8 raw_key[GHASH_BLOCK_SIZE]);
+
+/**
* polyval_preparekey() - Prepare a POLYVAL key
* @key: (output) The key structure to initialize
* @raw_key: The raw hash key
@@ -84,18 +123,20 @@ struct polyval_ctx {
*
* Context: Any context.
*/
-#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH
void polyval_preparekey(struct polyval_key *key,
const u8 raw_key[POLYVAL_BLOCK_SIZE]);
-#else
-static inline void polyval_preparekey(struct polyval_key *key,
- const u8 raw_key[POLYVAL_BLOCK_SIZE])
+/**
+ * ghash_init() - Initialize a GHASH context for a new message
+ * @ctx: The context to initialize
+ * @key: The key to use. Note that a pointer to the key is saved in the
+ * context, so the key must live at least as long as the context.
+ */
+static inline void ghash_init(struct ghash_ctx *ctx,
+ const struct ghash_key *key)
{
- /* Just a simple copy, so inline it. */
- memcpy(key->h.bytes, raw_key, POLYVAL_BLOCK_SIZE);
+ *ctx = (struct ghash_ctx){ .key = key };
}
-#endif
/**
* polyval_init() - Initialize a POLYVAL context for a new message
@@ -142,6 +183,18 @@ static inline void polyval_export_blkaligned(const struct polyval_ctx *ctx,
}
/**
+ * ghash_update() - Update a GHASH context with message data
+ * @ctx: The context to update; must have been initialized
+ * @data: The message data
+ * @len: The data length in bytes. Doesn't need to be block-aligned.
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void ghash_update(struct ghash_ctx *ctx, const u8 *data, size_t len);
+
+/**
* polyval_update() - Update a POLYVAL context with message data
* @ctx: The context to update; must have been initialized
* @data: The message data
@@ -154,6 +207,20 @@ static inline void polyval_export_blkaligned(const struct polyval_ctx *ctx,
void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len);
/**
+ * ghash_final() - Finish computing a GHASH value
+ * @ctx: The context to finalize
+ * @out: The output value
+ *
+ * If the total data length isn't a multiple of GHASH_BLOCK_SIZE, then the
+ * final block is automatically zero-padded.
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void ghash_final(struct ghash_ctx *ctx, u8 out[GHASH_BLOCK_SIZE]);
+
+/**
* polyval_final() - Finish computing a POLYVAL value
* @ctx: The context to finalize
* @out: The output value
@@ -168,6 +235,25 @@ void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len);
void polyval_final(struct polyval_ctx *ctx, u8 out[POLYVAL_BLOCK_SIZE]);
/**
+ * ghash() - Compute a GHASH value
+ * @key: The prepared key
+ * @data: The message data
+ * @len: The data length in bytes. Doesn't need to be block-aligned.
+ * @out: The output value
+ *
+ * Context: Any context.
+ */
+static inline void ghash(const struct ghash_key *key, const u8 *data,
+ size_t len, u8 out[GHASH_BLOCK_SIZE])
+{
+ struct ghash_ctx ctx;
+
+ ghash_init(&ctx, key);
+ ghash_update(&ctx, data, len);
+ ghash_final(&ctx, out);
+}
+
+/**
* polyval() - Compute a POLYVAL value
* @key: The prepared key
* @data: The message data
@@ -187,4 +273,4 @@ static inline void polyval(const struct polyval_key *key,
polyval_final(&ctx, out);
}
-#endif /* _CRYPTO_POLYVAL_H */
+#endif /* _CRYPTO_GF128HASH_H */
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index b0853f7cada0..6ed2a8351902 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -215,25 +215,14 @@ static inline void gf128mul_x_ble(le128 *r, const le128 *x)
r->b = cpu_to_le64((b << 1) ^ _tt);
}
-/* 4k table optimization */
-
-struct gf128mul_4k {
- be128 t[256];
-};
-
-struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
-void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
-static inline void gf128mul_free_4k(struct gf128mul_4k *t)
-{
- kfree_sensitive(t);
-}
-
/* 64k table optimization, implemented for bbe */
struct gf128mul_64k {
- struct gf128mul_4k *t[16];
+ struct {
+ be128 t[256];
+ } *t[16];
};
/* First initialize with the constant factor with which you
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index 043d938e9a2c..d187e5af9925 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -6,19 +6,7 @@
#ifndef __CRYPTO_GHASH_H__
#define __CRYPTO_GHASH_H__
-#include <linux/types.h>
-
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
-struct gf128mul_4k;
-
-struct ghash_ctx {
- struct gf128mul_4k *gf128;
-};
-
-struct ghash_desc_ctx {
- u8 buffer[GHASH_BLOCK_SIZE];
-};
-
#endif
diff --git a/include/crypto/hkdf.h b/include/crypto/hkdf.h
deleted file mode 100644
index 6a9678f508f5..000000000000
--- a/include/crypto/hkdf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * HKDF: HMAC-based Key Derivation Function (HKDF), RFC 5869
- *
- * Extracted from fs/crypto/hkdf.c, which has
- * Copyright 2019 Google LLC
- */
-
-#ifndef _CRYPTO_HKDF_H
-#define _CRYPTO_HKDF_H
-
-#include <crypto/hash.h>
-
-int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
- unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
- u8 *prk);
-int hkdf_expand(struct crypto_shash *hmac_tfm,
- const u8 *info, unsigned int infolen,
- u8 *okm, unsigned int okmlen);
-#endif
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 107b797c33ec..0cc8fa749f68 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -230,9 +230,8 @@ static inline bool af_alg_readable(struct sock *sk)
return PAGE_SIZE <= af_alg_rcvbuf(sk);
}
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 9a3f28baa804..9cd37df32dc4 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -42,6 +42,7 @@
*
* @base: Common crypto API algorithm data structure
* @calg: Cmonn algorithm data structure shared with scomp
+ * @COMP_ALG_COMMON: see struct comp_alg_common
*/
struct acomp_alg {
int (*compress)(struct acomp_req *req);
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
deleted file mode 100644
index 52d9d4c82493..000000000000
--- a/include/crypto/internal/blockhash.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Handle partial blocks for block hash.
- *
- * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
-#define _CRYPTO_INTERNAL_BLOCKHASH_H
-
-#include <linux/string.h>
-#include <linux/types.h>
-
-#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
- buf, buflen) \
- ({ \
- typeof(block_fn) *_block_fn = &(block_fn); \
- typeof(state + 0) _state = (state); \
- unsigned int _buflen = (buflen); \
- size_t _nbytes = (nbytes); \
- unsigned int _bs = (bs); \
- const u8 *_src = (src); \
- u8 *_buf = (buf); \
- while ((_buflen + _nbytes) >= _bs) { \
- const u8 *data = _src; \
- size_t len = _nbytes; \
- size_t blocks; \
- int remain; \
- if (_buflen) { \
- remain = _bs - _buflen; \
- memcpy(_buf + _buflen, _src, remain); \
- data = _buf; \
- len = _bs; \
- } \
- remain = len % bs; \
- blocks = (len - remain) / (dv); \
- (*_block_fn)(_state, data, blocks); \
- _src += len - remain - _buflen; \
- _nbytes -= len - remain - _buflen; \
- _buflen = 0; \
- } \
- memcpy(_buf + _buflen, _src, _nbytes); \
- _buflen += _nbytes; \
- })
-
-#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
- BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
-#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
- BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
-
-#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index 57cd75242141..a4b48d76f53a 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -72,8 +72,8 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
/**
* ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
* @in: Input byte array
- * @nbytes Size of input byte array
- * @out Output digits array
+ * @nbytes: Size of input byte array
+ * @out: Output digits array
* @ndigits: Number of digits to create from byte array
*
* The first byte in the input byte array is expected to hold the most
@@ -90,7 +90,7 @@ void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
* @private_key: private key to be used for the given curve
* @private_key_len: private key length
*
- * Returns 0 if the key is acceptable, a negative value otherwise
+ * Returns: 0 if the key is acceptable, a negative value otherwise
*/
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, unsigned int private_key_len);
@@ -104,7 +104,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
* @ndigits: curve number of digits
* @private_key: buffer for storing the generated private key
*
- * Returns 0 if the private key was generated successfully, a negative value
+ * Returns: 0 if the private key was generated successfully, a negative value
* if an error occurred.
*/
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
@@ -118,7 +118,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
* @private_key: pregenerated private key for the given curve
* @public_key: buffer for storing the generated public key
*
- * Returns 0 if the public key was generated successfully, a negative value
+ * Returns: 0 if the public key was generated successfully, a negative value
* if an error occurred.
*/
int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
@@ -136,7 +136,7 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
* Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
* before using it for symmetric encryption or HMAC.
*
- * Returns 0 if the shared secret was generated successfully, a negative value
+ * Returns: 0 if the shared secret was generated successfully, a negative value
* if an error occurred.
*/
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
@@ -179,6 +179,8 @@ int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
*
* @vli: vli to check.
* @ndigits: length of the @vli
+ *
+ * Returns: %true if vli == 0, %false otherwise.
*/
bool vli_is_zero(const u64 *vli, unsigned int ndigits);
@@ -189,7 +191,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits);
* @right: vli
* @ndigits: length of both vlis
*
- * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * Returns: sign of @left - @right, i.e. -1 if @left < @right,
* 0 if @left == @right, 1 if @left > @right.
*/
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
@@ -199,7 +201,7 @@ int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
*
* @result: where to write result
* @left: vli
- * @right vli
+ * @right: vli
* @ndigits: length of all vlis
*
* Note: can modify in-place.
@@ -263,7 +265,7 @@ void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
/**
- * ecc_aloc_point() - Allocate ECC point.
+ * ecc_alloc_point() - Allocate ECC point.
*
* @ndigits: Length of vlis in u64 qwords.
*
@@ -281,7 +283,7 @@ void ecc_free_point(struct ecc_point *p);
/**
* ecc_point_is_zero() - Check if point is zero.
*
- * @p: Point to check for zero.
+ * @point: Point to check for zero.
*
* Return: true if point is the point at infinity, false otherwise.
*/
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 012f5fb22d43..e38d9f0487ec 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -9,11 +9,9 @@
#define _CRYPTO_INTERNAL_GENIV_H
#include <crypto/internal/aead.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
struct aead_geniv_ctx {
- spinlock_t lock;
struct crypto_aead *child;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 6a2c5f2e90f9..13a0851a995b 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -22,6 +22,7 @@ struct crypto_scomp {
* @decompress: Function performs a de-compress operation
* @streams: Per-cpu memory for algorithm
* @calg: Cmonn algorithm data structure shared with acomp
+ * @COMP_ALG_COMMON: see struct comp_alg_common
*/
struct scomp_alg {
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index 9e338e7aafbd..f5e5d7b63951 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -10,25 +10,6 @@
#include <linux/percpu.h>
#include <linux/types.h>
-/* skcipher support */
-
-struct simd_skcipher_alg;
-struct skcipher_alg;
-
-struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
- const char *algname,
- const char *drvname,
- const char *basename);
-void simd_skcipher_free(struct simd_skcipher_alg *alg);
-
-int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
- struct simd_skcipher_alg **simd_algs);
-
-void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
- struct simd_skcipher_alg **simd_algs);
-
-/* AEAD support */
-
struct simd_aead_alg;
struct aead_alg;
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index d451b54b322a..07f494b2c881 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -12,6 +12,8 @@
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
+#include <linux/random.h>
struct crypto_rng;
@@ -57,10 +59,27 @@ struct crypto_rng {
struct crypto_tfm base;
};
-extern struct crypto_rng *crypto_default_rng;
+int __crypto_stdrng_get_bytes(void *buf, unsigned int len);
-int crypto_get_default_rng(void);
-void crypto_put_default_rng(void);
+/**
+ * crypto_stdrng_get_bytes() - get cryptographically secure random bytes
+ * @buf: output buffer holding the random numbers
+ * @len: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * normal Linux RNG if fips_enabled=0, or the highest-priority "stdrng"
+ * algorithm in the crypto_rng subsystem if fips_enabled=1.
+ *
+ * Context: May sleep
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_stdrng_get_bytes(void *buf, unsigned int len)
+{
+ might_sleep();
+ if (fips_enabled)
+ return __crypto_stdrng_get_bytes(buf, len);
+ return get_random_bytes_wait(buf, len);
+}
/**
* DOC: Random number generator API
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 9e5853464345..4efe2ca8c4d1 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -145,6 +145,7 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
* @co: see struct skcipher_alg_common
+ * @SKCIPHER_ALG_COMMON: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index c8d02c86c298..371e8a661705 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Common values for SM3 algorithm
+ * SM3 hash algorithm
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
@@ -14,10 +14,6 @@
#define SM3_DIGEST_SIZE 32
#define SM3_BLOCK_SIZE 64
-#define SM3_STATE_SIZE 40
-
-#define SM3_T1 0x79CC4519
-#define SM3_T2 0x7A879D8A
#define SM3_IVA 0x7380166f
#define SM3_IVB 0x4914b2b9
@@ -28,37 +24,64 @@
#define SM3_IVG 0xe38dee4d
#define SM3_IVH 0xb0fb0e4e
-extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE];
+/* State for the SM3 compression function */
+struct sm3_block_state {
+ u32 h[SM3_DIGEST_SIZE / 4];
+};
-struct sm3_state {
- u32 state[SM3_DIGEST_SIZE / 4];
- u64 count;
- u8 buffer[SM3_BLOCK_SIZE];
+/**
+ * struct sm3_ctx - Context for hashing a message with SM3
+ * @state: the compression function state
+ * @bytecount: number of bytes processed so far
+ * @buf: partial block buffer; bytecount % SM3_BLOCK_SIZE bytes are valid
+ */
+struct sm3_ctx {
+ struct sm3_block_state state;
+ u64 bytecount;
+ u8 buf[SM3_BLOCK_SIZE] __aligned(__alignof__(__be64));
};
-/*
- * Stand-alone implementation of the SM3 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
+/**
+ * sm3_init() - Initialize an SM3 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sm3() instead.
+ *
+ * Context: Any context.
+ */
+void sm3_init(struct sm3_ctx *ctx);
+
+/**
+ * sm3_update() - Update an SM3 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
*
- * For details see lib/crypto/sm3.c
+ * Context: Any context.
*/
+void sm3_update(struct sm3_ctx *ctx, const u8 *data, size_t len);
-static inline void sm3_init(struct sm3_state *sctx)
-{
- sctx->state[0] = SM3_IVA;
- sctx->state[1] = SM3_IVB;
- sctx->state[2] = SM3_IVC;
- sctx->state[3] = SM3_IVD;
- sctx->state[4] = SM3_IVE;
- sctx->state[5] = SM3_IVF;
- sctx->state[6] = SM3_IVG;
- sctx->state[7] = SM3_IVH;
- sctx->count = 0;
-}
+/**
+ * sm3_final() - Finish computing an SM3 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SM3 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sm3_final(struct sm3_ctx *ctx, u8 out[at_least SM3_DIGEST_SIZE]);
-void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks);
+/**
+ * sm3() - Compute SM3 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SM3 message digest
+ *
+ * Context: Any context.
+ */
+void sm3(const u8 *data, size_t len, u8 out[at_least SM3_DIGEST_SIZE]);
-#endif
+#endif /* _CRYPTO_SM3_H */
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
deleted file mode 100644
index 7c53570bc05e..000000000000
--- a/include/crypto/sm3_base.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sm3_base.h - core logic for SM3 implementations
- *
- * Copyright (C) 2017 ARM Limited or its affiliates.
- * Written by Gilad Ben-Yossef <gilad@benyossef.com>
- */
-
-#ifndef _CRYPTO_SM3_BASE_H
-#define _CRYPTO_SM3_BASE_H
-
-#include <crypto/internal/hash.h>
-#include <crypto/sm3.h>
-#include <linux/math.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/unaligned.h>
-
-typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
-
-static inline int sm3_base_init(struct shash_desc *desc)
-{
- sm3_init(shash_desc_ctx(desc));
- return 0;
-}
-
-static inline int sm3_base_do_update_blocks(struct shash_desc *desc,
- const u8 *data, unsigned int len,
- sm3_block_fn *block_fn)
-{
- unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE);
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- sctx->count += len - remain;
- block_fn(sctx, data, len / SM3_BLOCK_SIZE);
- return remain;
-}
-
-static inline int sm3_base_do_finup(struct shash_desc *desc,
- const u8 *src, unsigned int len,
- sm3_block_fn *block_fn)
-{
- unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1;
- struct sm3_state *sctx = shash_desc_ctx(desc);
- union {
- __be64 b64[SM3_BLOCK_SIZE / 4];
- u8 u8[SM3_BLOCK_SIZE * 2];
- } block = {};
-
- if (len >= SM3_BLOCK_SIZE) {
- int remain;
-
- remain = sm3_base_do_update_blocks(desc, src, len, block_fn);
- src += len - remain;
- len = remain;
- }
-
- if (len >= bit_offset * 8)
- bit_offset += SM3_BLOCK_SIZE / 8;
- memcpy(&block, src, len);
- block.u8[len] = 0x80;
- sctx->count += len;
- block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE);
- memzero_explicit(&block, sizeof(block));
-
- return 0;
-}
-
-static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
-{
- struct sm3_state *sctx = shash_desc_ctx(desc);
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], digest++);
- return 0;
-}
-
-#endif /* _CRYPTO_SM3_BASE_H */
diff --git a/include/cxl/cxl.h b/include/cxl/cxl.h
new file mode 100644
index 000000000000..fa7269154620
--- /dev/null
+++ b/include/cxl/cxl.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 Intel Corporation. */
+/* Copyright(c) 2026 Advanced Micro Devices, Inc. */
+
+#ifndef __CXL_CXL_H__
+#define __CXL_CXL_H__
+
+#include <linux/node.h>
+#include <linux/ioport.h>
+#include <cxl/mailbox.h>
+
+/**
+ * enum cxl_devtype - delineate type-2 from a generic type-3 device
+ * @CXL_DEVTYPE_DEVMEM: Vendor specific CXL Type-2 device implementing HDM-D or
+ * HDM-DB, no requirement that this device implements a
+ * mailbox, or other memory-device-standard manageability
+ * flows.
+ * @CXL_DEVTYPE_CLASSMEM: Common class definition of a CXL Type-3 device with
+ * HDM-H and class-mandatory memory device registers
+ */
+enum cxl_devtype {
+ CXL_DEVTYPE_DEVMEM,
+ CXL_DEVTYPE_CLASSMEM,
+};
+
+struct device;
+
+/*
+ * Using struct_group() allows for per register-block-type helper routines,
+ * without requiring block-type agnostic code to include the prefix.
+ */
+struct cxl_regs {
+ /*
+ * Common set of CXL Component register block base pointers
+ * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
+ * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure
+ */
+ struct_group_tagged(cxl_component_regs, component,
+ void __iomem *hdm_decoder;
+ void __iomem *ras;
+ );
+ /*
+ * Common set of CXL Device register block base pointers
+ * @status: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
+ */
+ struct_group_tagged(cxl_device_regs, device_regs,
+ void __iomem *status, *mbox, *memdev;
+ );
+
+ struct_group_tagged(cxl_pmu_regs, pmu_regs,
+ void __iomem *pmu;
+ );
+
+ /*
+ * RCH downstream port specific RAS register
+ * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB
+ */
+ struct_group_tagged(cxl_rch_regs, rch_regs,
+ void __iomem *dport_aer;
+ );
+
+ /*
+ * RCD upstream port specific PCIe cap register
+ * @pcie_cap: CXL 3.0 8.2.1.2 RCD Upstream Port RCRB
+ */
+ struct_group_tagged(cxl_rcd_regs, rcd_regs,
+ void __iomem *rcd_pcie_cap;
+ );
+};
+
+struct cxl_reg_map {
+ bool valid;
+ int id;
+ unsigned long offset;
+ unsigned long size;
+};
+
+struct cxl_component_reg_map {
+ struct cxl_reg_map hdm_decoder;
+ struct cxl_reg_map ras;
+};
+
+struct cxl_device_reg_map {
+ struct cxl_reg_map status;
+ struct cxl_reg_map mbox;
+ struct cxl_reg_map memdev;
+};
+
+struct cxl_pmu_reg_map {
+ struct cxl_reg_map pmu;
+};
+
+/**
+ * struct cxl_register_map - DVSEC harvested register block mapping parameters
+ * @host: device for devm operations and logging
+ * @base: virtual base of the register-block-BAR + @block_offset
+ * @resource: physical resource base of the register block
+ * @max_size: maximum mapping size to perform register search
+ * @reg_type: see enum cxl_regloc_type
+ * @component_map: cxl_reg_map for component registers
+ * @device_map: cxl_reg_maps for device registers
+ * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
+ */
+struct cxl_register_map {
+ struct device *host;
+ void __iomem *base;
+ resource_size_t resource;
+ resource_size_t max_size;
+ u8 reg_type;
+ union {
+ struct cxl_component_reg_map component_map;
+ struct cxl_device_reg_map device_map;
+ struct cxl_pmu_reg_map pmu_map;
+ };
+};
+
+/**
+ * struct cxl_dpa_perf - DPA performance property entry
+ * @dpa_range: range for DPA address
+ * @coord: QoS performance data (i.e. latency, bandwidth)
+ * @cdat_coord: raw QoS performance data from CDAT
+ * @qos_class: QoS Class cookies
+ */
+struct cxl_dpa_perf {
+ struct range dpa_range;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
+ int qos_class;
+};
+
+enum cxl_partition_mode {
+ CXL_PARTMODE_RAM,
+ CXL_PARTMODE_PMEM,
+};
+
+/**
+ * struct cxl_dpa_partition - DPA partition descriptor
+ * @res: shortcut to the partition in the DPA resource tree (cxlds->dpa_res)
+ * @perf: performance attributes of the partition from CDAT
+ * @mode: operation mode for the DPA capacity, e.g. ram, pmem, dynamic...
+ */
+struct cxl_dpa_partition {
+ struct resource res;
+ struct cxl_dpa_perf perf;
+ enum cxl_partition_mode mode;
+};
+
+#define CXL_NR_PARTITIONS_MAX 2
+
+/**
+ * struct cxl_dev_state - The driver device state
+ *
+ * cxl_dev_state represents the CXL driver/device state. It provides an
+ * interface to mailbox commands as well as some cached data about the device.
+ * Currently only memory devices are represented.
+ *
+ * @dev: The device associated with this CXL state
+ * @cxlmd: The device representing the CXL.mem capabilities of @dev
+ * @reg_map: component and ras register mapping parameters
+ * @regs: Parsed register blocks
+ * @cxl_dvsec: Offset to the PCIe device DVSEC
+ * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
+ * @media_ready: Indicate whether the device media is usable
+ * @dpa_res: Overall DPA resource tree for the device
+ * @part: DPA partition array
+ * @nr_partitions: Number of DPA partitions
+ * @serial: PCIe Device Serial Number
+ * @type: Generic Memory Class device or Vendor Specific Memory device
+ * @cxl_mbox: CXL mailbox context
+ * @cxlfs: CXL features context
+ */
+struct cxl_dev_state {
+ /* public for Type2 drivers */
+ struct device *dev;
+ struct cxl_memdev *cxlmd;
+
+ /* private for Type2 drivers */
+ struct cxl_register_map reg_map;
+ struct cxl_device_regs regs;
+ int cxl_dvsec;
+ bool rcd;
+ bool media_ready;
+ struct resource dpa_res;
+ struct cxl_dpa_partition part[CXL_NR_PARTITIONS_MAX];
+ unsigned int nr_partitions;
+ u64 serial;
+ enum cxl_devtype type;
+ struct cxl_mailbox cxl_mbox;
+#ifdef CONFIG_CXL_FEATURES
+ struct cxl_features_state *cxlfs;
+#endif
+};
+
+struct cxl_dev_state *_devm_cxl_dev_state_create(struct device *dev,
+ enum cxl_devtype type,
+ u64 serial, u16 dvsec,
+ size_t size, bool has_mbox);
+
+/**
+ * devm_cxl_dev_state_create - safely create and cast a cxl dev state embedded
+ * in a driver specific struct.
+ *
+ * @parent: device behind the request
+ * @type: CXL device type
+ * @serial: device identification
+ * @dvsec: dvsec capability offset
+ * @drv_struct: driver struct embedding a cxl_dev_state struct
+ * @member: name of the struct cxl_dev_state member in drv_struct
+ * @mbox: true if mailbox supported
+ *
+ * Returns a pointer to the allocated drv_struct, which embeds an initialized
+ * cxl_dev_state struct.
+ *
+ * Introduced for Type2 driver support.
+ */
+#define devm_cxl_dev_state_create(parent, type, serial, dvsec, drv_struct, member, mbox) \
+ ({ \
+ static_assert(__same_type(struct cxl_dev_state, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_devm_cxl_dev_state_create(parent, type, serial, dvsec, \
+ sizeof(drv_struct), mbox); \
+ })
+#endif /* __CXL_CXL_H__ */
diff --git a/include/drm/bridge/dw_dp.h b/include/drm/bridge/dw_dp.h
index d05df49fd884..25363541e69d 100644
--- a/include/drm/bridge/dw_dp.h
+++ b/include/drm/bridge/dw_dp.h
@@ -11,8 +11,15 @@
struct drm_encoder;
struct dw_dp;
+enum {
+ DW_DP_MP_SINGLE_PIXEL,
+ DW_DP_MP_DUAL_PIXEL,
+ DW_DP_MP_QUAD_PIXEL,
+};
+
struct dw_dp_plat_data {
u32 max_link_rate;
+ u8 pixel_mode;
};
struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h
index 3af12f82da2c..6ea9c561cfef 100644
--- a/include/drm/bridge/dw_hdmi_qp.h
+++ b/include/drm/bridge/dw_hdmi_qp.h
@@ -25,7 +25,7 @@ struct dw_hdmi_qp_plat_data {
int main_irq;
int cec_irq;
unsigned long ref_clk_rate;
- /* Supported output formats: bitmask of @hdmi_colorspace */
+ /* Supported output formats: bitmask of @drm_output_color_format */
unsigned int supported_formats;
/* Maximum bits per color channel: 8, 10 or 12 */
unsigned int max_bpc;
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
index 09145c9ee9fc..9c31ed90516b 100644
--- a/include/drm/display/drm_hdmi_helper.h
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -8,6 +8,7 @@
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
+enum drm_output_color_format;
void
drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
@@ -26,7 +27,7 @@ void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
unsigned long long
drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
- unsigned int bpc, enum hdmi_colorspace fmt);
+ unsigned int bpc, enum drm_output_color_format fmt);
void
drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 178f8f62c80f..f03cd199aee7 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -262,6 +262,19 @@ struct drm_private_state;
*/
struct drm_private_state_funcs {
/**
+ * @atomic_create_state:
+ *
+	 * Allocates a pristine, initialized state for the private
+ * object and returns it.
+ *
+ * RETURNS:
+ *
+ * A new, pristine, private state instance or an error pointer
+ * on failure.
+ */
+ struct drm_private_state *(*atomic_create_state)(struct drm_private_obj *obj);
+
+ /**
* @atomic_duplicate_state:
*
* Duplicate the current state of the private object and return it. It
@@ -723,10 +736,9 @@ struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
-void drm_atomic_private_obj_init(struct drm_device *dev,
- struct drm_private_obj *obj,
- struct drm_private_state *state,
- const struct drm_private_state_funcs *funcs);
+int drm_atomic_private_obj_init(struct drm_device *dev,
+ struct drm_private_obj *obj,
+ const struct drm_private_state_funcs *funcs);
void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
struct drm_private_state * __must_check
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index b9740edb2658..900672c6ea90 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -84,6 +84,9 @@ void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
+
+void __drm_atomic_helper_private_obj_create_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
struct drm_private_state *state);
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 88bdfec3bd88..c7e888767c81 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -31,8 +31,9 @@
#define DRM_MODE_BLEND_COVERAGE 1
#define DRM_MODE_BLEND_PIXEL_NONE 2
-struct drm_device;
struct drm_atomic_state;
+struct drm_crtc;
+struct drm_device;
struct drm_plane;
static inline bool drm_rotation_90_or_270(unsigned int rotation)
@@ -58,4 +59,5 @@ int drm_atomic_normalize_zpos(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_plane_create_blend_mode_property(struct drm_plane *plane,
unsigned int supported_modes);
+void drm_crtc_attach_background_color_property(struct drm_crtc *crtc);
#endif
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 4f19f7064ee3..a8d67bd9ee50 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -1188,8 +1188,9 @@ struct drm_bridge {
const char *product;
/**
- * @supported_formats: Bitmask of @hdmi_colorspace listing supported
- * output formats. This is only relevant if @DRM_BRIDGE_OP_HDMI is set.
+ * @supported_formats: Bitmask of @drm_output_color_format listing
+ * supported output formats. This is only relevant if
+ * @DRM_BRIDGE_OP_HDMI is set.
*/
unsigned int supported_formats;
@@ -1290,6 +1291,7 @@ void drm_bridge_unplug(struct drm_bridge *bridge);
struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
void drm_bridge_put(struct drm_bridge *bridge);
+void drm_bridge_clear_and_put(struct drm_bridge **bridge_pp);
/* Cleanup action for use with __free() */
DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T))
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index b909fa8f810a..3054369bebff 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -6,166 +6,13 @@
#ifndef __DRM_BUDDY_H__
#define __DRM_BUDDY_H__
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/rbtree.h>
+#include <linux/gpu_buddy.h>
struct drm_printer;
-#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
-#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
-#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
-#define DRM_BUDDY_CLEARED BIT(4)
-#define DRM_BUDDY_TRIM_DISABLE BIT(5)
-
-struct drm_buddy_block {
-#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
-#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
-#define DRM_BUDDY_ALLOCATED (1 << 10)
-#define DRM_BUDDY_FREE (2 << 10)
-#define DRM_BUDDY_SPLIT (3 << 10)
-#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9)
-/* Free to be used, if needed in the future */
-#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6)
-#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
- u64 header;
-
- struct drm_buddy_block *left;
- struct drm_buddy_block *right;
- struct drm_buddy_block *parent;
-
- void *private; /* owned by creator */
-
- /*
- * While the block is allocated by the user through drm_buddy_alloc*,
- * the user has ownership of the link, for example to maintain within
- * a list, if so desired. As soon as the block is freed with
- * drm_buddy_free* ownership is given back to the mm.
- */
- union {
- struct rb_node rb;
- struct list_head link;
- };
-
- struct list_head tmp_link;
-};
-
-/* Order-zero must be at least SZ_4K */
-#define DRM_BUDDY_MAX_ORDER (63 - 12)
-
-/*
- * Binary Buddy System.
- *
- * Locking should be handled by the user, a simple mutex around
- * drm_buddy_alloc* and drm_buddy_free* should suffice.
- */
-struct drm_buddy {
- /* Maintain a free list for each order. */
- struct rb_root **free_trees;
-
- /*
- * Maintain explicit binary tree(s) to track the allocation of the
- * address space. This gives us a simple way of finding a buddy block
- * and performing the potentially recursive merge step when freeing a
- * block. Nodes are either allocated or free, in which case they will
- * also exist on the respective free list.
- */
- struct drm_buddy_block **roots;
-
- /*
- * Anything from here is public, and remains static for the lifetime of
- * the mm. Everything above is considered do-not-touch.
- */
- unsigned int n_roots;
- unsigned int max_order;
-
- /* Must be at least SZ_4K */
- u64 chunk_size;
- u64 size;
- u64 avail;
- u64 clear_avail;
-};
-
-static inline u64
-drm_buddy_block_offset(const struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_OFFSET;
-}
-
-static inline unsigned int
-drm_buddy_block_order(struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_ORDER;
-}
-
-static inline unsigned int
-drm_buddy_block_state(struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_STATE;
-}
-
-static inline bool
-drm_buddy_block_is_allocated(struct drm_buddy_block *block)
-{
- return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED;
-}
-
-static inline bool
-drm_buddy_block_is_clear(struct drm_buddy_block *block)
-{
- return block->header & DRM_BUDDY_HEADER_CLEAR;
-}
-
-static inline bool
-drm_buddy_block_is_free(struct drm_buddy_block *block)
-{
- return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
-}
-
-static inline bool
-drm_buddy_block_is_split(struct drm_buddy_block *block)
-{
- return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT;
-}
-
-static inline u64
-drm_buddy_block_size(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- return mm->chunk_size << drm_buddy_block_order(block);
-}
-
-int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
-
-void drm_buddy_fini(struct drm_buddy *mm);
-
-struct drm_buddy_block *
-drm_get_buddy(struct drm_buddy_block *block);
-
-int drm_buddy_alloc_blocks(struct drm_buddy *mm,
- u64 start, u64 end, u64 size,
- u64 min_page_size,
- struct list_head *blocks,
- unsigned long flags);
-
-int drm_buddy_block_trim(struct drm_buddy *mm,
- u64 *start,
- u64 new_size,
- struct list_head *blocks);
-
-void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
-
-void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
-
-void drm_buddy_free_list(struct drm_buddy *mm,
- struct list_head *objects,
- unsigned int flags);
-
-void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
-void drm_buddy_block_print(struct drm_buddy *mm,
- struct drm_buddy_block *block,
+/* DRM-specific GPU Buddy Allocator print helpers */
+void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p);
+void drm_buddy_block_print(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block,
struct drm_printer *p);
#endif
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index c972a8a3385b..49a21f3dcb36 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -196,6 +196,9 @@ struct drm_client_buffer {
};
struct drm_client_buffer *
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 handle, u32 pitch);
+struct drm_client_buffer *
drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_buffer_delete(struct drm_client_buffer *buffer);
int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h
index a3a32f9f918c..bd082854ca74 100644
--- a/include/drm/drm_colorop.h
+++ b/include/drm/drm_colorop.h
@@ -188,6 +188,19 @@ struct drm_colorop_state {
};
/**
+ * struct drm_colorop_funcs - driver colorop control functions
+ */
+struct drm_colorop_funcs {
+ /**
+ * @destroy:
+ *
+ * Clean up colorop resources. This is called at driver unload time
+ * through drm_mode_config_cleanup()
+ */
+ void (*destroy)(struct drm_colorop *colorop);
+};
+
+/**
* struct drm_colorop - DRM color operation control structure
*
* A colorop represents one color operation. They can be chained via
@@ -362,6 +375,8 @@ struct drm_colorop {
*/
struct drm_property *next_property;
+ /** @funcs: colorop control functions */
+ const struct drm_colorop_funcs *funcs;
};
#define obj_to_colorop(x) container_of(x, struct drm_colorop, base)
@@ -390,17 +405,22 @@ void drm_colorop_pipeline_destroy(struct drm_device *dev);
void drm_colorop_cleanup(struct drm_colorop *colorop);
int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, u64 supported_tfs, uint32_t flags);
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
+ u64 supported_tfs, uint32_t flags);
int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, uint32_t lut_size,
+ struct drm_plane *plane,
+ const struct drm_colorop_funcs *funcs,
+ uint32_t lut_size,
enum drm_colorop_lut1d_interpolation_type interpolation,
uint32_t flags);
int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, uint32_t flags);
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
+ uint32_t flags);
int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane, uint32_t flags);
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
+ uint32_t flags);
int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
- struct drm_plane *plane,
+ struct drm_plane *plane, const struct drm_colorop_funcs *funcs,
uint32_t lut_size,
enum drm_colorop_lut3d_interpolation_type interpolation,
uint32_t flags);
@@ -420,6 +440,8 @@ void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
*/
void drm_colorop_reset(struct drm_colorop *colorop);
+void drm_colorop_destroy(struct drm_colorop *colorop);
+
/**
* drm_colorop_index - find the index of a registered colorop
* @colorop: colorop to find index for
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 7eaec37ae1c7..af8b92d2d5b7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -402,8 +402,6 @@ enum drm_hdmi_broadcast_rgb {
const char *
drm_hdmi_connector_get_broadcast_rgb_name(enum drm_hdmi_broadcast_rgb broadcast_rgb);
-const char *
-drm_hdmi_connector_get_output_format_name(enum hdmi_colorspace fmt);
/**
* struct drm_monitor_range_info - Panel's Monitor range in EDID for
@@ -557,6 +555,34 @@ enum drm_colorspace {
};
/**
+ * enum drm_output_color_format - Output Color Format
+ *
+ * This enum is a consolidated color format list supported by
+ * connectors. It's only ever really been used for HDMI and DP so far,
+ * so it's not exhaustive and can be extended to represent other formats
+ * in the future.
+ *
+ *
+ * @DRM_OUTPUT_COLOR_FORMAT_RGB444:
+ * RGB output format
+ * @DRM_OUTPUT_COLOR_FORMAT_YCBCR444:
+ * YCbCr 4:4:4 output format (ie. not subsampled)
+ * @DRM_OUTPUT_COLOR_FORMAT_YCBCR422:
+ * YCbCr 4:2:2 output format (ie. with horizontal subsampling)
+ * @DRM_OUTPUT_COLOR_FORMAT_YCBCR420:
+ * YCbCr 4:2:0 output format (ie. with horizontal and vertical subsampling)
+ */
+enum drm_output_color_format {
+ DRM_OUTPUT_COLOR_FORMAT_RGB444 = 0,
+ DRM_OUTPUT_COLOR_FORMAT_YCBCR444,
+ DRM_OUTPUT_COLOR_FORMAT_YCBCR422,
+ DRM_OUTPUT_COLOR_FORMAT_YCBCR420,
+};
+
+const char *
+drm_hdmi_connector_get_output_format_name(enum drm_output_color_format fmt);
+
+/**
* enum drm_bus_flags - bus_flags info for &drm_display_info
*
* This enum defines signal polarities and clock edge information for signals on
@@ -699,11 +725,6 @@ struct drm_display_info {
*/
enum subpixel_order subpixel_order;
-#define DRM_COLOR_FORMAT_RGB444 (1<<0)
-#define DRM_COLOR_FORMAT_YCBCR444 (1<<1)
-#define DRM_COLOR_FORMAT_YCBCR422 (1<<2)
-#define DRM_COLOR_FORMAT_YCBCR420 (1<<3)
-
/**
* @panel_orientation: Read only connector property for built-in panels,
* indicating the orientation of the panel vs the device's casing.
@@ -714,10 +735,11 @@ struct drm_display_info {
int panel_orientation;
/**
- * @color_formats: HDMI Color formats, selects between RGB and YCrCb
- * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
- * as used to describe the pixel format in framebuffers, and also don't
- * match the formats in @bus_formats which are shared with v4l.
+ * @color_formats: HDMI Color formats, selects between RGB and
+ * YCbCr modes. Uses a bitmask of DRM_OUTPUT_COLOR_FORMAT\_
+ * defines, which are _not_ the same ones as used to describe
+ * the pixel format in framebuffers, and also don't match the
+ * formats in @bus_formats which are shared with v4l.
*/
u32 color_formats;
@@ -991,7 +1013,7 @@ struct drm_connector_hdmi_state {
/**
* @output_format: Pixel format to output in.
*/
- enum hdmi_colorspace output_format;
+ enum drm_output_color_format output_format;
/**
* @tmds_char_rate: TMDS Character Rate, in Hz.
@@ -1879,7 +1901,7 @@ struct drm_connector_hdmi {
unsigned char product[DRM_CONNECTOR_HDMI_PRODUCT_LEN] __nonstring;
/**
- * @supported_formats: Bitmask of @hdmi_colorspace
+ * @supported_formats: Bitmask of @drm_output_color_format
* supported by the controller.
*/
unsigned long supported_formats;
@@ -2493,6 +2515,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+void drm_connector_attach_panel_type_property(struct drm_connector *connector);
int drm_connector_attach_broadcast_rgb_property(struct drm_connector *connector);
int drm_connector_attach_colorspace_property(struct drm_connector *connector);
int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 66278ffeebd6..312fc1e745d2 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -275,6 +275,18 @@ struct drm_crtc_state {
struct drm_property_blob *gamma_lut;
/**
+ * @background_color:
+ *
+ * RGB value representing the CRTC's background color. The background
+ * color (aka "canvas color") of a CRTC is the color that will be used
+ * for pixels not covered by a plane, or covered by transparent pixels
+ * of a plane. The value here should be built using DRM_ARGB64_PREP*()
+ * helpers, while the individual color components can be extracted with
+ * desired precision via the DRM_ARGB64_GET*() macros.
+ */
+ u64 background_color;
+
+ /**
* @target_vblank:
*
* Target vertical blank period when a page flip
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 05cca77b7249..bf391903443d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,111 +271,8 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
-#else
-static inline void drm_fb_helper_prepare(struct drm_device *dev,
- struct drm_fb_helper *helper,
- unsigned int preferred_bpp,
- const struct drm_fb_helper_funcs *funcs)
-{
-}
-
-static inline void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
-{
-}
-
-static inline int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper)
-{
- /* So drivers can use it to free the struct */
- helper->dev = dev;
- dev->fb_helper = helper;
-
- return 0;
-}
-
-static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
-{
- if (helper && helper->dev)
- helper->dev->fb_helper = NULL;
-}
-
-static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_set_par(struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
-{
-}
-
-static inline void
-drm_fb_helper_fill_info(struct fb_info *info,
- struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
-}
-
-static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
- struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- return 0;
-}
-
-#ifdef CONFIG_FB_DEFERRED_IO
-static inline void drm_fb_helper_deferred_io(struct fb_info *info,
- struct list_head *pagelist)
-{
-}
-#endif
-
-static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
- bool suspend)
-{
-}
-
-static inline void
-drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend)
-{
-}
-
-static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
+bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper,
+ const struct drm_gem_object *obj);
#endif
#endif
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index f45f9612c0bc..07374eb5d88e 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -9,8 +9,12 @@
#define __LINUX_MIPI_DBI_H
#include <linux/mutex.h>
+
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
struct drm_format_conv_state;
struct drm_rect;
@@ -87,16 +91,6 @@ struct mipi_dbi_dev {
struct drm_device drm;
/**
- * @pipe: Display pipe structure
- */
- struct drm_simple_display_pipe pipe;
-
- /**
- * @connector: Connector
- */
- struct drm_connector connector;
-
- /**
* @mode: Fixed display mode
*/
struct drm_display_mode mode;
@@ -164,30 +158,9 @@ static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm)
int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
struct gpio_desc *dc);
-int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
- const struct drm_simple_display_pipe_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation, size_t tx_buf_size);
-int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
- const struct drm_simple_display_pipe_funcs *funcs,
- const struct drm_display_mode *mode, unsigned int rotation);
-enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode);
-void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
-void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plan_state);
-void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
-int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
-void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
-void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe);
-struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe);
-void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
+
+int drm_mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, const struct drm_display_mode *mode,
+ u32 format, unsigned int rotation, size_t tx_buf_size);
void mipi_dbi_hw_reset(struct mipi_dbi *dbi);
bool mipi_dbi_display_is_on(struct mipi_dbi *dbi);
@@ -229,31 +202,91 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
ret; \
})
+/*
+ * Plane
+ */
+
+#define DRM_MIPI_DBI_PLANE_FORMATS \
+ DRM_FORMAT_RGB565, \
+ DRM_FORMAT_XRGB8888
+
+#define DRM_MIPI_DBI_PLANE_FORMAT_MODIFIERS \
+ DRM_FORMAT_MOD_LINEAR, \
+ DRM_FORMAT_MOD_INVALID
+
+#define DRM_MIPI_DBI_PLANE_FUNCS \
+ DRM_GEM_SHADOW_PLANE_FUNCS, \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane
+
+int drm_mipi_dbi_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void drm_mipi_dbi_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+
+#define DRM_MIPI_DBI_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = drm_mipi_dbi_plane_helper_atomic_check, \
+ .atomic_update = drm_mipi_dbi_plane_helper_atomic_update
+
+/*
+ * CRTC
+ */
+
+#define DRM_MIPI_DBI_CRTC_FUNCS \
+ .reset = drm_atomic_helper_crtc_reset, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state
+
+enum drm_mode_status drm_mipi_dbi_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int drm_mipi_dbi_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_mipi_dbi_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+
+#define DRM_MIPI_DBI_CRTC_HELPER_FUNCS \
+ .mode_valid = drm_mipi_dbi_crtc_helper_mode_valid, \
+ .atomic_check = drm_mipi_dbi_crtc_helper_atomic_check, \
+ .atomic_disable = drm_mipi_dbi_crtc_helper_atomic_disable
+
+/*
+ * Connector
+ */
+
+#define DRM_MIPI_DBI_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+int drm_mipi_dbi_connector_helper_get_modes(struct drm_connector *connector);
+
+#define DRM_MIPI_DBI_CONNECTOR_HELPER_FUNCS \
+ .get_modes = drm_mipi_dbi_connector_helper_get_modes
+
+/*
+ * Mode config
+ */
+
+#define DRM_MIPI_DBI_MODE_CONFIG_FUNCS \
+ .fb_create = drm_gem_fb_create_with_dirty, \
+ .atomic_check = drm_atomic_helper_check, \
+ .atomic_commit = drm_atomic_helper_commit
+
+#define DRM_MIPI_DBI_MODE_CONFIG_HELPER_FUNCS \
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm
+
+/*
+ * Debug FS
+ */
+
#ifdef CONFIG_DEBUG_FS
void mipi_dbi_debugfs_init(struct drm_minor *minor);
#else
static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
-/**
- * DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS - Initializes struct drm_simple_display_pipe_funcs
- * for MIPI-DBI devices
- * @enable_: Enable-callback implementation
- *
- * This macro initializes struct drm_simple_display_pipe_funcs with default
- * values for MIPI-DBI-based devices. The only callback that depends on the
- * hardware is @enable, for which the driver has to provide an implementation.
- * MIPI-based drivers are encouraged to use this macro for initialization.
- */
-#define DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(enable_) \
- .mode_valid = mipi_dbi_pipe_mode_valid, \
- .enable = (enable_), \
- .disable = mipi_dbi_pipe_disable, \
- .update = mipi_dbi_pipe_update, \
- .begin_fb_access = mipi_dbi_pipe_begin_fb_access, \
- .end_fb_access = mipi_dbi_pipe_end_fb_access, \
- .reset_plane = mipi_dbi_pipe_reset_plane, \
- .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state, \
- .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state
-
#endif /* __LINUX_MIPI_DBI_H */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 3aba7b380c8d..2ab651a36115 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -144,6 +144,7 @@ enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB666,
MIPI_DSI_FMT_RGB666_PACKED,
MIPI_DSI_FMT_RGB565,
+ MIPI_DSI_FMT_RGB101010,
};
#define DSI_DEV_NAME_SIZE 20
@@ -235,6 +236,9 @@ extern const struct bus_type mipi_dsi_bus_type;
static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
{
switch (fmt) {
+ case MIPI_DSI_FMT_RGB101010:
+ return 30;
+
case MIPI_DSI_FMT_RGB888:
case MIPI_DSI_FMT_RGB666:
return 24;
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 895fb820dba0..687c0ee163d2 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -601,6 +601,10 @@ struct drm_mode_config {
*/
struct drm_property *tile_property;
/**
+ * @panel_type_property: Default connector property for panel type
+ */
+ struct drm_property *panel_type_property;
+ /**
* @link_status_property: Default connector property for link status
* of a connector
*/
@@ -832,6 +836,11 @@ struct drm_mode_config {
* gamma LUT as supported by the driver (read-only).
*/
struct drm_property *gamma_lut_size_property;
+ /**
+ * @background_color_property: Optional CRTC property to set the
+ * background color.
+ */
+ struct drm_property *background_color_property;
/**
* @suggested_x_property: Optional connector property with a hint for
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index c848f578e3da..75e6ca58922d 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -4,6 +4,7 @@
#include <linux/dma-direction.h>
#include <linux/hmm.h>
+#include <linux/memremap.h>
#include <linux/types.h>
#define NR_PAGES(order) (1U << (order))
@@ -367,6 +368,26 @@ void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim
int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
+/**
+ * drm_pagemap_page_zone_device_data() - Page to zone_device_data
+ * @page: Pointer to the page
+ *
+ * Return: Page's zone_device_data
+ */
+static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+
+ return folio_zone_device_data(folio);
+}
+
+#else
+
+static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
+{
+ return NULL;
+}
+
#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
#endif
diff --git a/include/drm/drm_ras.h b/include/drm/drm_ras.h
new file mode 100644
index 000000000000..5d50209e51db
--- /dev/null
+++ b/include/drm/drm_ras.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2026 Intel Corporation
+ */
+
+#ifndef __DRM_RAS_H__
+#define __DRM_RAS_H__
+
+#include <uapi/drm/drm_ras.h>
+
+/**
+ * struct drm_ras_node - A DRM RAS Node
+ */
+struct drm_ras_node {
+ /** @id: Unique identifier for the node. Dynamically assigned. */
+ u32 id;
+ /**
+ * @device_name: Human-readable name of the device. Given by the driver.
+ */
+ const char *device_name;
+ /** @node_name: Human-readable name of the node. Given by the driver. */
+ const char *node_name;
+ /** @type: Type of the node (enum drm_ras_node_type). */
+ enum drm_ras_node_type type;
+
+ /* Error-Counter Related Callback and Variables */
+
+ /** @error_counter_range: Range of valid Error IDs for this node. */
+ struct {
+ /** @first: First valid Error ID. */
+ u32 first;
+ /** @last: Last valid Error ID. Mandatory entry. */
+ u32 last;
+ } error_counter_range;
+
+ /**
+ * @query_error_counter:
+ *
+ * This callback is used by drm-ras to query a specific error counter.
+ * Used for input check and to iterate all error counters in a node.
+ *
+ * Driver should expect query_error_counter() to be called with
+ * error_id from `error_counter_range.first` to
+ * `error_counter_range.last`.
+ *
+ * The @query_error_counter is a mandatory callback for
+ * error_counter_node.
+ *
+ * Returns: 0 on success,
+ * -ENOENT when error_id is not supported as an indication that
+ * drm_ras should silently skip this entry. Used for
+ * supporting non-contiguous error ranges.
+ * Driver is responsible for maintaining the list of
+ * supported error IDs in the range of first to last.
+ * Other negative values on errors that should terminate the
+ * netlink query.
+ */
+ int (*query_error_counter)(struct drm_ras_node *node, u32 error_id,
+ const char **name, u32 *val);
+
+ /** @priv: Driver private data */
+ void *priv;
+};
+
+struct drm_device;
+
+#if IS_ENABLED(CONFIG_DRM_RAS)
+int drm_ras_node_register(struct drm_ras_node *node);
+void drm_ras_node_unregister(struct drm_ras_node *node);
+#else
+static inline int drm_ras_node_register(struct drm_ras_node *node) { return 0; }
+static inline void drm_ras_node_unregister(struct drm_ras_node *node) { }
+#endif
+
+#endif
diff --git a/include/drm/drm_ras_genl_family.h b/include/drm/drm_ras_genl_family.h
new file mode 100644
index 000000000000..910fb3943a75
--- /dev/null
+++ b/include/drm/drm_ras_genl_family.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2026 Intel Corporation
+ */
+
+#ifndef __DRM_RAS_GENL_FAMILY_H__
+#define __DRM_RAS_GENL_FAMILY_H__
+
+#if IS_ENABLED(CONFIG_DRM_RAS)
+int drm_ras_genl_family_register(void);
+void drm_ras_genl_family_unregister(void);
+#else
+static inline int drm_ras_genl_family_register(void) { return 0; }
+static inline void drm_ras_genl_family_unregister(void) { }
+#endif
+
+#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index b2486d073763..cb672ce0e856 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -3,6 +3,11 @@
* Copyright (C) 2016 Noralf Trønnes
*/
+/*
+ * Simple KMS helpers are deprecated in favor of regular atomic helpers. Do not
+ * use them in new code.
+ */
+
#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
@@ -12,233 +17,38 @@
struct drm_simple_display_pipe;
-/**
- * struct drm_simple_display_pipe_funcs - helper operations for a simple
- * display pipeline
- */
struct drm_simple_display_pipe_funcs {
- /**
- * @mode_valid:
- *
- * This callback is used to check if a specific mode is valid in the
- * crtc used in this simple display pipe. This should be implemented
- * if the display pipe has some sort of restriction in the modes
- * it can display. For example, a given display pipe may be responsible
- * to set a clock value. If the clock can not produce all the values
- * for the available modes then this callback can be used to restrict
- * the number of modes to only the ones that can be displayed. Another
- * reason can be bandwidth mitigation: the memory port on the display
- * controller can have bandwidth limitations not allowing pixel data
- * to be fetched at any rate.
- *
- * This hook is used by the probe helpers to filter the mode list in
- * drm_helper_probe_single_connector_modes(), and it is used by the
- * atomic helpers to validate modes supplied by userspace in
- * drm_atomic_helper_check_modeset().
- *
- * This function is optional.
- *
- * NOTE:
- *
- * Since this function is both called from the check phase of an atomic
- * commit, and the mode validation in the probe paths it is not allowed
- * to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardware constraints.
- *
- * RETURNS:
- *
- * drm_mode_status Enum
- */
enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode);
-
- /**
- * @enable:
- *
- * This function should be used to enable the pipeline.
- * It is called when the underlying crtc is enabled.
- * This hook is optional.
- */
void (*enable)(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
- /**
- * @disable:
- *
- * This function should be used to disable the pipeline.
- * It is called when the underlying crtc is disabled.
- * This hook is optional.
- */
void (*disable)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @check:
- *
- * This function is called in the check phase of an atomic update,
- * specifically when the underlying plane is checked.
- * The simple display pipeline helpers already check that the plane is
- * not scaled, fills the entire visible area and is always enabled
- * when the crtc is also enabled.
- * This hook is optional.
- *
- * RETURNS:
- *
- * 0 on success, -EINVAL if the state or the transition can't be
- * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
- * attempt to obtain another state object ran into a &drm_modeset_lock
- * deadlock.
- */
int (*check)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state,
struct drm_crtc_state *crtc_state);
- /**
- * @update:
- *
- * This function is called when the underlying plane state is updated.
- * This hook is optional.
- *
- * This is the function drivers should submit the
- * &drm_pending_vblank_event from. Using either
- * drm_crtc_arm_vblank_event(), when the driver supports vblank
- * interrupt handling, or drm_crtc_send_vblank_event() for more
- * complex case. In case the hardware lacks vblank support entirely,
- * drivers can set &struct drm_crtc_state.no_vblank in
- * &struct drm_simple_display_pipe_funcs.check and let DRM's
- * atomic helper fake a vblank event.
- */
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state);
-
- /**
- * @prepare_fb:
- *
- * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read
- * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
- * more details.
- *
- * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook
- * set, drm_gem_plane_helper_prepare_fb() is called automatically
- * to implement this. Other drivers which need additional plane
- * processing can call drm_gem_plane_helper_prepare_fb() from
- * their @prepare_fb hook.
- */
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
-
- /**
- * @cleanup_fb:
- *
- * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read
- * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for
- * more details.
- */
void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
-
- /**
- * @begin_fb_access:
- *
- * Optional, called by &drm_plane_helper_funcs.begin_fb_access. Please read
- * the documentation for the &drm_plane_helper_funcs.begin_fb_access hook for
- * more details.
- */
int (*begin_fb_access)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *new_plane_state);
-
- /**
- * @end_fb_access:
- *
- * Optional, called by &drm_plane_helper_funcs.end_fb_access. Please read
- * the documentation for the &drm_plane_helper_funcs.end_fb_access hook for
- * more details.
- */
void (*end_fb_access)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
-
- /**
- * @enable_vblank:
- *
- * Optional, called by &drm_crtc_funcs.enable_vblank. Please read
- * the documentation for the &drm_crtc_funcs.enable_vblank hook for
- * more details.
- */
int (*enable_vblank)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @disable_vblank:
- *
- * Optional, called by &drm_crtc_funcs.disable_vblank. Please read
- * the documentation for the &drm_crtc_funcs.disable_vblank hook for
- * more details.
- */
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @reset_crtc:
- *
- * Optional, called by &drm_crtc_funcs.reset. Please read the
- * documentation for the &drm_crtc_funcs.reset hook for more details.
- */
void (*reset_crtc)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @duplicate_crtc_state:
- *
- * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please
- * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state
- * hook for more details.
- */
struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @destroy_crtc_state:
- *
- * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please
- * read the documentation for the &drm_crtc_funcs.atomic_destroy_state
- * hook for more details.
- */
void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state);
-
- /**
- * @reset_plane:
- *
- * Optional, called by &drm_plane_funcs.reset. Please read the
- * documentation for the &drm_plane_funcs.reset hook for more details.
- */
void (*reset_plane)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @duplicate_plane_state:
- *
- * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please
- * read the documentation for the &drm_plane_funcs.atomic_duplicate_state
- * hook for more details.
- */
struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe);
-
- /**
- * @destroy_plane_state:
- *
- * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please
- * read the documentation for the &drm_plane_funcs.atomic_destroy_state
- * hook for more details.
- */
void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
};
-/**
- * struct drm_simple_display_pipe - simple display pipeline
- * @crtc: CRTC control structure
- * @plane: Plane control structure
- * @encoder: Encoder control structure
- * @connector: Connector control structure
- * @funcs: Pipeline control functions (optional)
- *
- * Simple display pipeline with plane, crtc and encoder collapsed into one
- * entity. It should be initialized by calling drm_simple_display_pipe_init().
- */
struct drm_simple_display_pipe {
struct drm_crtc crtc;
struct drm_plane plane;
@@ -265,22 +75,6 @@ int drm_simple_encoder_init(struct drm_device *dev,
void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
size_t offset, int encoder_type);
-/**
- * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic
- * functionality.
- * @dev: drm device
- * @type: the type of the struct which contains struct &drm_encoder
- * @member: the name of the &drm_encoder within @type.
- * @encoder_type: user visible type of the encoder
- *
- * Allocates and initializes an encoder that has no further functionality.
- * Settings for possible CRTC and clones are left to their initial values.
- * Cleanup is automatically handled through registering drm_encoder_cleanup()
- * with drmm_add_action().
- *
- * Returns:
- * Pointer to new encoder, or ERR_PTR on failure.
- */
#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \
((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \
offsetof(type, member), \
diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h
index 7ba72a81a808..29befdda35d2 100644
--- a/include/drm/drm_suballoc.h
+++ b/include/drm/drm_suballoc.h
@@ -53,6 +53,12 @@ void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager);
+struct drm_suballoc *drm_suballoc_alloc(gfp_t gfp);
+
+int drm_suballoc_insert(struct drm_suballoc_manager *sa_manager,
+ struct drm_suballoc *sa, size_t size, bool intr,
+ size_t align);
+
struct drm_suballoc *
drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
gfp_t gfp, bool intr, size_t align);
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
index ce946859a3a9..97ec94a2e749 100644
--- a/include/drm/intel/display_parent_interface.h
+++ b/include/drm/intel/display_parent_interface.h
@@ -9,19 +9,66 @@
struct dma_fence;
struct drm_crtc;
struct drm_device;
+struct drm_file;
struct drm_framebuffer;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct drm_plane_state;
struct drm_scanout_buffer;
struct i915_vma;
+struct intel_dpt;
+struct intel_dsb_buffer;
+struct intel_frontbuffer;
struct intel_hdcp_gsc_context;
struct intel_initial_plane_config;
struct intel_panic;
struct intel_stolen_node;
struct ref_tracker;
+struct seq_file;
+struct vm_area_struct;
/* Keep struct definitions sorted */
+struct intel_display_bo_interface {
+ bool (*is_tiled)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_userptr)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_shmem)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_protected)(struct drm_gem_object *obj);
+ int (*key_check)(struct drm_gem_object *obj);
+ int (*fb_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+ int (*read_from_page)(struct drm_gem_object *obj, u64 offset, void *dst, int size);
+ void (*describe)(struct seq_file *m, struct drm_gem_object *obj); /* Optional */
+ int (*framebuffer_init)(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*framebuffer_fini)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*framebuffer_lookup)(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+};
+
+struct intel_display_dpt_interface {
+ struct intel_dpt *(*create)(struct drm_gem_object *obj, size_t size);
+ void (*destroy)(struct intel_dpt *dpt);
+ void (*suspend)(struct intel_dpt *dpt);
+ void (*resume)(struct intel_dpt *dpt);
+};
+
+struct intel_display_dsb_interface {
+ u32 (*ggtt_offset)(struct intel_dsb_buffer *dsb_buf);
+ void (*write)(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+ u32 (*read)(struct intel_dsb_buffer *dsb_buf, u32 idx);
+ void (*fill)(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+ struct intel_dsb_buffer *(*create)(struct drm_device *drm, size_t size);
+ void (*cleanup)(struct intel_dsb_buffer *dsb_buf);
+ void (*flush_map)(struct intel_dsb_buffer *dsb_buf);
+};
+
+struct intel_display_frontbuffer_interface {
+ struct intel_frontbuffer *(*get)(struct drm_gem_object *obj);
+ void (*ref)(struct intel_frontbuffer *front);
+ void (*put)(struct intel_frontbuffer *front);
+ void (*flush_for_display)(struct intel_frontbuffer *front);
+};
+
struct intel_display_hdcp_interface {
ssize_t (*gsc_msg_send)(struct intel_hdcp_gsc_context *gsc_context,
void *msg_in, size_t msg_in_len,
@@ -44,6 +91,35 @@ struct intel_display_irq_interface {
void (*synchronize)(struct drm_device *drm);
};
+struct intel_display_overlay_interface {
+ bool (*is_active)(struct drm_device *drm);
+
+ int (*overlay_on)(struct drm_device *drm,
+ u32 frontbuffer_bits);
+ int (*overlay_continue)(struct drm_device *drm,
+ struct i915_vma *vma,
+ bool load_polyphase_filter);
+ int (*overlay_off)(struct drm_device *drm);
+ int (*recover_from_interrupt)(struct drm_device *drm);
+ int (*release_old_vid)(struct drm_device *drm);
+
+ void (*reset)(struct drm_device *drm);
+
+ struct i915_vma *(*pin_fb)(struct drm_device *drm,
+ struct drm_gem_object *obj,
+ u32 *offset);
+ void (*unpin_fb)(struct drm_device *drm,
+ struct i915_vma *vma);
+
+ struct drm_gem_object *(*obj_lookup)(struct drm_device *drm,
+ struct drm_file *filp,
+ u32 handle);
+
+ void __iomem *(*setup)(struct drm_device *drm,
+ bool needs_physical);
+ void (*cleanup)(struct drm_device *drm);
+};
+
struct intel_display_panic_interface {
struct intel_panic *(*alloc)(void);
int (*setup)(struct intel_panic *panic, struct drm_scanout_buffer *sb);
@@ -55,6 +131,13 @@ struct intel_display_pc8_interface {
void (*unblock)(struct drm_device *drm);
};
+struct intel_display_pcode_interface {
+ int (*read)(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+ int (*write)(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+ int (*request)(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+};
+
struct intel_display_rpm_interface {
struct ref_tracker *(*get)(const struct drm_device *drm);
struct ref_tracker *(*get_raw)(const struct drm_device *drm);
@@ -93,6 +176,10 @@ struct intel_display_stolen_interface {
void (*node_free)(const struct intel_stolen_node *node);
};
+struct intel_display_vma_interface {
+ int (*fence_id)(const struct i915_vma *vma);
+};
+
/**
* struct intel_display_parent_interface - services parent driver provides to display
*
@@ -106,6 +193,18 @@ struct intel_display_stolen_interface {
* check the optional pointers.
*/
struct intel_display_parent_interface {
+ /** @bo: BO interface */
+ const struct intel_display_bo_interface *bo;
+
+ /** @dpt: DPT interface. Optional. */
+ const struct intel_display_dpt_interface *dpt;
+
+ /** @dsb: DSB buffer interface */
+ const struct intel_display_dsb_interface *dsb;
+
+ /** @frontbuffer: Frontbuffer interface */
+ const struct intel_display_frontbuffer_interface *frontbuffer;
+
/** @hdcp: HDCP GSC interface */
const struct intel_display_hdcp_interface *hdcp;
@@ -118,9 +217,15 @@ struct intel_display_parent_interface {
/** @panic: Panic interface */
const struct intel_display_panic_interface *panic;
+ /** @overlay: Overlay. Optional. */
+ const struct intel_display_overlay_interface *overlay;
+
/** @pc8: PC8 interface. Optional. */
const struct intel_display_pc8_interface *pc8;
+ /** @pcode: Pcode interface */
+ const struct intel_display_pcode_interface *pcode;
+
/** @rpm: Runtime PM functions */
const struct intel_display_rpm_interface *rpm;
@@ -130,6 +235,9 @@ struct intel_display_parent_interface {
/** @stolen: Stolen memory. */
const struct intel_display_stolen_interface *stolen;
+ /** @vma: VMA interface. Optional. */
+ const struct intel_display_vma_interface *vma;
+
/* Generic independent functions */
struct {
/** @fence_priority_display: Set display priority. Optional. */
diff --git a/include/drm/intel/i915_drm.h b/include/drm/intel/i915_drm.h
index adff68538484..1fdaabed1470 100644
--- a/include/drm/intel/i915_drm.h
+++ b/include/drm/intel/i915_drm.h
@@ -39,46 +39,46 @@ bool i915_gpu_turbo_disable(void);
extern struct resource intel_graphics_stolen_res;
/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga arbiter.
+ * The bridge device's (device 0) PCI config space has information
+ * about the fb aperture size and the amount of pre-reserved memory.
*/
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+
+/* device 2 has a read-only mirror */
#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-#define BDW_GMCH_GGMS_SHIFT 6
-#define BDW_GMCH_GGMS_MASK 0x3
-#define BDW_GMCH_GMS_SHIFT 8
-#define BDW_GMCH_GMS_MASK 0xff
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define BDW_GMCH_GGMS_SHIFT 6
+#define BDW_GMCH_GGMS_MASK 0x3
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+/* device 2 has a read-only mirror from i85x/i865 onwards */
#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_GMS_MASK (0x7 << 4)
+#define I830_GMCH_GMS_LOCAL (0x1 << 4)
+#define I830_GMCH_GMS_STOLEN_512 (0x2 << 4)
+#define I830_GMCH_GMS_STOLEN_1024 (0x3 << 4)
+#define I830_GMCH_GMS_STOLEN_8192 (0x4 << 4)
+#define I855_GMCH_GMS_MASK (0xF << 4)
+#define I855_GMCH_GMS_STOLEN_0M (0x0 << 4)
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-
-#define I855_GMCH_GMS_MASK 0xF0
-#define I855_GMCH_GMS_STOLEN_0M 0x0
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+/* valid for both I830_GMCH_CTRL and SNB_GMCH_CTRL */
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define I830_DRB3 0x63
#define I85X_DRB3 0x43
@@ -87,12 +87,12 @@ extern struct resource intel_graphics_stolen_res;
#define I830_ESMRAMC 0x91
#define I845_ESMRAMC 0x9e
#define I85X_ESMRAMC 0x61
-#define TSEG_ENABLE (1 << 0)
-#define I830_TSEG_SIZE_512K (0 << 1)
-#define I830_TSEG_SIZE_1M (1 << 1)
-#define I845_TSEG_SIZE_MASK (3 << 1)
-#define I845_TSEG_SIZE_512K (2 << 1)
-#define I845_TSEG_SIZE_1M (3 << 1)
+#define TSEG_ENABLE (1 << 0)
+#define I830_TSEG_SIZE_512K (0 << 1)
+#define I830_TSEG_SIZE_1M (1 << 1)
+#define I845_TSEG_SIZE_MASK (3 << 1)
+#define I845_TSEG_SIZE_512K (2 << 1)
+#define I845_TSEG_SIZE_1M (3 << 1)
#define INTEL_BSM 0x5c
#define INTEL_GEN11_BSM_DW0 0xc0
diff --git a/include/drm/intel/intel_gmd_interrupt_regs.h b/include/drm/intel/intel_gmd_interrupt_regs.h
new file mode 100644
index 000000000000..ce66c4151e76
--- /dev/null
+++ b/include/drm/intel/intel_gmd_interrupt_regs.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_GMD_INTERRUPT_REGS_H_
+#define _INTEL_GMD_INTERRUPT_REGS_H_
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
+
+#define GEN8_MASTER_IRQ _MMIO(0x44200)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
+#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
+
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
+#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \
+ GEN11_GU_MISC_IER, \
+ GEN11_GU_MISC_IIR)
+
+#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
+#define GEN11_MASTER_IRQ (1 << 31)
+#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
+#define GEN11_DISPLAY_IRQ (1 << 16)
+#define GEN11_GT_DW_IRQ(x) (1 << (x))
+#define GEN11_GT_DW1_IRQ (1 << 1)
+#define GEN11_GT_DW0_IRQ (1 << 0)
+
+#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define SCPD_FBC_IGNORE_3D (1 << 6)
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
+
+#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
+#endif
diff --git a/include/drm/intel/intel_gmd_misc_regs.h b/include/drm/intel/intel_gmd_misc_regs.h
new file mode 100644
index 000000000000..763d7711f21c
--- /dev/null
+++ b/include/drm/intel/intel_gmd_misc_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_GMD_MISC_REGS_H_
+#define _INTEL_GMD_MISC_REGS_H_
+
+#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
+#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
+#define DISP_FBC_WM_DIS REG_BIT(15)
+
+#define INSTPM _MMIO(0x20c0)
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
+
+#endif
diff --git a/include/drm/intel/intel_pcode_regs.h b/include/drm/intel/intel_pcode_regs.h
new file mode 100644
index 000000000000..db989ee7c488
--- /dev/null
+++ b/include/drm/intel/intel_pcode_regs.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_PCODE_REGS_H_
+#define _INTEL_PCODE_REGS_H_
+
+#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
+#define GEN6_PCODE_READY (1 << 31)
+#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
+#define GEN6_PCODE_ERROR_MASK 0xFF
+#define GEN6_PCODE_SUCCESS 0x0
+#define GEN6_PCODE_ILLEGAL_CMD 0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define GEN6_PCODE_TIMEOUT 0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define GEN7_PCODE_TIMEOUT 0x2
+#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define GEN11_PCODE_LOCKED 0x6
+#define GEN11_PCODE_REJECTED 0x11
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
+#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
+#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
+#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
+#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
+#define SKL_PCODE_CDCLK_CONTROL 0x7
+#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
+#define SKL_CDCLK_READY_FOR_CHANGE 0x1
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_READ_OC_PARAMS 0xc
+#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
+#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
+#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
+#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
+#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D
+#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0)
+#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK
+#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31)
+#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28)
+#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x))
+#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x))
+#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x))
+#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \
+ ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \
+ (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \
+ (DISPLAY_TO_PCODE_VOLTAGE(voltage_level)))
+#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
+#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
+#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
+#define ICL_PCODE_REP_QGV_POLL REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 1)
+#define ICL_PCODE_REP_QGV_REJECTED REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 2)
+#define ADLS_PCODE_REP_PSF_MASK REG_GENMASK(3, 2)
+#define ADLS_PCODE_REP_PSF_SAFE REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 0)
+#define ADLS_PCODE_REP_PSF_POLL REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 1)
+#define ADLS_PCODE_REP_PSF_REJECTED REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 2)
+#define ICL_PCODE_REQ_QGV_PT_MASK REG_GENMASK(7, 0)
+#define ICL_PCODE_REQ_QGV_PT(x) REG_FIELD_PREP(ICL_PCODE_REQ_QGV_PT_MASK, (x))
+#define ADLS_PCODE_REQ_PSF_PT_MASK REG_GENMASK(10, 8)
+#define ADLS_PCODE_REQ_PSF_PT(x) REG_FIELD_PREP(ADLS_PCODE_REQ_PSF_PT_MASK, (x))
+#define GEN6_PCODE_READ_D_COMP 0x10
+#define GEN6_PCODE_WRITE_D_COMP 0x11
+#define ICL_PCODE_EXIT_TCCOLD 0x12
+#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
+#define DISPLAY_IPS_CONTROL 0x19
+#define TGL_PCODE_TCCOLD 0x26
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
+/* See also IPS_CTL */
+#define IPS_PCODE_CONTROL (1 << 30)
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+#define GEN9_PCODE_SAGV_CONTROL 0x21
+#define GEN9_SAGV_DISABLE 0x0
+#define GEN9_SAGV_IS_DISABLED 0x1
+#define GEN9_SAGV_ENABLE 0x3
+#define DG1_PCODE_STATUS 0x7E
+#define DG1_UNCORE_GET_INIT_STATUS 0x0
+#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
+#define PCODE_POWER_SETUP 0x7C
+#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
+#define POWER_SETUP_SUBCOMMAND_WRITE_I1 0x5
+#define POWER_SETUP_I1_WATTS REG_BIT(31)
+#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
+#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
+/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
+/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
+#define PCODE_MBOX_DOMAIN_NONE 0x0
+#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
+
+#endif
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 52520e684ab1..33b91cb2e684 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -900,4 +900,16 @@
#define INTEL_CRI_IDS(MACRO__, ...) \
MACRO__(0x674C, ## __VA_ARGS__)
+/* NVL-P */
+#define INTEL_NVLP_IDS(MACRO__, ...) \
+ MACRO__(0xD750, ## __VA_ARGS__), \
+ MACRO__(0xD751, ## __VA_ARGS__), \
+ MACRO__(0xD752, ## __VA_ARGS__), \
+ MACRO__(0xD753, ## __VA_ARGS__), \
+ MACRO__(0xD754, ## __VA_ARGS__), \
+ MACRO__(0xD755, ## __VA_ARGS__), \
+ MACRO__(0xD756, ## __VA_ARGS__), \
+ MACRO__(0xD757, ## __VA_ARGS__), \
+ MACRO__(0xD75F, ## __VA_ARGS__)
+
#endif /* __PCIIDS_H__ */
diff --git a/include/drm/intel/pick.h b/include/drm/intel/pick.h
new file mode 100644
index 000000000000..d976fab8f270
--- /dev/null
+++ b/include/drm/intel/pick.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _PICK_H_
+#define _PICK_H_
+
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
+ * @__c_index corresponds to the index in which the second range starts to be
+ * used. Using math interval notation, the first range is used for indexes [ 0,
+ * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf004
+ * #define _FOO_C 0xf008
+ * #define _SUPER_FOO_A 0xa000
+ * #define _SUPER_FOO_B 0xa100
+ * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
+ * _FOO_A, _FOO_B, \
+ * _SUPER_FOO_A, _SUPER_FOO_B))
+ *
+ * This expands to:
+ * 0: 0xf000,
+ * 1: 0xf004,
+ * 2: 0xf008,
+ * 3: 0xa000,
+ * 4: 0xa100,
+ * 5: 0xa200,
+ * ...
+ */
+#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
+ (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
+ ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
+ _PICK_EVEN((__index) - (__c_index), __c, __d)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
+#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
+
+#endif
diff --git a/include/drm/intel/reg_bits.h b/include/drm/intel/reg_bits.h
new file mode 100644
index 000000000000..2a9066e1d808
--- /dev/null
+++ b/include/drm/intel/reg_bits.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _REG_BITS_H_
+#define _REG_BITS_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+/*
+ * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
+ * implementations, for compatibility reasons with previous implementation.
+ */
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
+
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP8() - Prepare a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP8(__mask, __val) \
+ ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_PREP16() - Prepare a u16 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant
+ * expression, force u16 and for consistency with
+ * REG_FIELD_GET16(), REG_BIT16() and REG_GENMASK16().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP16(__mask, __val) \
+ ((u16)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U16_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val) \
+ ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET8() - Extract a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u8 and for consistency with
+ * REG_FIELD_PREP8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_GET64() - Extract a u64 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u64 and for consistency with
+ * REG_GENMASK64().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_MAX() - produce the maximum value representable by a field
+ * @__mask: shifted mask defining the field's length and position
+ *
+ * Local wrapper for FIELD_MAX() to return the maximum bit value that can
+ * be held in the field specified by @__mask, cast to u32 for consistency
+ * with other macros.
+ */
+#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+
+#define REG_MASKED_FIELD(mask, value) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask), (mask) & 0xffff0000, 0)) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(value), (value) & 0xffff0000, 0)) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask) && __builtin_constant_p(value), (value) & ~(mask), 0)) + \
+ ((mask) << 16 | (value)))
+
+#define REG_MASKED_FIELD_ENABLE(a) \
+ (__builtin_choose_expr(__builtin_constant_p(a), REG_MASKED_FIELD((a), (a)), ({ typeof(a) _a = (a); REG_MASKED_FIELD(_a, _a); })))
+
+#define REG_MASKED_FIELD_DISABLE(a) \
+ (REG_MASKED_FIELD((a), 0))
+
+#endif
diff --git a/include/drm/intel/step.h b/include/drm/intel/step.h
new file mode 100644
index 000000000000..4de7520109bc
--- /dev/null
+++ b/include/drm/intel/step.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __STEP_H__
+#define __STEP_H__
+
+#define STEP_ENUM_VAL(name) STEP_##name,
+
+#define STEP_NAME_LIST(func) \
+ func(A0) \
+ func(A1) \
+ func(A2) \
+ func(A3) \
+ func(B0) \
+ func(B1) \
+ func(B2) \
+ func(B3) \
+ func(C0) \
+ func(C1) \
+ func(C2) \
+ func(C3) \
+ func(D0) \
+ func(D1) \
+ func(D2) \
+ func(D3) \
+ func(E0) \
+ func(E1) \
+ func(E2) \
+ func(E3) \
+ func(F0) \
+ func(F1) \
+ func(F2) \
+ func(F3) \
+ func(G0) \
+ func(G1) \
+ func(G2) \
+ func(G3) \
+ func(H0) \
+ func(H1) \
+ func(H2) \
+ func(H3) \
+ func(I0) \
+ func(I1) \
+ func(I2) \
+ func(I3) \
+ func(J0) \
+ func(J1) \
+ func(J2) \
+ func(J3)
+
+/*
+ * Symbolic steppings that do not match the hardware. These are valid as
+ * symbolic names for both GT and display steppings.
+ */
+enum intel_step {
+ STEP_NONE = 0,
+ STEP_NAME_LIST(STEP_ENUM_VAL)
+ STEP_FUTURE,
+ STEP_FOREVER,
+};
+
+#endif /* __STEP_H__ */
diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h
index e9814e8149fd..27c224a70e6f 100644
--- a/include/drm/intel/xe_sriov_vfio.h
+++ b/include/drm/intel/xe_sriov_vfio.h
@@ -28,6 +28,17 @@ struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev);
bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
/**
+ * xe_sriov_vfio_flr_prepare() - Notify PF that VF FLR prepare has started.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function marks VF FLR as pending before PF receives GuC FLR event.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_flr_prepare(struct xe_device *xe, unsigned int vfid);
+
+/**
* xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
* @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
* @vfid: the VF identifier (can't be 0)
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
index c33cba111171..29b9c855af77 100644
--- a/include/drm/ttm/ttm_backup.h
+++ b/include/drm/ttm/ttm_backup.h
@@ -56,7 +56,7 @@ ttm_backup_page_ptr_to_handle(const struct page *page)
void ttm_backup_drop(struct file *backup, pgoff_t handle);
int ttm_backup_copy_page(struct file *backup, struct page *dst,
- pgoff_t handle, bool intr);
+ pgoff_t handle, bool intr, gfp_t additional_gfp);
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index bca3a8849d47..8310bc3d55f9 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -167,24 +167,34 @@ struct ttm_bo_kmap_obj {
/**
* struct ttm_operation_ctx
*
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
- * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
- * BOs share the same reservation object.
- * faults. Should only be used by TTM internally.
- * @resv: Reservation object to allow reserved evictions with.
- * @bytes_moved: Statistics on how many bytes have been moved.
- *
* Context for TTM operations like changing buffer placement or general memory
* allocation.
*/
struct ttm_operation_ctx {
+ /** @interruptible: Sleep interruptible if sleeping. */
bool interruptible;
+ /** @no_wait_gpu: Return immediately if the GPU is busy. */
bool no_wait_gpu;
+ /**
+ * @gfp_retry_mayfail: Use __GFP_RETRY_MAYFAIL | __GFP_NOWARN
+ * when allocating pages. This is to avoid invoking the OOM
+ * killer when populating a buffer object, in order to
+ * forward the error for it to be dealt with.
+ */
bool gfp_retry_mayfail;
+ /**
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used
+ * when multiple BOs share the same reservation object @resv.
+ */
bool allow_res_evict;
+ /**
+ * @resv: Reservation object to be used together with
+ * @allow_res_evict.
+ */
struct dma_resv *resv;
+ /**
+ * @bytes_moved: Statistics on how many bytes have been moved.
+ */
uint64_t bytes_moved;
};
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 233581670e78..26ee592e1994 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -29,6 +29,7 @@
#include <linux/mmzone.h>
#include <linux/llist.h>
#include <linux/spinlock.h>
+#include <linux/list_lru.h>
#include <drm/ttm/ttm_caching.h>
struct device;
@@ -45,8 +46,7 @@ struct ttm_tt;
* @order: the allocation order our pages have
* @caching: the caching type our pages have
* @shrinker_list: our place on the global shrinker list
- * @lock: protection of the page list
- * @pages: the list of pages in the pool
+ * @pages: the list_lru of pages in the pool
*/
struct ttm_pool_type {
struct ttm_pool *pool;
@@ -55,8 +55,7 @@ struct ttm_pool_type {
struct list_head shrinker_list;
- spinlock_t lock;
- struct list_head pages;
+ struct list_lru pages;
};
/**
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index 8776844e0eeb..336f7bb7188a 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -245,6 +245,7 @@
#define QCOM_ID_IPQ5000 503
#define QCOM_ID_IPQ0509 504
#define QCOM_ID_IPQ0518 505
+#define QCOM_ID_SM7450 506
#define QCOM_ID_SM6375 507
#define QCOM_ID_IPQ9514 510
#define QCOM_ID_IPQ9550 511
@@ -260,10 +261,12 @@
#define QCOM_ID_SM8475 530
#define QCOM_ID_SM8475P 531
#define QCOM_ID_SA8255P 532
+#define QCOM_ID_SA8650P 533
#define QCOM_ID_SA8775P 534
#define QCOM_ID_QRU1000 539
#define QCOM_ID_SM8475_2 540
#define QCOM_ID_QDU1000 545
+#define QCOM_ID_SM7450P 547
#define QCOM_ID_X1E80100 555
#define QCOM_ID_SM8650 557
#define QCOM_ID_SM4450 568
@@ -294,6 +297,13 @@
#define QCOM_ID_QCS8275 675
#define QCOM_ID_QCS9075 676
#define QCOM_ID_QCS615 680
+#define QCOM_ID_CQ7790M 731
+#define QCOM_ID_CQ7790S 732
+#define QCOM_ID_IPQ5200 765
+#define QCOM_ID_IPQ5210 766
+#define QCOM_ID_QCF2200 767
+#define QCOM_ID_QCF3200 768
+#define QCOM_ID_QCF3210 769
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/clock/axis,artpec9-clk.h b/include/dt-bindings/clock/axis,artpec9-clk.h
new file mode 100644
index 000000000000..c6787be8d686
--- /dev/null
+++ b/include/dt-bindings/clock/axis,artpec9-clk.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Device Tree binding constants for ARTPEC-9 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_ARTPEC9_H
+#define _DT_BINDINGS_CLOCK_ARTPEC9_H
+
+/* CMU_CMU */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_DOUT_SHARED0_DIV2 2
+#define CLK_DOUT_SHARED0_DIV3 3
+#define CLK_DOUT_SHARED0_DIV4 4
+#define CLK_FOUT_SHARED1_PLL 5
+#define CLK_DOUT_SHARED1_DIV2 6
+#define CLK_DOUT_SHARED1_DIV3 7
+#define CLK_DOUT_SHARED1_DIV4 8
+#define CLK_FOUT_AUDIO_PLL 9
+#define CLK_DOUT_CMU_ADD 10
+#define CLK_DOUT_CMU_BUS 11
+#define CLK_DOUT_CMU_CDC_CORE 12
+#define CLK_DOUT_CMU_CORE_MAIN 13
+#define CLK_DOUT_CMU_CPUCL_SWITCH 14
+#define CLK_DOUT_CMU_DLP_CORE 15
+#define CLK_DOUT_CMU_FSYS0_BUS 16
+#define CLK_DOUT_CMU_FSYS0_IP 17
+#define CLK_DOUT_CMU_FSYS1_BUS 18
+#define CLK_DOUT_CMU_FSYS1_SCAN0 19
+#define CLK_DOUT_CMU_FSYS1_SCAN1 20
+#define CLK_DOUT_CMU_GPU_3D 21
+#define CLK_DOUT_CMU_GPU_2D 22
+#define CLK_DOUT_CMU_IMEM_ACLK 23
+#define CLK_DOUT_CMU_IMEM_CA5 24
+#define CLK_DOUT_CMU_IMEM_JPEG 25
+#define CLK_DOUT_CMU_IMEM_SSS 26
+#define CLK_DOUT_CMU_IPA_CORE 27
+#define CLK_DOUT_CMU_LCPU 28
+#define CLK_DOUT_CMU_MIF_SWITCH 29
+#define CLK_DOUT_CMU_MIF_BUSP 30
+#define CLK_DOUT_CMU_PERI_DISP 31
+#define CLK_DOUT_CMU_PERI_IP 32
+#define CLK_DOUT_CMU_RSP_CORE 33
+#define CLK_DOUT_CMU_TRFM 34
+#define CLK_DOUT_CMU_VIO_CORE_L 35
+#define CLK_DOUT_CMU_VIO_CORE 36
+#define CLK_DOUT_CMU_VIP0 37
+#define CLK_DOUT_CMU_VIP1 38
+#define CLK_DOUT_CMU_VPP_CORE 39
+#define CLK_DOUT_CMU_VIO_AUDIO 40
+
+/* CMU_BUS */
+#define CLK_MOUT_BUS_ACLK_USER 1
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_ACLK_USER 1
+
+/* CMU_CPUCL */
+#define CLK_FOUT_CPUCL_PLL0 1
+#define CLK_MOUT_CPUCL_PLL0 2
+#define CLK_FOUT_CPUCL_PLL1 3
+#define CLK_MOUT_CPUCL_PLL_SCU 4
+#define CLK_MOUT_CPUCL_SWITCH_SCU_USER 5
+#define CLK_MOUT_CPUCL_SWITCH_USER 6
+#define CLK_DOUT_CPUCL_CPU 7
+#define CLK_DOUT_CPUCL_CLUSTER_PERIPHCLK 8
+#define CLK_DOUT_CPUCL_CLUSTER_GICCLK 9
+#define CLK_DOUT_CPUCL_CLUSTER_PCLK 10
+#define CLK_DOUT_CPUCL_CMUREF 11
+#define CLK_DOUT_CPUCL_CLUSTER_ATCLK 12
+#define CLK_DOUT_CPUCL_CLUSTER_SCU 13
+#define CLK_DOUT_CPUCL_DBG 14
+#define CLK_GOUT_CPUCL_SHORTSTOP 15
+#define CLK_GOUT_CPUCL_CLUSTER_CPU 16
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK 17
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG 18
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_IP_USER 2
+#define CLK_MOUT_FSYS0_MAIN_USER 3
+#define CLK_DOUT_FSYS0_125 4
+#define CLK_DOUT_FSYS0_ADC 5
+#define CLK_DOUT_FSYS0_BUS_300 6
+#define CLK_DOUT_FSYS0_EQOS0 7
+#define CLK_DOUT_FSYS0_EQOS1 8
+#define CLK_DOUT_FSYS0_MMC_CARD0 9
+#define CLK_DOUT_FSYS0_MMC_CARD1 10
+#define CLK_DOUT_FSYS0_MMC_CARD2 11
+#define CLK_DOUT_FSYS0_QSPI 12
+#define CLK_DOUT_FSYS0_SFMC_NAND 13
+#define CLK_GOUT_FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 14
+#define CLK_GOUT_FSYS0_EQOS_TOP0_IPCLKPORT_CLK_CSR_I 15
+#define CLK_GOUT_FSYS0_EQOS_TOP0_IPCLKPORT_I_RGMII_PHASE_CLK_250 16
+#define CLK_GOUT_FSYS0_EQOS_TOP0_IPCLKPORT_I_RGMII_TXCLK 17
+#define CLK_GOUT_FSYS0_EQOS_TOP1_IPCLKPORT_I_RGMII_PHASE_CLK_250 18
+#define CLK_GOUT_FSYS0_EQOS_TOP1_IPCLKPORT_I_RGMII_TXCLK 19
+#define CLK_GOUT_FSYS0_EQOS_TOP1_IPCLKPORT_ACLK_I 20
+#define CLK_GOUT_FSYS0_EQOS_TOP1_IPCLKPORT_CLK_CSR_I 21
+#define CLK_GOUT_FSYS0_I3C0_IPCLKPORT_I_APB_S_PCLK 22
+#define CLK_GOUT_FSYS0_I3C0_IPCLKPORT_I_CORE_CLK 23
+#define CLK_GOUT_FSYS0_I3C0_IPCLKPORT_I_DMA_CLK 24
+#define CLK_GOUT_FSYS0_I3C0_IPCLKPORT_I_HDR_TX_CLK 25
+#define CLK_GOUT_FSYS0_I3C1_IPCLKPORT_I_APB_S_PCLK 26
+#define CLK_GOUT_FSYS0_I3C1_IPCLKPORT_I_CORE_CLK 27
+#define CLK_GOUT_FSYS0_I3C1_IPCLKPORT_I_DMA_CLK 28
+#define CLK_GOUT_FSYS0_I3C1_IPCLKPORT_I_HDR_TX_CLK 29
+#define CLK_GOUT_FSYS0_MMC0_IPCLKPORT_SDCLKIN 30
+#define CLK_GOUT_FSYS0_MMC1_IPCLKPORT_SDCLKIN 31
+#define CLK_GOUT_FSYS0_MMC2_IPCLKPORT_SDCLKIN 32
+#define CLK_GOUT_FSYS0_QSPI_IPCLKPORT_HCLK 33
+#define CLK_GOUT_FSYS0_QSPI_IPCLKPORT_SSI_CLK 34
+#define CLK_GOUT_FSYS0_SFMC_IPCLKPORT_I_ACLK_NAND 35
+#define CLK_GOUT_FSYS0_I2C0_IPCLKPORT_I_PCLK 36
+#define CLK_GOUT_FSYS0_I2C1_IPCLKPORT_I_PCLK 37
+#define CLK_GOUT_FSYS0_MMC0_IPCLKPORT_I_ACLK 38
+#define CLK_GOUT_FSYS0_MMC1_IPCLKPORT_I_ACLK 39
+#define CLK_GOUT_FSYS0_MMC2_IPCLKPORT_I_ACLK 40
+#define CLK_GOUT_FSYS0_PWM_IPCLKPORT_I_PCLK_S0 41
+
+/* CMU_FSYS1 */
+#define CLK_FOUT_FSYS1_PLL 1
+#define CLK_MOUT_FSYS1_SCAN0_USER 2
+#define CLK_MOUT_FSYS1_SCAN1_USER 3
+#define CLK_MOUT_FSYS1_BUS_USER 4
+#define CLK_DOUT_FSYS1_200 5
+#define CLK_DOUT_FSYS1_BUS_300 6
+#define CLK_DOUT_FSYS1_OTP_MEM 7
+#define CLK_DOUT_FSYS1_PCIE_PHY_REFCLK_SYSPLL 8
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_PHY_APB2CR_PCLK_100 9
+#define CLK_GOUT_FSYS1_UART0_PCLK 10
+#define CLK_GOUT_FSYS1_UART0_SCLK_UART 11
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_PHY_APB2CR_PCLK_300 12
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_SUB_CON_X1_DBI_ACLK_SOC 13
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_SUB_CON_X1_MSTR_ACLK_SOC 14
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_SUB_CON_X1_SLV_ACLK_SOC 15
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_SUB_CON_X2_DBI_ACLK_SOC 16
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_SUB_CON_X2_MSTR_ACLK_SOC 17
+#define CLK_GOUT_FSYS1_IPCLKPORT_PCIE_SUB_CON_X2_SLV_ACLK_SOC 18
+#define CLK_GOUT_FSYS1_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 19
+#define CLK_GOUT_FSYS1_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 20
+#define CLK_GOUT_FSYS1_XHB_AHBBR_FSYS1_IPCLKPORT_CLK 21
+#define CLK_GOUT_FSYS1_XHB_USB_IPCLKPORT_CLK 22
+
+/* CMU_IMEM */
+#define CLK_MOUT_IMEM_ACLK_USER 1
+#define CLK_MOUT_IMEM_CA5_USER 2
+#define CLK_MOUT_IMEM_SSS_USER 3
+#define CLK_MOUT_IMEM_JPEG_USER 4
+#define CLK_DOUT_IMEM_PCLK 5
+#define CLK_GOUT_IMEM_CA5_0_IPCLKPORT_ATCLK 6
+#define CLK_GOUT_IMEM_CA5_0_IPCLKPORT_CLKIN 7
+#define CLK_GOUT_IMEM_CA5_0_IPCLKPORT_PCLK_DBG 8
+#define CLK_GOUT_IMEM_CA5_1_IPCLKPORT_ATCLK 9
+#define CLK_GOUT_IMEM_CA5_1_IPCLKPORT_CLKIN 10
+#define CLK_GOUT_IMEM_CA5_1_IPCLKPORT_PCLK_DBG 11
+#define CLK_GOUT_IMEM_MCT0_PCLK 12
+#define CLK_GOUT_IMEM_MCT1_PCLK 13
+#define CLK_GOUT_IMEM_MCT2_PCLK 14
+#define CLK_GOUT_IMEM_MCT3_PCLK 15
+#define CLK_GOUT_IMEM_PCLK_TMU0_APBIF 16
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_IP_USER 1
+#define CLK_MOUT_PERI_DISP_USER 2
+#define CLK_DOUT_PERI_125 3
+#define CLK_DOUT_PERI_PCLK 4
+#define CLK_DOUT_PERI_SPI 5
+#define CLK_DOUT_PERI_UART1 6
+#define CLK_DOUT_PERI_UART2 7
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK 8
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK 9
+#define CLK_GOUT_PERI_I3C2_IPCLKPORT_I_APB_S_PCLK 10
+#define CLK_GOUT_PERI_I3C2_IPCLKPORT_I_CORE_CLK 11
+#define CLK_GOUT_PERI_I3C2_IPCLKPORT_I_DMA_CLK 12
+#define CLK_GOUT_PERI_I3C2_IPCLKPORT_I_HDR_TX_CLK 13
+#define CLK_GOUT_PERI_I3C3_IPCLKPORT_I_APB_S_PCLK 14
+#define CLK_GOUT_PERI_I3C3_IPCLKPORT_I_CORE_CLK 15
+#define CLK_GOUT_PERI_I3C3_IPCLKPORT_I_DMA_CLK 16
+#define CLK_GOUT_PERI_I3C3_IPCLKPORT_I_HDR_TX_CLK 17
+#define CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 18
+#define CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK 19
+#define CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK 20
+#define CLK_GOUT_PERI_SPI0_PCLK 21
+#define CLK_GOUT_PERI_SPI0_SCLK_SPI 22
+#define CLK_GOUT_PERI_UART1_PCLK 23
+#define CLK_GOUT_PERI_UART1_SCLK_UART 24
+#define CLK_GOUT_PERI_UART2_PCLK 25
+#define CLK_GOUT_PERI_UART2_SCLK_UART 26
+
+#endif /* _DT_BINDINGS_CLOCK_ARTPEC9_H */
diff --git a/include/dt-bindings/clock/mobileye,eyeq6lplus-clk.h b/include/dt-bindings/clock/mobileye,eyeq6lplus-clk.h
new file mode 100644
index 000000000000..20d84ee24ad5
--- /dev/null
+++ b/include/dt-bindings/clock/mobileye,eyeq6lplus-clk.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2025 Mobileye Vision Technologies Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ6LPLUS_CLK_H
+#define _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ6LPLUS_CLK_H
+
+#define EQ6LPC_PLL_CPU 0
+#define EQ6LPC_PLL_DDR 1
+#define EQ6LPC_PLL_PER 2
+#define EQ6LPC_PLL_VDI 3
+#define EQ6LPC_PLL_ACC 4
+
+#define EQ6LPC_CPU_OCC 5
+
+#define EQ6LPC_ACC_VDI 6
+#define EQ6LPC_ACC_OCC 7
+#define EQ6LPC_ACC_FCMU 8
+
+#define EQ6LPC_DDR_OCC 9
+
+#define EQ6LPC_PER_OCC 10
+#define EQ6LPC_PER_I2C_SER 11
+#define EQ6LPC_PER_PCLK 12
+#define EQ6LPC_PER_TSU 13
+#define EQ6LPC_PER_OSPI 14
+#define EQ6LPC_PER_GPIO 15
+#define EQ6LPC_PER_TIMER 16
+#define EQ6LPC_PER_I2C 17
+#define EQ6LPC_PER_UART 18
+#define EQ6LPC_PER_SPI 19
+#define EQ6LPC_PER_PERIPH 20
+
+#define EQ6LPC_VDI_OCC 21
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6125.h b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
index 4ff974f4fcc3..f58b85d2c814 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sm6125.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
@@ -6,6 +6,7 @@
#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+/* Clocks */
#define DISP_CC_PLL0 0
#define DISP_CC_MDSS_AHB_CLK 1
#define DISP_CC_MDSS_AHB_CLK_SRC 2
@@ -35,7 +36,10 @@
#define DISP_CC_MDSS_VSYNC_CLK_SRC 26
#define DISP_CC_XO_CLK 27
-/* DISP_CC GDSCR */
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+
+/* GDSCs */
#define MDSS_GDSC 0
#endif
diff --git a/include/dt-bindings/clock/qcom,eliza-gcc.h b/include/dt-bindings/clock/qcom,eliza-gcc.h
new file mode 100644
index 000000000000..4d27b329ae99
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,eliza-gcc.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_ELIZA_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_ELIZA_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 2
+#define GCC_BOOT_ROM_AHB_CLK 3
+#define GCC_CAM_BIST_MCLK_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_GP1_CLK 16
+#define GCC_GP1_CLK_SRC 17
+#define GCC_GP2_CLK 18
+#define GCC_GP2_CLK_SRC 19
+#define GCC_GP3_CLK 20
+#define GCC_GP3_CLK_SRC 21
+#define GCC_GPLL0 22
+#define GCC_GPLL0_OUT_EVEN 23
+#define GCC_GPLL4 24
+#define GCC_GPLL7 25
+#define GCC_GPLL8 26
+#define GCC_GPLL9 27
+#define GCC_GPU_CFG_AHB_CLK 28
+#define GCC_GPU_GEMNOC_GFX_CLK 29
+#define GCC_GPU_GPLL0_CPH_CLK_SRC 30
+#define GCC_GPU_GPLL0_DIV_CPH_CLK_SRC 31
+#define GCC_GPU_SMMU_VOTE_CLK 32
+#define GCC_MMU_TCU_VOTE_CLK 33
+#define GCC_PCIE_0_AUX_CLK 34
+#define GCC_PCIE_0_AUX_CLK_SRC 35
+#define GCC_PCIE_0_CFG_AHB_CLK 36
+#define GCC_PCIE_0_MSTR_AXI_CLK 37
+#define GCC_PCIE_0_PHY_RCHNG_CLK 38
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 39
+#define GCC_PCIE_0_PIPE_CLK 40
+#define GCC_PCIE_0_PIPE_CLK_SRC 41
+#define GCC_PCIE_0_PIPE_DIV2_CLK 42
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PCIE_1_AUX_CLK 46
+#define GCC_PCIE_1_AUX_CLK_SRC 47
+#define GCC_PCIE_1_CFG_AHB_CLK 48
+#define GCC_PCIE_1_MSTR_AXI_CLK 49
+#define GCC_PCIE_1_PHY_RCHNG_CLK 50
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_1_PIPE_CLK 52
+#define GCC_PCIE_1_PIPE_CLK_SRC 53
+#define GCC_PCIE_1_PIPE_DIV2_CLK 54
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 55
+#define GCC_PCIE_1_SLV_AXI_CLK 56
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 57
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 58
+#define GCC_PCIE_RSCC_XO_CLK 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 64
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 65
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 66
+#define GCC_QMIP_GPU_AHB_CLK 67
+#define GCC_QMIP_PCIE_AHB_CLK 68
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 69
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 70
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 71
+#define GCC_QUPV3_WRAP1_CORE_CLK 72
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 73
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 74
+#define GCC_QUPV3_WRAP1_S0_CLK 75
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 76
+#define GCC_QUPV3_WRAP1_S1_CLK 77
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 78
+#define GCC_QUPV3_WRAP1_S2_CLK 79
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 80
+#define GCC_QUPV3_WRAP1_S3_CLK 81
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 82
+#define GCC_QUPV3_WRAP1_S4_CLK 83
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 84
+#define GCC_QUPV3_WRAP1_S5_CLK 85
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S6_CLK 87
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S7_CLK 89
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 90
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 91
+#define GCC_QUPV3_WRAP2_CORE_CLK 92
+#define GCC_QUPV3_WRAP2_S0_CLK 93
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 94
+#define GCC_QUPV3_WRAP2_S1_CLK 95
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 96
+#define GCC_QUPV3_WRAP2_S2_CLK 97
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 98
+#define GCC_QUPV3_WRAP2_S3_CLK 99
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 100
+#define GCC_QUPV3_WRAP2_S4_CLK 101
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 102
+#define GCC_QUPV3_WRAP2_S5_CLK 103
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 104
+#define GCC_QUPV3_WRAP2_S6_CLK 105
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 106
+#define GCC_QUPV3_WRAP2_S7_CLK 107
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 108
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 109
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 110
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 111
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 112
+#define GCC_SDCC1_AHB_CLK 113
+#define GCC_SDCC1_APPS_CLK 114
+#define GCC_SDCC1_APPS_CLK_SRC 115
+#define GCC_SDCC1_ICE_CORE_CLK 116
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 117
+#define GCC_SDCC2_AHB_CLK 118
+#define GCC_SDCC2_APPS_CLK 119
+#define GCC_SDCC2_APPS_CLK_SRC 120
+#define GCC_UFS_PHY_AHB_CLK 121
+#define GCC_UFS_PHY_AXI_CLK 122
+#define GCC_UFS_PHY_AXI_CLK_SRC 123
+#define GCC_UFS_PHY_ICE_CORE_CLK 124
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 125
+#define GCC_UFS_PHY_PHY_AUX_CLK 126
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 127
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 128
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 129
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 130
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 131
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 132
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 133
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 134
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 135
+#define GCC_USB30_PRIM_ATB_CLK 136
+#define GCC_USB30_PRIM_MASTER_CLK 137
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 138
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 139
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 140
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 141
+#define GCC_USB30_PRIM_SLEEP_CLK 142
+#define GCC_USB3_PRIM_PHY_AUX_CLK 143
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 144
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 145
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 146
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 147
+#define GCC_VIDEO_AHB_CLK 148
+#define GCC_VIDEO_AXI0_CLK 149
+#define GCC_VIDEO_AXI1_CLK 150
+#define GCC_VIDEO_XO_CLK 151
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_0_PHY_GDSC 1
+#define GCC_PCIE_1_GDSC 2
+#define GCC_PCIE_1_PHY_GDSC 3
+#define GCC_UFS_MEM_PHY_GDSC 4
+#define GCC_UFS_PHY_GDSC 5
+#define GCC_USB30_PRIM_GDSC 6
+#define GCC_USB3_PHY_GDSC 7
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PCIE_RSCC_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUPV3_WRAPPER_2_BCR 19
+#define GCC_QUSB2PHY_PRIM_BCR 20
+#define GCC_QUSB2PHY_SEC_BCR 21
+#define GCC_SDCC1_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_UFS_PHY_BCR 24
+#define GCC_USB30_PRIM_BCR 25
+#define GCC_USB3_DP_PHY_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_SEC_BCR 27
+#define GCC_USB3_PHY_PRIM_BCR 28
+#define GCC_USB3_PHY_SEC_BCR 29
+#define GCC_USB3PHY_PHY_PRIM_BCR 30
+#define GCC_USB3PHY_PHY_SEC_BCR 31
+#define GCC_VIDEO_AXI0_CLK_ARES 32
+#define GCC_VIDEO_AXI1_CLK_ARES 33
+#define GCC_VIDEO_BCR 34
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,eliza-tcsr.h b/include/dt-bindings/clock/qcom,eliza-tcsr.h
new file mode 100644
index 000000000000..aeb5e2b1a47b
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,eliza-tcsr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_ELIZA_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_ELIZA_H
+
+/* TCSR_CC clocks */
+#define TCSR_HDMI_CLKREF_EN 0
+#define TCSR_PCIE_0_CLKREF_EN 1
+#define TCSR_PCIE_1_CLKREF_EN 2
+#define TCSR_UFS_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5210-gcc.h b/include/dt-bindings/clock/qcom,ipq5210-gcc.h
new file mode 100644
index 000000000000..84116f34ee4d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5210-gcc.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5210_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5210_H
+
+#define GCC_ADSS_PWM_CLK 0
+#define GCC_ADSS_PWM_CLK_SRC 1
+#define GCC_CMN_12GPLL_AHB_CLK 2
+#define GCC_CMN_12GPLL_SYS_CLK 3
+#define GCC_CNOC_LPASS_CFG_CLK 4
+#define GCC_CNOC_PCIE0_1LANE_S_CLK 5
+#define GCC_CNOC_PCIE1_2LANE_S_CLK 6
+#define GCC_CNOC_USB_CLK 7
+#define GCC_GEPHY_SYS_CLK 8
+#define GCC_LPASS_AXIM_CLK_SRC 9
+#define GCC_LPASS_CORE_AXIM_CLK 10
+#define GCC_LPASS_SWAY_CLK 11
+#define GCC_LPASS_SWAY_CLK_SRC 12
+#define GCC_MDIO_AHB_CLK 13
+#define GCC_MDIO_GEPHY_AHB_CLK 14
+#define GCC_NSS_TS_CLK 15
+#define GCC_NSS_TS_CLK_SRC 16
+#define GCC_NSSCC_CLK 17
+#define GCC_NSSCFG_CLK 18
+#define GCC_NSSNOC_ATB_CLK 19
+#define GCC_NSSNOC_MEMNOC_1_CLK 20
+#define GCC_NSSNOC_MEMNOC_BFDCD_CLK_SRC 21
+#define GCC_NSSNOC_MEMNOC_CLK 22
+#define GCC_NSSNOC_MEMNOC_DIV_CLK_SRC 23
+#define GCC_NSSNOC_NSSCC_CLK 24
+#define GCC_NSSNOC_PCNOC_1_CLK 25
+#define GCC_NSSNOC_QOSGEN_REF_CLK 26
+#define GCC_NSSNOC_SNOC_1_CLK 27
+#define GCC_NSSNOC_SNOC_CLK 28
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 29
+#define GCC_NSSNOC_XO_DCD_CLK 30
+#define GCC_PCIE0_AHB_CLK 31
+#define GCC_PCIE0_AUX_CLK 32
+#define GCC_PCIE0_AXI_M_CLK 33
+#define GCC_PCIE0_AXI_M_CLK_SRC 34
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 35
+#define GCC_PCIE0_AXI_S_CLK 36
+#define GCC_PCIE0_AXI_S_CLK_SRC 37
+#define GCC_PCIE0_PIPE_CLK 38
+#define GCC_PCIE0_PIPE_CLK_SRC 39
+#define GCC_PCIE0_RCHNG_CLK 40
+#define GCC_PCIE0_RCHNG_CLK_SRC 41
+#define GCC_PCIE1_AHB_CLK 42
+#define GCC_PCIE1_AUX_CLK 43
+#define GCC_PCIE1_AXI_M_CLK 44
+#define GCC_PCIE1_AXI_M_CLK_SRC 45
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 46
+#define GCC_PCIE1_AXI_S_CLK 47
+#define GCC_PCIE1_AXI_S_CLK_SRC 48
+#define GCC_PCIE1_PIPE_CLK 49
+#define GCC_PCIE1_PIPE_CLK_SRC 50
+#define GCC_PCIE1_RCHNG_CLK 51
+#define GCC_PCIE1_RCHNG_CLK_SRC 52
+#define GCC_PCIE_AUX_CLK_SRC 53
+#define GCC_PCNOC_BFDCD_CLK_SRC 54
+#define GCC_PON_APB_CLK 55
+#define GCC_PON_TM_CLK 56
+#define GCC_PON_TM2X_CLK 57
+#define GCC_PON_TM2X_CLK_SRC 58
+#define GCC_QDSS_AT_CLK 59
+#define GCC_QDSS_AT_CLK_SRC 60
+#define GCC_QDSS_DAP_CLK 61
+#define GCC_QDSS_TSCTR_CLK_SRC 62
+#define GCC_QPIC_AHB_CLK 63
+#define GCC_QPIC_CLK 64
+#define GCC_QPIC_CLK_SRC 65
+#define GCC_QPIC_IO_MACRO_CLK 66
+#define GCC_QPIC_IO_MACRO_CLK_SRC 67
+#define GCC_QRNG_AHB_CLK 68
+#define GCC_QUPV3_AHB_MST_CLK 69
+#define GCC_QUPV3_AHB_SLV_CLK 70
+#define GCC_QUPV3_WRAP_SE0_CLK 71
+#define GCC_QUPV3_WRAP_SE0_CLK_SRC 72
+#define GCC_QUPV3_WRAP_SE1_CLK 73
+#define GCC_QUPV3_WRAP_SE1_CLK_SRC 74
+#define GCC_QUPV3_WRAP_SE2_CLK 75
+#define GCC_QUPV3_WRAP_SE2_CLK_SRC 76
+#define GCC_QUPV3_WRAP_SE3_CLK 77
+#define GCC_QUPV3_WRAP_SE3_CLK_SRC 78
+#define GCC_QUPV3_WRAP_SE4_CLK 79
+#define GCC_QUPV3_WRAP_SE4_CLK_SRC 80
+#define GCC_QUPV3_WRAP_SE5_CLK 81
+#define GCC_QUPV3_WRAP_SE5_CLK_SRC 82
+#define GCC_SDCC1_AHB_CLK 83
+#define GCC_SDCC1_APPS_CLK 84
+#define GCC_SDCC1_APPS_CLK_SRC 85
+#define GCC_SDCC1_ICE_CORE_CLK 86
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 87
+#define GCC_SLEEP_CLK_SRC 88
+#define GCC_SNOC_LPASS_CLK 89
+#define GCC_SNOC_PCIE0_AXI_M_CLK 90
+#define GCC_SNOC_PCIE1_AXI_M_CLK 91
+#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 92
+#define GCC_UNIPHY0_AHB_CLK 93
+#define GCC_UNIPHY0_SYS_CLK 94
+#define GCC_UNIPHY1_AHB_CLK 95
+#define GCC_UNIPHY1_SYS_CLK 96
+#define GCC_UNIPHY2_AHB_CLK 97
+#define GCC_UNIPHY2_SYS_CLK 98
+#define GCC_UNIPHY_SYS_CLK_SRC 99
+#define GCC_USB0_AUX_CLK 100
+#define GCC_USB0_AUX_CLK_SRC 101
+#define GCC_USB0_MASTER_CLK 102
+#define GCC_USB0_MASTER_CLK_SRC 103
+#define GCC_USB0_MOCK_UTMI_CLK 104
+#define GCC_USB0_MOCK_UTMI_CLK_SRC 105
+#define GCC_USB0_MOCK_UTMI_DIV_CLK_SRC 106
+#define GCC_USB0_PHY_CFG_AHB_CLK 107
+#define GCC_USB0_PIPE_CLK 108
+#define GCC_USB0_PIPE_CLK_SRC 109
+#define GCC_USB0_SLEEP_CLK 110
+#define GCC_XO_CLK_SRC 111
+#define GPLL0_MAIN 112
+#define GPLL0 113
+#define GPLL2_MAIN 114
+#define GPLL2 115
+#define GPLL4_MAIN 116
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6115-dispcc.h b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
index d1a6c45b5029..ab8d312ade37 100644
--- a/include/dt-bindings/clock/qcom,sm6115-dispcc.h
+++ b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
@@ -6,7 +6,7 @@
#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
-/* DISP_CC clocks */
+/* Clocks */
#define DISP_CC_PLL0 0
#define DISP_CC_PLL0_OUT_MAIN 1
#define DISP_CC_MDSS_AHB_CLK 2
@@ -30,7 +30,10 @@
#define DISP_CC_SLEEP_CLK 20
#define DISP_CC_SLEEP_CLK_SRC 21
-/* DISP_CC GDSCR */
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+
+/* GDSCs */
#define MDSS_GDSC 0
#endif
diff --git a/include/dt-bindings/clock/renesas,r9a08g046-cpg.h b/include/dt-bindings/clock/renesas,r9a08g046-cpg.h
new file mode 100644
index 000000000000..018b0a1e4340
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a08g046-cpg.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2026 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A08G046_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A08G046_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A08G046 CPG Core Clocks */
+#define R9A08G046_CLK_I 0
+#define R9A08G046_CLK_IC0 1
+#define R9A08G046_CLK_IC1 2
+#define R9A08G046_CLK_IC2 3
+#define R9A08G046_CLK_IC3 4
+#define R9A08G046_CLK_P0 5
+#define R9A08G046_CLK_P1 6
+#define R9A08G046_CLK_P2 7
+#define R9A08G046_CLK_P3 8
+#define R9A08G046_CLK_P4 9
+#define R9A08G046_CLK_P5 10
+#define R9A08G046_CLK_P6 11
+#define R9A08G046_CLK_P7 12
+#define R9A08G046_CLK_P8 13
+#define R9A08G046_CLK_P9 14
+#define R9A08G046_CLK_P10 15
+#define R9A08G046_CLK_P13 16
+#define R9A08G046_CLK_P14 17
+#define R9A08G046_CLK_P15 18
+#define R9A08G046_CLK_P16 19
+#define R9A08G046_CLK_P17 20
+#define R9A08G046_CLK_P18 21
+#define R9A08G046_CLK_P19 22
+#define R9A08G046_CLK_P20 23
+#define R9A08G046_CLK_M0 24
+#define R9A08G046_CLK_M1 25
+#define R9A08G046_CLK_M2 26
+#define R9A08G046_CLK_M3 27
+#define R9A08G046_CLK_M4 28
+#define R9A08G046_CLK_M5 29
+#define R9A08G046_CLK_M6 30
+#define R9A08G046_CLK_AT 31
+#define R9A08G046_CLK_B 32
+#define R9A08G046_CLK_ETHTX01 33
+#define R9A08G046_CLK_ETHTX02 34
+#define R9A08G046_CLK_ETHRX01 35
+#define R9A08G046_CLK_ETHRX02 36
+#define R9A08G046_CLK_ETHRM0 37
+#define R9A08G046_CLK_ETHTX11 38
+#define R9A08G046_CLK_ETHTX12 39
+#define R9A08G046_CLK_ETHRX11 40
+#define R9A08G046_CLK_ETHRX12 41
+#define R9A08G046_CLK_ETHRM1 42
+#define R9A08G046_CLK_G 43
+#define R9A08G046_CLK_HP 44
+#define R9A08G046_CLK_SD0 45
+#define R9A08G046_CLK_SD1 46
+#define R9A08G046_CLK_SD2 47
+#define R9A08G046_CLK_SPI0 48
+#define R9A08G046_CLK_SPI1 49
+#define R9A08G046_CLK_S0 50
+#define R9A08G046_CLK_SWD 51
+#define R9A08G046_OSCCLK 52
+#define R9A08G046_OSCCLK2 53
+#define R9A08G046_MIPI_DSI_PLLCLK 54
+#define R9A08G046_USB_SCLK 55
+
+/* R9A08G046 Module Clocks */
+#define R9A08G046_CA55_SCLK 0
+#define R9A08G046_CA55_PCLK 1
+#define R9A08G046_CA55_ATCLK 2
+#define R9A08G046_CA55_GICCLK 3
+#define R9A08G046_CA55_PERICLK 4
+#define R9A08G046_CA55_ACLK 5
+#define R9A08G046_CA55_TSCLK 6
+#define R9A08G046_CA55_CORECLK0 7
+#define R9A08G046_CA55_CORECLK1 8
+#define R9A08G046_CA55_CORECLK2 9
+#define R9A08G046_CA55_CORECLK3 10
+#define R9A08G046_SRAM_ACPU_ACLK0 11
+#define R9A08G046_SRAM_ACPU_ACLK1 12
+#define R9A08G046_SRAM_ACPU_ACLK2 13
+#define R9A08G046_GIC600_GICCLK 14
+#define R9A08G046_IA55_CLK 15
+#define R9A08G046_IA55_PCLK 16
+#define R9A08G046_MHU_PCLK 17
+#define R9A08G046_SYC_CNT_CLK 18
+#define R9A08G046_DMAC_ACLK 19
+#define R9A08G046_DMAC_PCLK 20
+#define R9A08G046_OSTM0_PCLK 21
+#define R9A08G046_OSTM1_PCLK 22
+#define R9A08G046_OSTM2_PCLK 23
+#define R9A08G046_MTU_X_MCK_MTU3 24
+#define R9A08G046_POE3_CLKM_POE 25
+#define R9A08G046_GPT_PCLK 26
+#define R9A08G046_POEG_A_CLKP 27
+#define R9A08G046_POEG_B_CLKP 28
+#define R9A08G046_POEG_C_CLKP 29
+#define R9A08G046_POEG_D_CLKP 30
+#define R9A08G046_WDT0_PCLK 31
+#define R9A08G046_WDT0_CLK 32
+#define R9A08G046_WDT1_PCLK 33
+#define R9A08G046_WDT1_CLK 34
+#define R9A08G046_WDT2_PCLK 35
+#define R9A08G046_WDT2_CLK 36
+#define R9A08G046_XSPI_HCLK 37
+#define R9A08G046_XSPI_ACLK 38
+#define R9A08G046_XSPI_CLK 39
+#define R9A08G046_XSPI_CLKX2 40
+#define R9A08G046_SDHI0_IMCLK 41
+#define R9A08G046_SDHI0_IMCLK2 42
+#define R9A08G046_SDHI0_CLK_HS 43
+#define R9A08G046_SDHI0_IACLKS 44
+#define R9A08G046_SDHI0_IACLKM 45
+#define R9A08G046_SDHI1_IMCLK 46
+#define R9A08G046_SDHI1_IMCLK2 47
+#define R9A08G046_SDHI1_CLK_HS 48
+#define R9A08G046_SDHI1_IACLKS 49
+#define R9A08G046_SDHI1_IACLKM 50
+#define R9A08G046_SDHI2_IMCLK 51
+#define R9A08G046_SDHI2_IMCLK2 52
+#define R9A08G046_SDHI2_CLK_HS 53
+#define R9A08G046_SDHI2_IACLKS 54
+#define R9A08G046_SDHI2_IACLKM 55
+#define R9A08G046_GE3D_CLK 56
+#define R9A08G046_GE3D_AXI_CLK 57
+#define R9A08G046_GE3D_ACE_CLK 58
+#define R9A08G046_ISU_ACLK 59
+#define R9A08G046_ISU_PCLK 60
+#define R9A08G046_H264_CLK_A 61
+#define R9A08G046_H264_CLK_P 62
+#define R9A08G046_CRU_SYSCLK 63
+#define R9A08G046_CRU_VCLK 64
+#define R9A08G046_CRU_PCLK 65
+#define R9A08G046_CRU_ACLK 66
+#define R9A08G046_MIPI_DSI_SYSCLK 67
+#define R9A08G046_MIPI_DSI_ACLK 68
+#define R9A08G046_MIPI_DSI_PCLK 69
+#define R9A08G046_MIPI_DSI_VCLK 70
+#define R9A08G046_MIPI_DSI_LPCLK 71
+#define R9A08G046_LVDS_PLLCLK 72
+#define R9A08G046_LVDS_CLK_DOT0 73
+#define R9A08G046_LCDC_CLK_A 74
+#define R9A08G046_LCDC_CLK_D 75
+#define R9A08G046_LCDC_CLK_P 76
+#define R9A08G046_SSI0_PCLK2 77
+#define R9A08G046_SSI0_PCLK_SFR 78
+#define R9A08G046_SSI1_PCLK2 79
+#define R9A08G046_SSI1_PCLK_SFR 80
+#define R9A08G046_SSI2_PCLK2 81
+#define R9A08G046_SSI2_PCLK_SFR 82
+#define R9A08G046_SSI3_PCLK2 83
+#define R9A08G046_SSI3_PCLK_SFR 84
+#define R9A08G046_USB_U2H0_HCLK 85
+#define R9A08G046_USB_U2H1_HCLK 86
+#define R9A08G046_USB_U2P0_EXR_CPUCLK 87
+#define R9A08G046_USB_U2P1_EXR_CPUCLK 88
+#define R9A08G046_USB_PCLK 89
+#define R9A08G046_ETH0_CLK_AXI 90
+#define R9A08G046_ETH0_CLK_CHI 91
+#define R9A08G046_ETH0_CLK_TX_I 92
+#define R9A08G046_ETH0_CLK_RX_I 93
+#define R9A08G046_ETH0_CLK_TX_180_I 94
+#define R9A08G046_ETH0_CLK_RX_180_I 95
+#define R9A08G046_ETH0_CLK_RMII_I 96
+#define R9A08G046_ETH0_CLK_PTP_REF_I 97
+#define R9A08G046_ETH0_CLK_TX_I_RMII 98
+#define R9A08G046_ETH0_CLK_RX_I_RMII 99
+#define R9A08G046_ETH1_CLK_AXI 100
+#define R9A08G046_ETH1_CLK_CHI 101
+#define R9A08G046_ETH1_CLK_TX_I 102
+#define R9A08G046_ETH1_CLK_RX_I 103
+#define R9A08G046_ETH1_CLK_TX_180_I 104
+#define R9A08G046_ETH1_CLK_RX_180_I 105
+#define R9A08G046_ETH1_CLK_RMII_I 106
+#define R9A08G046_ETH1_CLK_PTP_REF_I 107
+#define R9A08G046_ETH1_CLK_TX_I_RMII 108
+#define R9A08G046_ETH1_CLK_RX_I_RMII 109
+#define R9A08G046_I2C0_PCLK 110
+#define R9A08G046_I2C1_PCLK 111
+#define R9A08G046_I2C2_PCLK 112
+#define R9A08G046_I2C3_PCLK 113
+#define R9A08G046_SCIF0_CLK_PCK 114
+#define R9A08G046_SCIF1_CLK_PCK 115
+#define R9A08G046_SCIF2_CLK_PCK 116
+#define R9A08G046_SCIF3_CLK_PCK 117
+#define R9A08G046_SCIF4_CLK_PCK 118
+#define R9A08G046_SCIF5_CLK_PCK 119
+#define R9A08G046_RSCI0_PCLK 120
+#define R9A08G046_RSCI0_TCLK 121
+#define R9A08G046_RSCI1_PCLK 122
+#define R9A08G046_RSCI1_TCLK 123
+#define R9A08G046_RSCI2_PCLK 124
+#define R9A08G046_RSCI2_TCLK 125
+#define R9A08G046_RSCI3_PCLK 126
+#define R9A08G046_RSCI3_TCLK 127
+#define R9A08G046_RSPI0_PCLK 128
+#define R9A08G046_RSPI0_TCLK 129
+#define R9A08G046_RSPI1_PCLK 130
+#define R9A08G046_RSPI1_TCLK 131
+#define R9A08G046_RSPI2_PCLK 132
+#define R9A08G046_RSPI2_TCLK 133
+#define R9A08G046_CANFD_PCLK 134
+#define R9A08G046_CANFD_CLK_RAM 135
+#define R9A08G046_GPIO_HCLK 136
+#define R9A08G046_ADC0_ADCLK 137
+#define R9A08G046_ADC0_PCLK 138
+#define R9A08G046_ADC1_ADCLK 139
+#define R9A08G046_ADC1_PCLK 140
+#define R9A08G046_TSU_PCLK 141
+#define R9A08G046_PDM_PCLK 142
+#define R9A08G046_PDM_CCLK 143
+#define R9A08G046_PCI_ACLK 144
+#define R9A08G046_PCI_CLKL1PM 145
+#define R9A08G046_PCI_CLK_PMU 146
+#define R9A08G046_SPDIF_PCLK 147
+#define R9A08G046_I3C_TCLK 148
+#define R9A08G046_I3C_PCLK 149
+#define R9A08G046_VBAT_BCLK 150
+#define R9A08G046_BSC_X_BCK_BSC 151
+
+/* R9A08G046 Resets */
+#define R9A08G046_CA55_RST0_0 0
+#define R9A08G046_CA55_RST0_1 1
+#define R9A08G046_CA55_RST0_2 2
+#define R9A08G046_CA55_RST0_3 3
+#define R9A08G046_CA55_RST4_0 4
+#define R9A08G046_CA55_RST4_1 5
+#define R9A08G046_CA55_RST4_2 6
+#define R9A08G046_CA55_RST4_3 7
+#define R9A08G046_CA55_RST8 8
+#define R9A08G046_CA55_RST9 9
+#define R9A08G046_CA55_RST10 10
+#define R9A08G046_CA55_RST11 11
+#define R9A08G046_CA55_RST12 12
+#define R9A08G046_CA55_RST13 13
+#define R9A08G046_CA55_RST14 14
+#define R9A08G046_CA55_RST15 15
+#define R9A08G046_CA55_RST16 16
+#define R9A08G046_SRAM_ACPU_ARESETN0 17
+#define R9A08G046_SRAM_ACPU_ARESETN1 18
+#define R9A08G046_SRAM_ACPU_ARESETN2 19
+#define R9A08G046_GIC600_GICRESET_N 20
+#define R9A08G046_GIC600_DBG_GICRESET_N 21
+#define R9A08G046_IA55_RESETN 22
+#define R9A08G046_MHU_RESETN 23
+#define R9A08G046_SYC_RESETN 24
+#define R9A08G046_DMAC_ARESETN 25
+#define R9A08G046_DMAC_RST_ASYNC 26
+#define R9A08G046_GTM0_PRESETZ 27
+#define R9A08G046_GTM1_PRESETZ 28
+#define R9A08G046_GTM2_PRESETZ 29
+#define R9A08G046_MTU_X_PRESET_MTU3 30
+#define R9A08G046_POE3_RST_M_REG 31
+#define R9A08G046_GPT_RST_C 32
+#define R9A08G046_POEG_A_RST 33
+#define R9A08G046_POEG_B_RST 34
+#define R9A08G046_POEG_C_RST 35
+#define R9A08G046_POEG_D_RST 36
+#define R9A08G046_WDT0_PRESETN 37
+#define R9A08G046_WDT1_PRESETN 38
+#define R9A08G046_WDT2_PRESETN 39
+#define R9A08G046_XSPI_HRESETN 40
+#define R9A08G046_XSPI_ARESETN 41
+#define R9A08G046_SDHI0_IXRST 42
+#define R9A08G046_SDHI1_IXRST 43
+#define R9A08G046_SDHI2_IXRST 44
+#define R9A08G046_SDHI0_IXRSTAXIM 45
+#define R9A08G046_SDHI0_IXRSTAXIS 46
+#define R9A08G046_SDHI1_IXRSTAXIM 47
+#define R9A08G046_SDHI1_IXRSTAXIS 48
+#define R9A08G046_SDHI2_IXRSTAXIM 49
+#define R9A08G046_SDHI2_IXRSTAXIS 50
+#define R9A08G046_GE3D_RESETN 51
+#define R9A08G046_GE3D_AXI_RESETN 52
+#define R9A08G046_GE3D_ACE_RESETN 53
+#define R9A08G046_ISU_ARESETN 54
+#define R9A08G046_ISU_PRESETN 55
+#define R9A08G046_H264_X_RESET_VCP 56
+#define R9A08G046_H264_CP_PRESET_P 57
+#define R9A08G046_CRU_CMN_RSTB 58
+#define R9A08G046_CRU_PRESETN 59
+#define R9A08G046_CRU_ARESETN 60
+#define R9A08G046_MIPI_DSI_CMN_RSTB 61
+#define R9A08G046_MIPI_DSI_ARESET_N 62
+#define R9A08G046_MIPI_DSI_PRESET_N 63
+#define R9A08G046_LCDC_RESET_N 64
+#define R9A08G046_SSI0_RST_M2_REG 65
+#define R9A08G046_SSI1_RST_M2_REG 66
+#define R9A08G046_SSI2_RST_M2_REG 67
+#define R9A08G046_SSI3_RST_M2_REG 68
+#define R9A08G046_USB_U2H0_HRESETN 69
+#define R9A08G046_USB_U2H1_HRESETN 70
+#define R9A08G046_USB_U2P0_EXL_SYSRST 71
+#define R9A08G046_USB_PRESETN 72
+#define R9A08G046_USB_U2P1_EXL_SYSRST 73
+#define R9A08G046_ETH0_ARESET_N 74
+#define R9A08G046_ETH1_ARESET_N 75
+#define R9A08G046_I2C0_MRST 76
+#define R9A08G046_I2C1_MRST 77
+#define R9A08G046_I2C2_MRST 78
+#define R9A08G046_I2C3_MRST 79
+#define R9A08G046_SCIF0_RST_SYSTEM_N 80
+#define R9A08G046_SCIF1_RST_SYSTEM_N 81
+#define R9A08G046_SCIF2_RST_SYSTEM_N 82
+#define R9A08G046_SCIF3_RST_SYSTEM_N 83
+#define R9A08G046_SCIF4_RST_SYSTEM_N 84
+#define R9A08G046_SCIF5_RST_SYSTEM_N 85
+#define R9A08G046_RSPI0_PRESETN 86
+#define R9A08G046_RSPI1_PRESETN 87
+#define R9A08G046_RSPI2_PRESETN 88
+#define R9A08G046_RSPI0_TRESETN 89
+#define R9A08G046_RSPI1_TRESETN 90
+#define R9A08G046_RSPI2_TRESETN 91
+#define R9A08G046_CANFD_RSTP_N 92
+#define R9A08G046_CANFD_RSTC_N 93
+#define R9A08G046_GPIO_RSTN 94
+#define R9A08G046_GPIO_PORT_RESETN 95
+#define R9A08G046_GPIO_SPARE_RESETN 96
+#define R9A08G046_ADC0_PRESETN 97
+#define R9A08G046_ADC0_ADRST_N 98
+#define R9A08G046_ADC1_PRESETN 99
+#define R9A08G046_ADC1_ADRST_N 100
+#define R9A08G046_TSU_PRESETN 101
+#define R9A08G046_PDM_PRESETN 102
+#define R9A08G046_PCI_ARESETN 103
+#define R9A08G046_SPDIF_RST 104
+#define R9A08G046_I3C_TRESETN 105
+#define R9A08G046_I3C_PRESETN 106
+#define R9A08G046_VBAT_BRESETN 107
+#define R9A08G046_RSCI0_PRESETN 108
+#define R9A08G046_RSCI1_PRESETN 109
+#define R9A08G046_RSCI2_PRESETN 110
+#define R9A08G046_RSCI3_PRESETN 111
+#define R9A08G046_RSCI0_TRESETN 112
+#define R9A08G046_RSCI1_TRESETN 113
+#define R9A08G046_RSCI2_TRESETN 114
+#define R9A08G046_RSCI3_TRESETN 115
+#define R9A08G046_LVDS_RESET_N 116
+#define R9A08G046_BSC_X_PRESET_BSC 117
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A08G046_CPG_H__ */
diff --git a/include/dt-bindings/clock/rockchip,rv1103b-cru.h b/include/dt-bindings/clock/rockchip,rv1103b-cru.h
new file mode 100644
index 000000000000..35afdee7e961
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1103b-cru.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1103B_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1103B_H
+
+#define PLL_GPLL 0
+#define ARMCLK 1
+#define PLL_DPLL 2
+#define XIN_OSC0_HALF 3
+#define CLK_GPLL_DIV24 4
+#define CLK_GPLL_DIV12 5
+#define CLK_GPLL_DIV6 6
+#define CLK_GPLL_DIV4 7
+#define CLK_GPLL_DIV3 8
+#define CLK_GPLL_DIV2P5 9
+#define CLK_GPLL_DIV2 10
+#define CLK_UART0_SRC 11
+#define CLK_UART1_SRC 12
+#define CLK_UART2_SRC 13
+#define CLK_UART0_FRAC 14
+#define CLK_UART1_FRAC 15
+#define CLK_UART2_FRAC 16
+#define CLK_SAI_SRC 17
+#define CLK_SAI_FRAC 18
+#define LSCLK_NPU_SRC 19
+#define CLK_NPU_SRC 20
+#define ACLK_VEPU_SRC 21
+#define CLK_VEPU_SRC 22
+#define ACLK_VI_SRC 23
+#define CLK_ISP_SRC 24
+#define DCLK_VICAP 25
+#define CCLK_EMMC 26
+#define CCLK_SDMMC0 27
+#define SCLK_SFC_2X 28
+#define LSCLK_PERI_SRC 29
+#define ACLK_PERI_SRC 30
+#define HCLK_HPMCU 31
+#define SCLK_UART0 32
+#define SCLK_UART1 33
+#define SCLK_UART2 34
+#define CLK_I2C_PMU 35
+#define CLK_I2C_PERI 36
+#define CLK_SPI0 37
+#define CLK_PWM0_SRC 38
+#define CLK_PWM1 39
+#define CLK_PWM2 40
+#define DCLK_DECOM_SRC 41
+#define CCLK_SDMMC1 42
+#define CLK_CORE_CRYPTO 43
+#define CLK_PKA_CRYPTO 44
+#define CLK_CORE_RGA 45
+#define MCLK_SAI_SRC 46
+#define CLK_FREQ_PWM0_SRC 47
+#define CLK_COUNTER_PWM0_SRC 48
+#define PCLK_TOP_ROOT 49
+#define CLK_REF_MIPI0 50
+#define CLK_MIPI0_OUT2IO 51
+#define CLK_REF_MIPI1 52
+#define CLK_MIPI1_OUT2IO 53
+#define MCLK_SAI_OUT2IO 54
+#define ACLK_NPU_ROOT 55
+#define HCLK_RKNN 56
+#define ACLK_RKNN 57
+#define LSCLK_VEPU_ROOT 58
+#define HCLK_VEPU 59
+#define ACLK_VEPU 60
+#define CLK_CORE_VEPU 61
+#define PCLK_IOC_VCCIO3 62
+#define PCLK_ACODEC 63
+#define PCLK_USBPHY 64
+#define LSCLK_VI_100M 65
+#define LSCLK_VI_ROOT 66
+#define HCLK_ISP 67
+#define ACLK_ISP 68
+#define CLK_CORE_ISP 69
+#define ACLK_VICAP 70
+#define HCLK_VICAP 71
+#define ISP0CLK_VICAP 72
+#define PCLK_CSI2HOST0 73
+#define PCLK_CSI2HOST1 74
+#define HCLK_EMMC 75
+#define HCLK_SFC 76
+#define HCLK_SFC_XIP 77
+#define HCLK_SDMMC0 78
+#define PCLK_CSIPHY 79
+#define PCLK_GPIO1 80
+#define DBCLK_GPIO1 81
+#define PCLK_IOC_VCCIO47 82
+#define LSCLK_DDR_ROOT 83
+#define CLK_TIMER_DDRMON 84
+#define LSCLK_PMU_ROOT 85
+#define PCLK_PMU 86
+#define XIN_RC_DIV 87
+#define CLK_32K 88
+#define PCLK_PMU_GPIO0 89
+#define DBCLK_PMU_GPIO0 90
+#define CLK_DDR_FAIL_SAFE 91
+#define PCLK_PMU_HP_TIMER 92
+#define CLK_PMU_32K_HP_TIMER 93
+#define PCLK_PWM0 94
+#define CLK_PWM0 95
+#define CLK_OSC_PWM0 96
+#define CLK_RC_PWM0 97
+#define CLK_FREQ_PWM0 98
+#define CLK_COUNTER_PWM0 99
+#define PCLK_I2C0 100
+#define CLK_I2C0 101
+#define PCLK_UART0 102
+#define PCLK_IOC_PMUIO0 103
+#define CLK_REFOUT 104
+#define CLK_PREROLL 105
+#define CLK_PREROLL_32K 106
+#define CLK_LPMCU_PMU 107
+#define PCLK_SPI2AHB 108
+#define HCLK_SPI2AHB 109
+#define SCLK_SPI2AHB 110
+#define PCLK_WDT_LPMCU 111
+#define TCLK_WDT_LPMCU 112
+#define HCLK_SFC_PMU1 113
+#define HCLK_SFC_XIP_PMU1 114
+#define SCLK_SFC_2X_PMU1 115
+#define CLK_LPMCU 116
+#define CLK_LPMCU_RTC 117
+#define PCLK_LPMCU_MAILBOX 118
+#define PCLK_IOC_PMUIO1 119
+#define PCLK_CRU_PMU1 120
+#define PCLK_PERI_ROOT 121
+#define PCLK_RTC_ROOT 122
+#define CLK_TIMER_ROOT 123
+#define PCLK_TIMER 124
+#define CLK_TIMER0 125
+#define CLK_TIMER1 126
+#define CLK_TIMER2 127
+#define CLK_TIMER3 128
+#define CLK_TIMER4 129
+#define CLK_TIMER5 130
+#define PCLK_STIMER 131
+#define CLK_STIMER0 132
+#define CLK_STIMER1 133
+#define PCLK_WDT_NS 134
+#define TCLK_WDT_NS 135
+#define PCLK_WDT_S 136
+#define TCLK_WDT_S 137
+#define PCLK_WDT_HPMCU 138
+#define TCLK_WDT_HPMCU 139
+#define PCLK_I2C1 140
+#define CLK_I2C1 141
+#define PCLK_I2C2 142
+#define CLK_I2C2 143
+#define PCLK_I2C3 144
+#define CLK_I2C3 145
+#define PCLK_I2C4 146
+#define CLK_I2C4 147
+#define PCLK_SPI0 148
+#define PCLK_PWM1 149
+#define CLK_OSC_PWM1 150
+#define PCLK_PWM2 151
+#define CLK_OSC_PWM2 152
+#define PCLK_UART2 153
+#define PCLK_UART1 154
+#define ACLK_RKDMA 155
+#define PCLK_TSADC 156
+#define CLK_TSADC 157
+#define CLK_TSADC_TSEN 158
+#define PCLK_SARADC 159
+#define CLK_SARADC 160
+#define PCLK_GPIO2 161
+#define DBCLK_GPIO2 162
+#define PCLK_IOC_VCCIO6 163
+#define ACLK_USBOTG 164
+#define CLK_REF_USBOTG 165
+#define HCLK_SDMMC1 166
+#define HCLK_SAI 167
+#define MCLK_SAI 168
+#define ACLK_CRYPTO 169
+#define HCLK_CRYPTO 170
+#define HCLK_RK_RNG_NS 171
+#define HCLK_RK_RNG_S 172
+#define PCLK_OTPC_NS 173
+#define CLK_OTPC_ROOT_NS 174
+#define CLK_SBPI_OTPC_NS 175
+#define CLK_USER_OTPC_NS 176
+#define PCLK_OTPC_S 177
+#define CLK_OTPC_ROOT_S 178
+#define CLK_SBPI_OTPC_S 179
+#define CLK_USER_OTPC_S 180
+#define CLK_OTPC_ARB 181
+#define PCLK_OTP_MASK 182
+#define HCLK_RGA 183
+#define ACLK_RGA 184
+#define ACLK_MAC 185
+#define PCLK_MAC 186
+#define CLK_MACPHY 187
+#define ACLK_SPINLOCK 188
+#define HCLK_CACHE 189
+#define PCLK_HPMCU_MAILBOX 190
+#define PCLK_HPMCU_INTMUX 191
+#define CLK_HPMCU 192
+#define CLK_HPMCU_RTC 193
+#define DCLK_DECOM 194
+#define ACLK_DECOM 195
+#define PCLK_DECOM 196
+#define ACLK_SYS_SRAM 197
+#define PCLK_DMA2DDR 198
+#define ACLK_DMA2DDR 199
+#define PCLK_DCF 200
+#define ACLK_DCF 201
+#define MCLK_ACODEC_TX 202
+#define SCLK_UART0_SRC 203
+#define SCLK_UART1_SRC 204
+#define SCLK_UART2_SRC 205
+#define XIN_RC_SRC 206
+#define CLK_UTMI_USBOTG 207
+#define CLK_REF_USBPHY 208
+
+#endif // _DT_BINDINGS_CLK_ROCKCHIP_RV1103B_H
diff --git a/include/dt-bindings/interconnect/qcom,eliza-rpmh.h b/include/dt-bindings/interconnect/qcom,eliza-rpmh.h
new file mode 100644
index 000000000000..95db2fe647de
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,eliza-rpmh.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_ELIZA_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_ELIZA_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_UFS_MEM 2
+#define MASTER_USB3_0 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_QUP_2 0
+#define MASTER_CRYPTO 1
+#define MASTER_IPA 2
+#define MASTER_SOCCP_AGGR_NOC 3
+#define MASTER_QDSS_ETR 4
+#define MASTER_QDSS_ETR_1 5
+#define MASTER_SDCC_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_1 0
+#define MASTER_QUP_CORE_2 1
+#define SLAVE_QUP_CORE_1 2
+#define SLAVE_QUP_CORE_2 3
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_CRYPTO_0_CFG 5
+#define SLAVE_DISPLAY_CFG 6
+#define SLAVE_GFX3D_CFG 7
+#define SLAVE_I3C_IBI0_CFG 8
+#define SLAVE_I3C_IBI1_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_CNOC_MSS 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PRNG 13
+#define SLAVE_QDSS_CFG 14
+#define SLAVE_QSPI_0 15
+#define SLAVE_QUP_1 16
+#define SLAVE_QUP_2 17
+#define SLAVE_SDCC_2 18
+#define SLAVE_TCSR 19
+#define SLAVE_TLMM 20
+#define SLAVE_UFS_MEM_CFG 21
+#define SLAVE_USB3_0 22
+#define SLAVE_VENUS_CFG 23
+#define SLAVE_VSENSE_CTRL_CFG 24
+#define SLAVE_CNOC_MNOC_HF_CFG 25
+#define SLAVE_CNOC_MNOC_SF_CFG 26
+#define SLAVE_PCIE_ANOC_CFG 27
+#define SLAVE_QDSS_STM 28
+#define SLAVE_TCU 29
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_SOCCP 5
+#define SLAVE_TME_CFG 6
+#define SLAVE_APPSS 7
+#define SLAVE_CNOC_CFG 8
+#define SLAVE_DDRSS_CFG 9
+#define SLAVE_BOOT_IMEM 10
+#define SLAVE_IMEM 11
+#define SLAVE_BOOT_IMEM_2 12
+#define SLAVE_SERVICE_CNOC 13
+#define SLAVE_PCIE_0 14
+#define SLAVE_PCIE_1 15
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_WLAN_Q6 11
+#define MASTER_GIC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_NRT_ICP_SF 0
+#define MASTER_CAMNOC_RT_CDM_SF 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_VIDEO_MVP 3
+#define MASTER_VIDEO_V_PROC 4
+#define MASTER_CNOC_MNOC_SF_CFG 5
+#define MASTER_CAMNOC_HF 6
+#define MASTER_MDP 7
+#define MASTER_CNOC_MNOC_HF_CFG 8
+#define SLAVE_MNOC_SF_MEM_NOC 9
+#define SLAVE_SERVICE_MNOC_SF 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC_HF 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_CNOC_SNOC 2
+#define MASTER_NSINOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#endif
diff --git a/include/dt-bindings/power/allwinner,sun60i-a733-pck-600.h b/include/dt-bindings/power/allwinner,sun60i-a733-pck-600.h
new file mode 100644
index 000000000000..cf476a005b55
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun60i-a733-pck-600.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN60I_A733_PCK600_H_
+#define _DT_BINDINGS_POWER_SUN60I_A733_PCK600_H_
+
+#define PD_VI 0
+#define PD_DE_SYS 1
+#define PD_VE_DEC 2
+#define PD_VE_ENC 3
+#define PD_NPU 4
+#define PD_GPU_TOP 5
+#define PD_GPU_CORE 6
+#define PD_PCIE 7
+#define PD_USB2 8
+#define PD_VO 9
+#define PD_VO1 10
+
+#endif /* _DT_BINDINGS_POWER_SUN60I_A733_PCK600_H_ */
diff --git a/include/dt-bindings/power/marvell,pxa1908-power.h b/include/dt-bindings/power/marvell,pxa1908-power.h
index 19b088351af1..173f47e0e69d 100644
--- a/include/dt-bindings/power/marvell,pxa1908-power.h
+++ b/include/dt-bindings/power/marvell,pxa1908-power.h
@@ -13,5 +13,6 @@
#define PXA1908_POWER_DOMAIN_GPU2D 2
#define PXA1908_POWER_DOMAIN_DSI 3
#define PXA1908_POWER_DOMAIN_ISP 4
+#define PXA1908_POWER_DOMAIN_AUDIO 5
#endif
diff --git a/include/dt-bindings/power/mediatek,mt8189-power.h b/include/dt-bindings/power/mediatek,mt8189-power.h
new file mode 100644
index 000000000000..70a8c2113457
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8189-power.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Author: Qiqi Wang <qiqi.wang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8189_POWER_H
+#define _DT_BINDINGS_POWER_MT8189_POWER_H
+
+/* SPM */
+#define MT8189_POWER_DOMAIN_CONN 0
+#define MT8189_POWER_DOMAIN_AUDIO 1
+#define MT8189_POWER_DOMAIN_ADSP_TOP_DORMANT 2
+#define MT8189_POWER_DOMAIN_ADSP_INFRA 3
+#define MT8189_POWER_DOMAIN_ADSP_AO 4
+#define MT8189_POWER_DOMAIN_MM_INFRA 5
+#define MT8189_POWER_DOMAIN_ISP_IMG1 6
+#define MT8189_POWER_DOMAIN_ISP_IMG2 7
+#define MT8189_POWER_DOMAIN_ISP_IPE 8
+#define MT8189_POWER_DOMAIN_VDE0 9
+#define MT8189_POWER_DOMAIN_VEN0 10
+#define MT8189_POWER_DOMAIN_CAM_MAIN 11
+#define MT8189_POWER_DOMAIN_CAM_SUBA 12
+#define MT8189_POWER_DOMAIN_CAM_SUBB 13
+#define MT8189_POWER_DOMAIN_MDP0 14
+#define MT8189_POWER_DOMAIN_DISP 15
+#define MT8189_POWER_DOMAIN_DP_TX 16
+#define MT8189_POWER_DOMAIN_CSI_RX 17
+#define MT8189_POWER_DOMAIN_SSUSB 18
+#define MT8189_POWER_DOMAIN_MFG0 19
+#define MT8189_POWER_DOMAIN_MFG1 20
+#define MT8189_POWER_DOMAIN_MFG2 21
+#define MT8189_POWER_DOMAIN_MFG3 22
+#define MT8189_POWER_DOMAIN_EDP_TX_DORMANT 23
+#define MT8189_POWER_DOMAIN_PCIE 24
+#define MT8189_POWER_DOMAIN_PCIE_PHY 25
+
+#endif /* _DT_BINDINGS_POWER_MT8189_POWER_H */
diff --git a/include/dt-bindings/power/mt7622-power.h b/include/dt-bindings/power/mt7622-power.h
index ffad81ad3d46..ec244989eeba 100644
--- a/include/dt-bindings/power/mt7622-power.h
+++ b/include/dt-bindings/power/mt7622-power.h
@@ -10,5 +10,6 @@
#define MT7622_POWER_DOMAIN_HIF0 1
#define MT7622_POWER_DOMAIN_HIF1 2
#define MT7622_POWER_DOMAIN_WB 3
+#define MT7622_POWER_DOMAIN_AUDIO 4
#endif /* _DT_BINDINGS_POWER_MT7622_POWER_H */
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
index 06851363ae0e..67e2634fdc99 100644
--- a/include/dt-bindings/power/qcom,rpmhpd.h
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -28,15 +28,20 @@
#define RPMHPD_XO 18
#define RPMHPD_NSP2 19
#define RPMHPD_GMXC 20
+#define RPMHPD_DCX 21
+#define RPMHPD_GBX 22
/* RPMh Power Domain performance levels */
#define RPMH_REGULATOR_LEVEL_RETENTION 16
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D3_0 49
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D3 50
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2_1 51
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1_1 54
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1_0 55
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0_0 59
#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
#define RPMH_REGULATOR_LEVEL_LOW_SVS_P1 72
@@ -47,6 +52,7 @@
#define RPMH_REGULATOR_LEVEL_SVS_L0 144
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
#define RPMH_REGULATOR_LEVEL_SVS_L2 224
+#define RPMH_REGULATOR_LEVEL_SVS_L2_0 225
#define RPMH_REGULATOR_LEVEL_NOM 256
#define RPMH_REGULATOR_LEVEL_NOM_L0 288
#define RPMH_REGULATOR_LEVEL_NOM_L1 320
@@ -54,8 +60,14 @@
#define RPMH_REGULATOR_LEVEL_TURBO 384
#define RPMH_REGULATOR_LEVEL_TURBO_L0 400
#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_TURBO_L1_0 417
+#define RPMH_REGULATOR_LEVEL_TURBO_L1_1 418
+#define RPMH_REGULATOR_LEVEL_TURBO_L1_2 419
#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
+#define RPMH_REGULATOR_LEVEL_TURBO_L3_0 449
+#define RPMH_REGULATOR_LEVEL_TURBO_L3_1 450
+#define RPMH_REGULATOR_LEVEL_TURBO_L3_2 451
#define RPMH_REGULATOR_LEVEL_TURBO_L4 452
#define RPMH_REGULATOR_LEVEL_TURBO_L5 456
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
diff --git a/include/dt-bindings/reset/cix,sky1-s5-system-control.h b/include/dt-bindings/reset/cix,sky1-s5-system-control.h
new file mode 100644
index 000000000000..808bbcbe0c98
--- /dev/null
+++ b/include/dt-bindings/reset/cix,sky1-s5-system-control.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Author: Jerry Zhu <jerry.zhu@cixtech.com> */
+#ifndef DT_BINDING_RESET_CIX_SKY1_S5_SYSTEM_CONTROL_H
+#define DT_BINDING_RESET_CIX_SKY1_S5_SYSTEM_CONTROL_H
+
+/* reset for csu_pm */
+#define SKY1_CSU_PM_RESET_N 0
+#define SKY1_SENSORFUSION_RESET_N 1
+#define SKY1_SENSORFUSION_NOC_RESET_N 2
+
+/* reset group0 for s0 domain modules */
+#define SKY1_DDRC_RESET_N 3
+#define SKY1_GIC_RESET_N 4
+#define SKY1_CI700_RESET_N 5
+#define SKY1_SYS_NI700_RESET_N 6
+#define SKY1_MM_NI700_RESET_N 7
+#define SKY1_PCIE_NI700_RESET_N 8
+#define SKY1_GPU_RESET_N 9
+#define SKY1_NPUTOP_RESET_N 10
+#define SKY1_NPUCORE0_RESET_N 11
+#define SKY1_NPUCORE1_RESET_N 12
+#define SKY1_NPUCORE2_RESET_N 13
+#define SKY1_VPU_RESET_N 14
+#define SKY1_ISP_SRESET_N 15
+#define SKY1_ISP_ARESET_N 16
+#define SKY1_ISP_HRESET_N 17
+#define SKY1_ISP_GDCRESET_N 18
+#define SKY1_DPU_RESET0_N 19
+#define SKY1_DPU_RESET1_N 20
+#define SKY1_DPU_RESET2_N 21
+#define SKY1_DPU_RESET3_N 22
+#define SKY1_DPU_RESET4_N 23
+#define SKY1_DP_RESET0_N 24
+#define SKY1_DP_RESET1_N 25
+#define SKY1_DP_RESET2_N 26
+#define SKY1_DP_RESET3_N 27
+#define SKY1_DP_RESET4_N 28
+#define SKY1_DP_PHY_RST_N 29
+
+/* reset group1 for s0 domain modules */
+#define SKY1_AUDIO_HIFI5_RESET_N 30
+#define SKY1_AUDIO_HIFI5_NOC_RESET_N 31
+#define SKY1_CSIDPHY_PRST0_N 32
+#define SKY1_CSIDPHY_CMNRST0_N 33
+#define SKY1_CSI0_RST_N 34
+#define SKY1_CSIDPHY_PRST1_N 35
+#define SKY1_CSIDPHY_CMNRST1_N 36
+#define SKY1_CSI1_RST_N 37
+#define SKY1_CSI2_RST_N 38
+#define SKY1_CSI3_RST_N 39
+#define SKY1_CSIBRDGE0_RST_N 40
+#define SKY1_CSIBRDGE1_RST_N 41
+#define SKY1_CSIBRDGE2_RST_N 42
+#define SKY1_CSIBRDGE3_RST_N 43
+#define SKY1_GMAC0_RST_N 44
+#define SKY1_GMAC1_RST_N 45
+#define SKY1_PCIE0_RESET_N 46
+#define SKY1_PCIE1_RESET_N 47
+#define SKY1_PCIE2_RESET_N 48
+#define SKY1_PCIE3_RESET_N 49
+#define SKY1_PCIE4_RESET_N 50
+
+/* reset group1 for usb phys */
+#define SKY1_USB_DP_PHY0_PRST_N 51
+#define SKY1_USB_DP_PHY1_PRST_N 52
+#define SKY1_USB_DP_PHY2_PRST_N 53
+#define SKY1_USB_DP_PHY3_PRST_N 54
+#define SKY1_USB_DP_PHY0_RST_N 55
+#define SKY1_USB_DP_PHY1_RST_N 56
+#define SKY1_USB_DP_PHY2_RST_N 57
+#define SKY1_USB_DP_PHY3_RST_N 58
+#define SKY1_USBPHY_SS_PST_N 59
+#define SKY1_USBPHY_SS_RST_N 60
+#define SKY1_USBPHY_HS0_PRST_N 61
+#define SKY1_USBPHY_HS1_PRST_N 62
+#define SKY1_USBPHY_HS2_PRST_N 63
+#define SKY1_USBPHY_HS3_PRST_N 64
+#define SKY1_USBPHY_HS4_PRST_N 65
+#define SKY1_USBPHY_HS5_PRST_N 66
+#define SKY1_USBPHY_HS6_PRST_N 67
+#define SKY1_USBPHY_HS7_PRST_N 68
+#define SKY1_USBPHY_HS8_PRST_N 69
+#define SKY1_USBPHY_HS9_PRST_N 70
+
+/* reset group1 for usb controllers */
+#define SKY1_USBC_SS0_PRST_N 71
+#define SKY1_USBC_SS1_PRST_N 72
+#define SKY1_USBC_SS2_PRST_N 73
+#define SKY1_USBC_SS3_PRST_N 74
+#define SKY1_USBC_SS4_PRST_N 75
+#define SKY1_USBC_SS5_PRST_N 76
+#define SKY1_USBC_SS0_RST_N 77
+#define SKY1_USBC_SS1_RST_N 78
+#define SKY1_USBC_SS2_RST_N 79
+#define SKY1_USBC_SS3_RST_N 80
+#define SKY1_USBC_SS4_RST_N 81
+#define SKY1_USBC_SS5_RST_N 82
+#define SKY1_USBC_HS0_PRST_N 83
+#define SKY1_USBC_HS1_PRST_N 84
+#define SKY1_USBC_HS2_PRST_N 85
+#define SKY1_USBC_HS3_PRST_N 86
+#define SKY1_USBC_HS0_RST_N 87
+#define SKY1_USBC_HS1_RST_N 88
+#define SKY1_USBC_HS2_RST_N 89
+#define SKY1_USBC_HS3_RST_N 90
+
+/* reset group0 for rcsu */
+#define SKY1_AUDIO_RCSU_RESET_N 91
+#define SKY1_CI700_RCSU_RESET_N 92
+#define SKY1_CSI_RCSU0_RESET_N 93
+#define SKY1_CSI_RCSU1_RESET_N 94
+#define SKY1_CSU_PM_RCSU_RESET_N 95
+#define SKY1_DDR_BROADCAST_RCSU_RESET_N 96
+#define SKY1_DDR_CTRL_RCSU_0_RESET_N 97
+#define SKY1_DDR_CTRL_RCSU_1_RESET_N 98
+#define SKY1_DDR_CTRL_RCSU_2_RESET_N 99
+#define SKY1_DDR_CTRL_RCSU_3_RESET_N 100
+#define SKY1_DDR_TZC400_RCSU_0_RESET_N 101
+#define SKY1_DDR_TZC400_RCSU_1_RESET_N 102
+#define SKY1_DDR_TZC400_RCSU_2_RESET_N 103
+#define SKY1_DDR_TZC400_RCSU_3_RESET_N 104
+#define SKY1_DP0_RCSU_RESET_N 105
+#define SKY1_DP1_RCSU_RESET_N 106
+#define SKY1_DP2_RCSU_RESET_N 107
+#define SKY1_DP3_RCSU_RESET_N 108
+#define SKY1_DP4_RCSU_RESET_N 109
+#define SKY1_DPU0_RCSU_RESET_N 110
+#define SKY1_DPU1_RCSU_RESET_N 111
+#define SKY1_DPU2_RCSU_RESET_N 112
+#define SKY1_DPU3_RCSU_RESET_N 113
+#define SKY1_DPU4_RCSU_RESET_N 114
+#define SKY1_DSU_RCSU_RESET_N 115
+#define SKY1_FCH_RCSU_RESET_N 116
+#define SKY1_GICD_RCSU_RESET_N 117
+#define SKY1_GMAC_RCSU_RESET_N 118
+#define SKY1_GPU_RCSU_RESET_N 119
+#define SKY1_ISP_RCSU0_RESET_N 120
+#define SKY1_ISP_RCSU1_RESET_N 121
+#define SKY1_NI700_MMHUB_RCSU_RESET_N 122
+
+/* reset group1 for rcsu */
+#define SKY1_NPU_RCSU_RESET_N 123
+#define SKY1_NI700_PCIE_RCSU_RESET_N 124
+#define SKY1_PCIE_X421_RCSU_RESET_N 125
+#define SKY1_PCIE_X8_RCSU_RESET_N 126
+#define SKY1_SF_RCSU_RESET_N 127
+#define SKY1_RCSU_SMMU_MMHUB_RESET_N 128
+#define SKY1_RCSU_SMMU_PCIEHUB_RESET_N 129
+#define SKY1_RCSU_SYSHUB_RESET_N 130
+#define SKY1_NI700_SMN_RCSU_RESET_N 131
+#define SKY1_NI700_SYSHUB_RCSU_RESET_N 132
+#define SKY1_RCSU_USB2_HOST0_RESET_N 133
+#define SKY1_RCSU_USB2_HOST1_RESET_N 134
+#define SKY1_RCSU_USB2_HOST2_RESET_N 135
+#define SKY1_RCSU_USB2_HOST3_RESET_N 136
+#define SKY1_RCSU_USB3_TYPEA_DRD_RESET_N 137
+#define SKY1_RCSU_USB3_TYPEC_DRD_RESET_N 138
+#define SKY1_RCSU_USB3_TYPEC_HOST0_RESET_N 139
+#define SKY1_RCSU_USB3_TYPEC_HOST1_RESET_N 140
+#define SKY1_RCSU_USB3_TYPEC_HOST2_RESET_N 141
+#define SKY1_VPU_RCSU_RESET_N 142
+
+#endif
diff --git a/include/dt-bindings/reset/cix,sky1-system-control.h b/include/dt-bindings/reset/cix,sky1-system-control.h
new file mode 100644
index 000000000000..7a16fc4ef3b5
--- /dev/null
+++ b/include/dt-bindings/reset/cix,sky1-system-control.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Author: Jerry Zhu <jerry.zhu@cixtech.com> */
+#ifndef DT_BINDING_RESET_CIX_SKY1_SYSTEM_CONTROL_H
+#define DT_BINDING_RESET_CIX_SKY1_SYSTEM_CONTROL_H
+
+/* func reset for sky1 fch */
+#define SW_I3C0_RST_FUNC_G_N 0
+#define SW_I3C0_RST_FUNC_I_N 1
+#define SW_I3C1_RST_FUNC_G_N 2
+#define SW_I3C1_RST_FUNC_I_N 3
+#define SW_UART0_RST_FUNC_N 4
+#define SW_UART1_RST_FUNC_N 5
+#define SW_UART2_RST_FUNC_N 6
+#define SW_UART3_RST_FUNC_N 7
+#define SW_TIMER_RST_FUNC_N 8
+
+/* apb reset for sky1 fch */
+#define SW_I3C0_RST_APB_N 9
+#define SW_I3C1_RST_APB_N 10
+#define SW_DMA_RST_AXI_N 11
+#define SW_UART0_RST_APB_N 12
+#define SW_UART1_RST_APB_N 13
+#define SW_UART2_RST_APB_N 14
+#define SW_UART3_RST_APB_N 15
+#define SW_SPI0_RST_APB_N 16
+#define SW_SPI1_RST_APB_N 17
+#define SW_I2C0_RST_APB_N 18
+#define SW_I2C1_RST_APB_N 19
+#define SW_I2C2_RST_APB_N 20
+#define SW_I2C3_RST_APB_N 21
+#define SW_I2C4_RST_APB_N 22
+#define SW_I2C5_RST_APB_N 23
+#define SW_I2C6_RST_APB_N 24
+#define SW_I2C7_RST_APB_N 25
+#define SW_GPIO_RST_APB_N 26
+
+/* fch rst for xspi */
+#define SW_XSPI_REG_RST_N 27
+#define SW_XSPI_SYS_RST_N 28
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq5210-gcc.h b/include/dt-bindings/reset/qcom,ipq5210-gcc.h
new file mode 100644
index 000000000000..09890a09087c
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq5210-gcc.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_IPQ5210_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_IPQ5210_H
+
+#define GCC_ADSS_BCR 0
+#define GCC_ADSS_PWM_ARES 1
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 2
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_GPLL0_ARES 3
+#define GCC_APSS_AHB_ARES 4
+#define GCC_APSS_ATB_ARES 5
+#define GCC_APSS_AXI_ARES 6
+#define GCC_APSS_TS_ARES 7
+#define GCC_BOOT_ROM_AHB_ARES 8
+#define GCC_BOOT_ROM_BCR 9
+#define GCC_GEPHY_BCR 10
+#define GCC_GEPHY_SYS_ARES 11
+#define GCC_GP1_ARES 12
+#define GCC_GP2_ARES 13
+#define GCC_GP3_ARES 14
+#define GCC_MDIO_AHB_ARES 15
+#define GCC_MDIO_BCR 16
+#define GCC_MDIO_GEPHY_AHB_ARES 17
+#define GCC_NSS_BCR 18
+#define GCC_NSS_TS_ARES 19
+#define GCC_NSSCC_ARES 20
+#define GCC_NSSCFG_ARES 21
+#define GCC_NSSNOC_ATB_ARES 22
+#define GCC_NSSNOC_MEMNOC_1_ARES 23
+#define GCC_NSSNOC_MEMNOC_ARES 24
+#define GCC_NSSNOC_NSSCC_ARES 25
+#define GCC_NSSNOC_PCNOC_1_ARES 26
+#define GCC_NSSNOC_QOSGEN_REF_ARES 27
+#define GCC_NSSNOC_SNOC_1_ARES 28
+#define GCC_NSSNOC_SNOC_ARES 29
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 30
+#define GCC_NSSNOC_XO_DCD_ARES 31
+#define GCC_PCIE0_AHB_ARES 32
+#define GCC_PCIE0_AUX_ARES 33
+#define GCC_PCIE0_AXI_M_ARES 34
+#define GCC_PCIE0_AXI_S_BRIDGE_ARES 35
+#define GCC_PCIE0_AXI_S_ARES 36
+#define GCC_PCIE0_BCR 37
+#define GCC_PCIE0_LINK_DOWN_BCR 38
+#define GCC_PCIE0_PHY_BCR 39
+#define GCC_PCIE0_PIPE_ARES 40
+#define GCC_PCIE0PHY_PHY_BCR 41
+#define GCC_PCIE1_AHB_ARES 42
+#define GCC_PCIE1_AUX_ARES 43
+#define GCC_PCIE1_AXI_M_ARES 44
+#define GCC_PCIE1_AXI_S_BRIDGE_ARES 45
+#define GCC_PCIE1_AXI_S_ARES 46
+#define GCC_PCIE1_BCR 47
+#define GCC_PCIE1_LINK_DOWN_BCR 48
+#define GCC_PCIE1_PHY_BCR 49
+#define GCC_PCIE1_PIPE_ARES 50
+#define GCC_PCIE1PHY_PHY_BCR 51
+#define GCC_QRNG_AHB_ARES 52
+#define GCC_QRNG_BCR 53
+#define GCC_QUPV3_2X_CORE_ARES 54
+#define GCC_QUPV3_AHB_MST_ARES 55
+#define GCC_QUPV3_AHB_SLV_ARES 56
+#define GCC_QUPV3_BCR 57
+#define GCC_QUPV3_CORE_ARES 58
+#define GCC_QUPV3_WRAP_SE0_ARES 59
+#define GCC_QUPV3_WRAP_SE0_BCR 60
+#define GCC_QUPV3_WRAP_SE1_ARES 61
+#define GCC_QUPV3_WRAP_SE1_BCR 62
+#define GCC_QUPV3_WRAP_SE2_ARES 63
+#define GCC_QUPV3_WRAP_SE2_BCR 64
+#define GCC_QUPV3_WRAP_SE3_ARES 65
+#define GCC_QUPV3_WRAP_SE3_BCR 66
+#define GCC_QUPV3_WRAP_SE4_ARES 67
+#define GCC_QUPV3_WRAP_SE4_BCR 68
+#define GCC_QUPV3_WRAP_SE5_ARES 69
+#define GCC_QUPV3_WRAP_SE5_BCR 70
+#define GCC_QUSB2_0_PHY_BCR 71
+#define GCC_SDCC1_AHB_ARES 72
+#define GCC_SDCC1_APPS_ARES 73
+#define GCC_SDCC1_ICE_CORE_ARES 74
+#define GCC_SDCC_BCR 75
+#define GCC_TLMM_AHB_ARES 76
+#define GCC_TLMM_ARES 77
+#define GCC_TLMM_BCR 78
+#define GCC_UNIPHY0_AHB_ARES 79
+#define GCC_UNIPHY0_BCR 80
+#define GCC_UNIPHY0_SYS_ARES 81
+#define GCC_UNIPHY1_AHB_ARES 82
+#define GCC_UNIPHY1_BCR 83
+#define GCC_UNIPHY1_SYS_ARES 84
+#define GCC_UNIPHY2_AHB_ARES 85
+#define GCC_UNIPHY2_BCR 86
+#define GCC_UNIPHY2_SYS_ARES 87
+#define GCC_USB0_AUX_ARES 88
+#define GCC_USB0_MASTER_ARES 89
+#define GCC_USB0_MOCK_UTMI_ARES 90
+#define GCC_USB0_PHY_BCR 91
+#define GCC_USB0_PHY_CFG_AHB_ARES 92
+#define GCC_USB0_PIPE_ARES 93
+#define GCC_USB0_SLEEP_ARES 94
+#define GCC_USB3PHY_0_PHY_BCR 95
+#define GCC_USB_BCR 96
+#define GCC_PCIE0_PIPE_RESET 97
+#define GCC_PCIE0_CORE_STICKY_RESET 98
+#define GCC_PCIE0_AXI_S_STICKY_RESET 99
+#define GCC_PCIE0_AXI_S_RESET 100
+#define GCC_PCIE0_AXI_M_STICKY_RESET 101
+#define GCC_PCIE0_AXI_M_RESET 102
+#define GCC_PCIE0_AUX_RESET 103
+#define GCC_PCIE0_AHB_RESET 104
+#define GCC_PCIE1_PIPE_RESET 105
+#define GCC_PCIE1_CORE_STICKY_RESET 106
+#define GCC_PCIE1_AXI_S_STICKY_RESET 107
+#define GCC_PCIE1_AXI_S_RESET 108
+#define GCC_PCIE1_AXI_M_STICKY_RESET 109
+#define GCC_PCIE1_AXI_M_RESET 110
+#define GCC_PCIE1_AUX_RESET 111
+#define GCC_PCIE1_AHB_RESET 112
+#define GCC_UNIPHY0_XPCS_ARES 113
+#define GCC_UNIPHY1_XPCS_ARES 114
+#define GCC_UNIPHY2_XPCS_ARES 115
+#define GCC_QDSS_BCR 116
+
+#endif
diff --git a/include/dt-bindings/reset/spacemit,k3-resets.h b/include/dt-bindings/reset/spacemit,k3-resets.h
index 79ac1c22b7b5..dc1ef009ba79 100644
--- a/include/dt-bindings/reset/spacemit,k3-resets.h
+++ b/include/dt-bindings/reset/spacemit,k3-resets.h
@@ -97,11 +97,11 @@
#define RESET_APMU_SDH0 13
#define RESET_APMU_SDH1 14
#define RESET_APMU_SDH2 15
-#define RESET_APMU_USB2 16
-#define RESET_APMU_USB3_PORTA 17
-#define RESET_APMU_USB3_PORTB 18
-#define RESET_APMU_USB3_PORTC 19
-#define RESET_APMU_USB3_PORTD 20
+#define RESET_APMU_USB2_AHB 16
+#define RESET_APMU_USB2_VCC 17
+#define RESET_APMU_USB2_PHY 18
+#define RESET_APMU_USB3_A_AHB 19
+#define RESET_APMU_USB3_A_VCC 20
#define RESET_APMU_QSPI 21
#define RESET_APMU_QSPI_BUS 22
#define RESET_APMU_DMA 23
@@ -132,8 +132,8 @@
#define RESET_APMU_CPU7_SW 48
#define RESET_APMU_C1_MPSUB_SW 49
#define RESET_APMU_MPSUB_DBG 50
-#define RESET_APMU_UCIE 51
-#define RESET_APMU_RCPU 52
+#define RESET_APMU_USB3_A_PHY 51 /* USB3 A */
+#define RESET_APMU_USB3_B_AHB 52
#define RESET_APMU_DSI4LN2_ESCCLK 53
#define RESET_APMU_DSI4LN2_LCD_SW 54
#define RESET_APMU_DSI4LN2_LCD_MCLK 55
@@ -143,16 +143,40 @@
#define RESET_APMU_UFS_ACLK 59
#define RESET_APMU_EDP0 60
#define RESET_APMU_EDP1 61
-#define RESET_APMU_PCIE_PORTA 62
-#define RESET_APMU_PCIE_PORTB 63
-#define RESET_APMU_PCIE_PORTC 64
-#define RESET_APMU_PCIE_PORTD 65
-#define RESET_APMU_PCIE_PORTE 66
+#define RESET_APMU_USB3_B_VCC 62 /* USB3 B */
+#define RESET_APMU_USB3_B_PHY 63
+#define RESET_APMU_USB3_C_AHB 64
+#define RESET_APMU_USB3_C_VCC 65
+#define RESET_APMU_USB3_C_PHY 66
#define RESET_APMU_EMAC0 67
#define RESET_APMU_EMAC1 68
#define RESET_APMU_EMAC2 69
#define RESET_APMU_ESPI_MCLK 70
#define RESET_APMU_ESPI_SCLK 71
+#define RESET_APMU_USB3_D_AHB 72 /* USB3 D */
+#define RESET_APMU_USB3_D_VCC 73
+#define RESET_APMU_USB3_D_PHY 74
+#define RESET_APMU_UCIE_IP 75
+#define RESET_APMU_UCIE_HOT 76
+#define RESET_APMU_UCIE_MON 77
+#define RESET_APMU_RCPU_AUDIO_SYS 78
+#define RESET_APMU_RCPU_MCU_CORE 79
+#define RESET_APMU_RCPU_AUDIO_APMU 80
+#define RESET_APMU_PCIE_A_DBI 81
+#define RESET_APMU_PCIE_A_SLAVE 82
+#define RESET_APMU_PCIE_A_MASTER 83
+#define RESET_APMU_PCIE_B_DBI 84
+#define RESET_APMU_PCIE_B_SLAVE 85
+#define RESET_APMU_PCIE_B_MASTER 86
+#define RESET_APMU_PCIE_C_DBI 87
+#define RESET_APMU_PCIE_C_SLAVE 88
+#define RESET_APMU_PCIE_C_MASTER 89
+#define RESET_APMU_PCIE_D_DBI 90
+#define RESET_APMU_PCIE_D_SLAVE 91
+#define RESET_APMU_PCIE_D_MASTER 92
+#define RESET_APMU_PCIE_E_DBI 93
+#define RESET_APMU_PCIE_E_SLAVE 94
+#define RESET_APMU_PCIE_E_MASTER 95
/* DCIU resets*/
#define RESET_DCIU_HDMA 0
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
index 6d1ce7f5da51..45850f2d4342 100644
--- a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -140,6 +140,18 @@
#define DISPLAY_PORT_RX_6 134
#define DISPLAY_PORT_RX_7 135
#define USB_RX 136
+#define LPI_MI2S_RX_0 137
+#define LPI_MI2S_TX_0 138
+#define LPI_MI2S_RX_1 139
+#define LPI_MI2S_TX_1 140
+#define LPI_MI2S_RX_2 141
+#define LPI_MI2S_TX_2 142
+#define LPI_MI2S_RX_3 143
+#define LPI_MI2S_TX_3 144
+#define LPI_MI2S_RX_4 145
+#define LPI_MI2S_TX_4 146
+#define SENARY_MI2S_RX 147
+#define SENARY_MI2S_TX 148
#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 1823a290a7b7..f9600f87186a 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -1533,4 +1533,10 @@ struct hv_mmio_write_input {
u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
} __packed;
+enum hv_intercept_access_type {
+ HV_INTERCEPT_ACCESS_READ = 0,
+ HV_INTERCEPT_ACCESS_WRITE = 1,
+ HV_INTERCEPT_ACCESS_EXECUTE = 2
+};
+
#endif /* _HV_HVGDK_MINI_H */
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
index 245f3db53bf1..5e83d3714966 100644
--- a/include/hyperv/hvhdk.h
+++ b/include/hyperv/hvhdk.h
@@ -779,7 +779,7 @@ struct hv_x64_intercept_message_header {
u32 vp_index;
u8 instruction_length:4;
u8 cr8:4; /* Only set for exo partitions */
- u8 intercept_access_type;
+ u8 intercept_access_type; /* enum hv_intercept_access_type */
union hv_x64_vp_execution_state execution_state;
struct hv_x64_segment_register cs_segment;
u64 rip;
@@ -825,7 +825,7 @@ union hv_arm64_vp_execution_state {
struct hv_arm64_intercept_message_header {
u32 vp_index;
u8 instruction_length;
- u8 intercept_access_type;
+ u8 intercept_access_type; /* enum hv_intercept_access_type */
union hv_arm64_vp_execution_state execution_state;
u64 pc;
u64 cpsr;
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 7310841f4512..bf8cc9589bd0 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -10,6 +10,8 @@
#include <linux/clocksource.h>
#include <linux/hrtimer.h>
+#include <linux/irqchip/arm-gic-v5.h>
+
enum kvm_arch_timers {
TIMER_PTIMER,
TIMER_VTIMER,
@@ -47,7 +49,7 @@ struct arch_timer_vm_data {
u64 poffset;
/* The PPI for each timer, global to the VM */
- u8 ppi[NR_KVM_TIMERS];
+ u32 ppi[NR_KVM_TIMERS];
};
struct arch_timer_context {
@@ -130,6 +132,10 @@ void kvm_timer_init_vhe(void);
#define timer_vm_data(ctx) (&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
+#define get_vgic_ppi(k, i) (((k)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V5) ? \
+ (i) : (FIELD_PREP(GICV5_HWIRQ_ID, i) | \
+ FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_PPI)))
+
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 96754b51b411..0a36a3d5c894 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -12,6 +12,9 @@
#define KVM_ARMV8_PMU_MAX_COUNTERS 32
+/* PPI #23 - architecturally specified for GICv5 */
+#define KVM_ARMV8_PMU_GICV5_IRQ 0x20000017
+
#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
@@ -38,7 +41,7 @@ struct arm_pmu_entry {
};
bool kvm_supports_guest_pmuv3(void);
-#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
+#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num != 0)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f2eafc65bbf4..1388dc6028a9 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -19,7 +19,9 @@
#include <linux/jump_label.h>
#include <linux/irqchip/arm-gic-v4.h>
+#include <linux/irqchip/arm-gic-v5.h>
+#define VGIC_V5_MAX_CPUS 512
#define VGIC_V3_MAX_CPUS 512
#define VGIC_V2_MAX_CPUS 8
#define VGIC_NR_IRQS_LEGACY 256
@@ -31,9 +33,96 @@
#define VGIC_MIN_LPI 8192
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
-#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
-#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
- (irq) <= VGIC_MAX_SPI)
+/*
+ * GICv5 supports 128 PPIs, but only the first 64 are architected. We only
+ * support the timers and PMU in KVM, both of which are architected. Rather than
+ * handling twice the state, we instead opt to only support the architected set
+ * in KVM for now. At a future stage, this can be bumped up to 128, if required.
+ */
+#define VGIC_V5_NR_PRIVATE_IRQS 64
+
+#define is_v5_type(t, i) (FIELD_GET(GICV5_HWIRQ_TYPE, (i)) == (t))
+
+#define __irq_is_sgi(t, i) \
+ ({ \
+ bool __ret; \
+ \
+ switch (t) { \
+ case KVM_DEV_TYPE_ARM_VGIC_V5: \
+ __ret = false; \
+ break; \
+ default: \
+ __ret = (i) < VGIC_NR_SGIS; \
+ } \
+ \
+ __ret; \
+ })
+
+#define __irq_is_ppi(t, i) \
+ ({ \
+ bool __ret; \
+ \
+ switch (t) { \
+ case KVM_DEV_TYPE_ARM_VGIC_V5: \
+ __ret = is_v5_type(GICV5_HWIRQ_TYPE_PPI, (i)); \
+ break; \
+ default: \
+ __ret = (i) >= VGIC_NR_SGIS; \
+ __ret &= (i) < VGIC_NR_PRIVATE_IRQS; \
+ } \
+ \
+ __ret; \
+ })
+
+#define __irq_is_spi(t, i) \
+ ({ \
+ bool __ret; \
+ \
+ switch (t) { \
+ case KVM_DEV_TYPE_ARM_VGIC_V5: \
+ __ret = is_v5_type(GICV5_HWIRQ_TYPE_SPI, (i)); \
+ break; \
+ default: \
+ __ret = (i) <= VGIC_MAX_SPI; \
+ __ret &= (i) >= VGIC_NR_PRIVATE_IRQS; \
+ } \
+ \
+ __ret; \
+ })
+
+#define __irq_is_lpi(t, i) \
+ ({ \
+ bool __ret; \
+ \
+ switch (t) { \
+ case KVM_DEV_TYPE_ARM_VGIC_V5: \
+ __ret = is_v5_type(GICV5_HWIRQ_TYPE_LPI, (i)); \
+ break; \
+ default: \
+ __ret = (i) >= 8192; \
+ } \
+ \
+ __ret; \
+ })
+
+#define irq_is_sgi(k, i) __irq_is_sgi((k)->arch.vgic.vgic_model, i)
+#define irq_is_ppi(k, i) __irq_is_ppi((k)->arch.vgic.vgic_model, i)
+#define irq_is_spi(k, i) __irq_is_spi((k)->arch.vgic.vgic_model, i)
+#define irq_is_lpi(k, i) __irq_is_lpi((k)->arch.vgic.vgic_model, i)
+
+#define irq_is_private(k, i) (irq_is_ppi(k, i) || irq_is_sgi(k, i))
+
+#define vgic_v5_get_hwirq_id(x) FIELD_GET(GICV5_HWIRQ_ID, (x))
+#define vgic_v5_set_hwirq_id(x) FIELD_PREP(GICV5_HWIRQ_ID, (x))
+
+#define __vgic_v5_set_type(t) (FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_##t))
+#define vgic_v5_make_ppi(x) (__vgic_v5_set_type(PPI) | vgic_v5_set_hwirq_id(x))
+#define vgic_v5_make_spi(x) (__vgic_v5_set_type(SPI) | vgic_v5_set_hwirq_id(x))
+#define vgic_v5_make_lpi(x) (__vgic_v5_set_type(LPI) | vgic_v5_set_hwirq_id(x))
+
+#define __vgic_is_v(k, v) ((k)->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V##v)
+#define vgic_is_v3(k) (__vgic_is_v(k, 3))
+#define vgic_is_v5(k) (__vgic_is_v(k, 5))
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
@@ -101,6 +190,8 @@ enum vgic_irq_config {
VGIC_CONFIG_LEVEL
};
+struct vgic_irq;
+
/*
* Per-irq ops overriding some common behavious.
*
@@ -119,6 +210,19 @@ struct irq_ops {
* peaking into the physical GIC.
*/
bool (*get_input_level)(int vintid);
+
+ /*
+ * Function pointer to override the queuing of an IRQ.
+ */
+ bool (*queue_irq_unlock)(struct kvm *kvm, struct vgic_irq *irq,
+ unsigned long flags) __releases(&irq->irq_lock);
+
+ /*
+ * Callback function pointer to either enable or disable direct
+ * injection for a mapped interrupt.
+ */
+ void (*set_direct_injection)(struct kvm_vcpu *vcpu,
+ struct vgic_irq *irq, bool direct);
};
struct vgic_irq {
@@ -238,6 +342,26 @@ struct vgic_redist_region {
struct list_head list;
};
+struct vgic_v5_vm {
+ /*
+ * We only expose a subset of PPIs to the guest. This subset is a
+ * combination of the PPIs that are actually implemented and what we
+ * actually choose to expose.
+ */
+ DECLARE_BITMAP(vgic_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS);
+
+ /* A mask of the PPIs that are exposed for userspace to drive. */
+ DECLARE_BITMAP(userspace_ppis, VGIC_V5_NR_PRIVATE_IRQS);
+
+ /*
+ * The HMR itself is handled by the hardware, but we still need to have
+ * a mask that we can use when merging in pending state (only the state
+	 * of Edge PPIs is merged back in from the guest and the HMR provides a
+ * convenient way to do that).
+ */
+ DECLARE_BITMAP(vgic_ppi_hmr, VGIC_V5_NR_PRIVATE_IRQS);
+};
+
struct vgic_dist {
bool in_kernel;
bool ready;
@@ -310,6 +434,11 @@ struct vgic_dist {
* else.
*/
struct its_vm its_vm;
+
+ /*
+ * GICv5 per-VM data.
+ */
+ struct vgic_v5_vm gicv5_vm;
};
struct vgic_v2_cpu_if {
@@ -340,11 +469,40 @@ struct vgic_v3_cpu_if {
unsigned int used_lrs;
};
+struct vgic_v5_cpu_if {
+ u64 vgic_apr;
+ u64 vgic_vmcr;
+
+ /* PPI register state */
+ DECLARE_BITMAP(vgic_ppi_dvir, VGIC_V5_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP(vgic_ppi_activer, VGIC_V5_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP(vgic_ppi_enabler, VGIC_V5_NR_PRIVATE_IRQS);
+ /* We have one byte (of which 5 bits are used) per PPI for priority */
+ u64 vgic_ppi_priorityr[VGIC_V5_NR_PRIVATE_IRQS / 8];
+
+ /*
+ * The ICSR is re-used across host and guest, and hence it needs to be
+ * saved/restored. Only one copy is required as the host should block
+ * preemption between executing GIC CDRCFG and acccessing the
+ * ICC_ICSR_EL1. A guest, of course, can never guarantee this, and hence
+ * it is the hyp's responsibility to keep the state constistent.
+ */
+ u64 vgic_icsr;
+
+ struct gicv5_vpe gicv5_vpe;
+};
+
+/* What PPI capabilities does a GICv5 host have */
+struct vgic_v5_ppi_caps {
+ DECLARE_BITMAP(impl_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS);
+};
+
struct vgic_cpu {
/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
+ struct vgic_v5_cpu_if vgic_v5;
};
struct vgic_irq *private_irqs;
@@ -392,13 +550,17 @@ int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
+void kvm_vgic_finalize_idregs(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
unsigned int intid, bool level, void *owner);
+void kvm_vgic_set_irq_ops(struct kvm_vcpu *vcpu, u32 vintid,
+ struct irq_ops *ops);
+void kvm_vgic_clear_irq_ops(struct kvm_vcpu *vcpu, u32 vintid);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, struct irq_ops *ops);
+ u32 vintid);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
@@ -414,8 +576,20 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
-#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
- ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
+#define vgic_valid_spi(k, i) \
+ ({ \
+ bool __ret = irq_is_spi(k, i); \
+ \
+ switch ((k)->arch.vgic.vgic_model) { \
+ case KVM_DEV_TYPE_ARM_VGIC_V5: \
+ __ret &= FIELD_GET(GICV5_HWIRQ_ID, i) < (k)->arch.vgic.nr_spis; \
+ break; \
+ default: \
+ __ret &= (i) < ((k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS); \
+ } \
+ \
+ __ret; \
+ })
bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
@@ -455,6 +629,11 @@ int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu);
+int vgic_v5_finalize_ppi_state(struct kvm *kvm);
+bool vgic_v5_ppi_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+ unsigned long flags);
+void vgic_v5_set_ppi_dvi(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool dvi);
+
bool vgic_state_is_nested(struct kvm_vcpu *vcpu);
/* CPU HP callbacks */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4d2f0bed7a06..bfacb9475aac 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -324,6 +324,17 @@ int acpi_unmap_cpu(int cpu);
acpi_handle acpi_get_processor_handle(int cpu);
+/**
+ * acpi_get_cpu_uid() - Get ACPI Processor UID from MADT table
+ * @cpu: Logical CPU number (0-based)
+ * @uid: Pointer to store ACPI Processor UID
+ *
+ * Return: 0 on success (ACPI Processor ID stored in *uid);
+ * -EINVAL if CPU number is invalid or out of range;
+ * -ENODEV if ACPI Processor UID for the CPU is not found.
+ */
+int acpi_get_cpu_uid(unsigned int cpu, u32 *uid);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -791,6 +802,14 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
int acpi_mrrm_max_mem_region(void);
#endif
+#define ACPI_CMOS_RTC_IDS \
+ { "PNP0B00", }, \
+ { "PNP0B01", }, \
+ { "PNP0B02", }, \
+ { "", }
+
+extern bool cmos_rtc_platform_device_present;
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -1116,6 +1135,8 @@ static inline int acpi_mrrm_max_mem_region(void)
return 1;
}
+#define cmos_rtc_platform_device_present false
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HMAT
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
index 7f00c5285a32..f92a36187a52 100644
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -5,6 +5,7 @@
#define __LINUX_ARM_MPAM_H
#include <linux/acpi.h>
+#include <linux/resctrl_types.h>
#include <linux/types.h>
struct mpam_msc;
@@ -49,6 +50,37 @@ static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
}
#endif
+bool resctrl_arch_alloc_capable(void);
+bool resctrl_arch_mon_capable(void);
+
+void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid);
+void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid);
+void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 rmid);
+void resctrl_arch_sched_in(struct task_struct *tsk);
+bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid);
+bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid);
+u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid);
+void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid);
+u32 resctrl_arch_system_num_rmid_idx(void);
+
+struct rdt_resource;
+void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, enum resctrl_event_id evtid);
+void resctrl_arch_mon_ctx_free(struct rdt_resource *r, enum resctrl_event_id evtid, void *ctx);
+
+/*
+ * The CPU configuration for MPAM is cheap to write, and is only written if it
+ * has changed. No need for fine grained enables.
+ */
+static inline void resctrl_arch_enable_mon(void) { }
+static inline void resctrl_arch_disable_mon(void) { }
+static inline void resctrl_arch_enable_alloc(void) { }
+static inline void resctrl_arch_disable_alloc(void) { }
+
+static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
+{
+ return val;
+}
+
/**
* mpam_register_requestor() - Register a requestor with the MPAM driver
* @partid_max: The maximum PARTID value the requestor can generate.
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 70807c679f1a..82a32526df64 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -309,17 +309,19 @@ struct atm_ioctl {
/**
* register_atm_ioctl - register handler for ioctl operations
+ * @ioctl: ioctl handler to register
*
* Special (non-device) handlers of ioctl's should
* register here. If you're a normal device, you should
* set .ioctl in your atmdev_ops instead.
*/
-void register_atm_ioctl(struct atm_ioctl *);
+void register_atm_ioctl(struct atm_ioctl *ioctl);
/**
* deregister_atm_ioctl - remove the ioctl handler
+ * @ioctl: ioctl handler to deregister
*/
-void deregister_atm_ioctl(struct atm_ioctl *);
+void deregister_atm_ioctl(struct atm_ioctl *ioctl);
/* register_atmdevice_notifier - register atm_dev notify events
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b642b5faca65..803b0183d98d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -15,7 +15,16 @@
#include <uapi/linux/audit.h>
#include <uapi/linux/fanotify.h>
-#define AUDIT_INO_UNSET ((unsigned long)-1)
+#define AUDIT_STATUS_ALL (AUDIT_STATUS_ENABLED | \
+ AUDIT_STATUS_FAILURE | \
+ AUDIT_STATUS_PID | \
+ AUDIT_STATUS_RATE_LIMIT | \
+ AUDIT_STATUS_BACKLOG_LIMIT | \
+ AUDIT_STATUS_BACKLOG_WAIT_TIME | \
+ AUDIT_STATUS_LOST | \
+ AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL)
+
+#define AUDIT_INO_UNSET ((u64)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
struct audit_sig_info {
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index 4086afd0cc6b..bc09b55e3682 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -271,6 +271,8 @@ struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
__devm_auxiliary_device_create(dev, KBUILD_MODNAME, devname, \
platform_data, 0)
+bool dev_is_auxiliary(struct device *dev);
+
/**
* module_auxiliary_driver() - Helper macro for registering an auxiliary driver
* @__auxiliary_driver: auxiliary driver struct
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c88fd4d37d1f..a06b93446d10 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -237,7 +237,7 @@ static inline void wb_get(struct bdi_writeback *wb)
}
/**
- * wb_put - decrement a wb's refcount
+ * wb_put_many - decrement a wb's refcount
* @wb: bdi_writeback to put
* @nr: number of references to put
*/
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0c8342747cab..5b7d12b40d5e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -136,6 +136,19 @@ static inline bool mapping_can_writeback(struct address_space *mapping)
return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
+/* Must not be used by file systems that support cgroup writeback */
+static inline int bdi_wb_dirty_exceeded(struct backing_dev_info *bdi)
+{
+ return bdi->wb.dirty_exceeded;
+}
+
+/* Must not be used by file systems that support cgroup writeback */
+static inline void bdi_wb_stat_mod(struct inode *inode, enum wb_stat_item item,
+ s64 amount)
+{
+ wb_stat_mod(&inode_to_bdi(inode)->wb, item, amount);
+}
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
index 1476a6ed1bfd..c939cd222730 100644
--- a/include/linux/backing-file.h
+++ b/include/linux/backing-file.h
@@ -18,10 +18,10 @@ struct backing_file_ctx {
void (*end_write)(struct kiocb *iocb, ssize_t);
};
-struct file *backing_file_open(const struct path *user_path, int flags,
+struct file *backing_file_open(const struct file *user_file, int flags,
const struct path *real_path,
const struct cred *cred);
-struct file *backing_tmpfile_open(const struct path *user_path, int flags,
+struct file *backing_tmpfile_open(const struct file *user_file, int flags,
const struct path *real_parentpath,
umode_t mode, const struct cred *cred);
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
index 21e4652dcfd2..af5178434ec6 100644
--- a/include/linux/bio-integrity.h
+++ b/include/linux/bio-integrity.h
@@ -78,7 +78,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
void bio_integrity_unmap_user(struct bio *bio);
-bool bio_integrity_prep(struct bio *bio);
+void bio_integrity_prep(struct bio *bio, unsigned int action);
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
void bio_integrity_trim(struct bio *bio);
int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
@@ -104,9 +104,8 @@ static inline void bio_integrity_unmap_user(struct bio *bio)
{
}
-static inline bool bio_integrity_prep(struct bio *bio)
+static inline void bio_integrity_prep(struct bio *bio, unsigned int action)
{
- return true;
}
static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
@@ -144,5 +143,12 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer);
void bio_integrity_free_buf(struct bio_integrity_payload *bip);
+void bio_integrity_setup_default(struct bio *bio);
+
+unsigned int fs_bio_integrity_alloc(struct bio *bio);
+void fs_bio_integrity_free(struct bio *bio);
+void fs_bio_integrity_generate(struct bio *bio);
+int fs_bio_integrity_verify(struct bio *bio, sector_t sector,
+ unsigned int size);
#endif /* _LINUX_BIO_INTEGRITY_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 36a3f2275ecd..97d747320b35 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -350,8 +350,7 @@ extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
- blk_opf_t opf, gfp_t gfp_mask,
- struct bio_set *bs);
+ blk_opf_t opf, gfp_t gfp, struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
@@ -433,6 +432,8 @@ extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_reuse(struct bio *bio, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
+void bio_await(struct bio *bio, void *priv,
+ void (*submit)(struct bio *bio, void *priv));
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
unsigned off);
@@ -474,7 +475,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
+int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter, size_t maxlen);
void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty);
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index b0395e4ccf90..b007d54a9036 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -46,6 +46,7 @@ struct device;
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
* bitmap_weighted_or(dst, src1, src2, nbits) *dst = *src1 | *src2. Returns Hamming Weight of dst
+ * bitmap_weighted_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2. Returns Hamming Weight of dst
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
@@ -57,6 +58,7 @@ struct device;
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
* bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
+ * bitmap_weight_from(src, start, end) Hamming Weight starting from @start
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
@@ -168,6 +170,8 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weighted_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
@@ -353,6 +357,18 @@ unsigned int bitmap_weighted_or(unsigned long *dst, const unsigned long *src1,
}
static __always_inline
+unsigned int bitmap_weighted_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits)) {
+ *dst = *src1 ^ *src2;
+ return hweight_long(*dst & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return __bitmap_weighted_xor(dst, src1, src2, nbits);
+ }
+}
+
+static __always_inline
void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
@@ -479,6 +495,38 @@ unsigned long bitmap_weight_andnot(const unsigned long *src1,
return __bitmap_weight_andnot(src1, src2, nbits);
}
+/**
+ * bitmap_weight_from - Hamming weight for a memory region
+ * @bitmap: The base address
+ * @start: The bit number at which to start weighting
+ * @end: the bitmap size in bits
+ *
+ * Returns the number of set bits in the region. If @start >= @end,
+ * returns @end.
+ */
+static __always_inline
+unsigned long bitmap_weight_from(const unsigned long *bitmap,
+ unsigned int start, unsigned int end)
+{
+ unsigned long w;
+
+ if (unlikely(start >= end))
+ return end;
+
+ if (small_const_nbits(end))
+ return hweight_long(*bitmap & GENMASK(end - 1, start));
+
+ bitmap += start / BITS_PER_LONG;
+ /* Opencode round_down() to not include math.h */
+ end -= start & ~(BITS_PER_LONG - 1);
+ start %= BITS_PER_LONG;
+ w = bitmap_weight(bitmap, end);
+ if (start)
+ w -= hweight_long(*bitmap & BITMAP_LAST_WORD_MASK(start));
+
+ return w;
+}
+
static __always_inline
void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index ea7898cc5903..657eab2725ce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -179,9 +179,11 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
/**
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
- * @index: 0 based bit index (0<=index<32) to sign bit
+ * @index: 0 based bit index (0 <= index < 32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
+ *
+ * Return: 32-bit sign extended value
*/
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
@@ -192,7 +194,11 @@ static __always_inline __s32 sign_extend32(__u32 value, int index)
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
- * @index: 0 based bit index (0<=index<64) to sign bit
+ * @index: 0 based bit index (0 <= index < 64) to sign bit
+ *
+ * This is safe to use for 32-, 16- and 8-bit types as well.
+ *
+ * Return: 64-bit sign extended value
*/
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
@@ -230,7 +236,7 @@ static inline int get_count_order_long(unsigned long l)
/**
* parity8 - get the parity of an u8 value
- * @value: the value to be examined
+ * @val: the value to be examined
*
* Determine the parity of the u8 argument.
*
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index c15b1ac62765..b1b530613c34 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -8,17 +8,13 @@
struct request;
-/*
- * Maximum contiguous integrity buffer allocation.
- */
-#define BLK_INTEGRITY_MAX_SIZE SZ_2M
-
enum blk_integrity_flags {
BLK_INTEGRITY_NOVERIFY = 1 << 0,
BLK_INTEGRITY_NOGENERATE = 1 << 1,
BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
BLK_INTEGRITY_REF_TAG = 1 << 3,
BLK_INTEGRITY_STACKED = 1 << 4,
+ BLK_SPLIT_INTERVAL_CAPABLE = 1 << 5,
};
const char *blk_integrity_profile_name(struct blk_integrity *bi);
@@ -180,4 +176,27 @@ static inline struct bio_vec rq_integrity_vec(struct request *rq)
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+enum bio_integrity_action {
+ BI_ACT_BUFFER = (1u << 0), /* allocate buffer */
+ BI_ACT_CHECK = (1u << 1), /* generate / verify PI */
+ BI_ACT_ZERO = (1u << 2), /* zero buffer */
+};
+
+/**
+ * bio_integrity_action - return the integrity action needed for a bio
+ * @bio: bio to operate on
+ *
+ * Returns the mask of integrity actions (BI_ACT_*) that need to be performed
+ * for @bio.
+ */
+unsigned int __bio_integrity_action(struct bio *bio);
+static inline unsigned int bio_integrity_action(struct bio *bio)
+{
+ if (!blk_get_integrity(bio->bi_bdev->bd_disk))
+ return 0;
+ if (bio_integrity(bio))
+ return 0;
+ return __bio_integrity_action(bio);
+}
+
#endif /* _LINUX_BLK_INTEGRITY_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d463b9b5a0a5..890128cdea1c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -13,6 +13,7 @@
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/completion.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
@@ -38,6 +39,7 @@ struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
+struct hd_geometry;
struct blk_report_zones_args;
struct blk_queue_stats;
struct blk_stat_callback;
@@ -200,10 +202,14 @@ struct gendisk {
u8 __rcu *zones_cond;
unsigned int zone_wplugs_hash_bits;
atomic_t nr_zone_wplugs;
- spinlock_t zone_wplugs_lock;
+ spinlock_t zone_wplugs_hash_lock;
struct mempool *zone_wplugs_pool;
struct hlist_head *zone_wplugs_hash;
struct workqueue_struct *zone_wplugs_wq;
+ spinlock_t zone_wplugs_list_lock;
+ struct list_head zone_wplugs_list;
+ struct task_struct *zone_wplugs_worker;
+ struct completion zone_wplugs_worker_bio_done;
#endif /* CONFIG_BLK_DEV_ZONED */
#if IS_ENABLED(CONFIG_CDROM)
@@ -502,7 +508,7 @@ struct request_queue {
/* hw dispatch queues */
unsigned int nr_hw_queues;
- struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;
+ struct blk_mq_hw_ctx * __rcu *queue_hw_ctx __counted_by_ptr(nr_hw_queues);
struct percpu_ref q_usage_counter;
struct lock_class_key io_lock_cls_key;
@@ -668,6 +674,7 @@ enum {
QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
QUEUE_FLAG_BIO_ISSUE_TIME, /* record bio->issue_time_ns */
+ QUEUE_FLAG_ZONED_QD1_WRITES, /* Limit zoned devices writes to QD=1 */
QUEUE_FLAG_MAX
};
@@ -707,6 +714,8 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
#define blk_queue_no_elv_switch(q) \
test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
+#define blk_queue_zoned_qd1_writes(q) \
+ test_bit(QUEUE_FLAG_ZONED_QD1_WRITES, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
@@ -1467,24 +1476,23 @@ static inline bool bdev_rot(struct block_device *bdev)
return blk_queue_rot(bdev_get_queue(bdev));
}
-static inline bool bdev_nonrot(struct block_device *bdev)
+static inline bool bdev_synchronous(struct block_device *bdev)
{
- return !bdev_rot(bdev);
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
-static inline bool bdev_synchronous(struct block_device *bdev)
+static inline bool bdev_has_integrity_csum(struct block_device *bdev)
{
- return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
+ struct queue_limits *lim = bdev_limits(bdev);
+
+ return IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ lim->integrity.csum_type != BLK_INTEGRITY_CSUM_NONE;
}
static inline bool bdev_stable_writes(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
- q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
- return true;
- return q->limits.features & BLK_FEAT_STABLE_WRITES;
+ return bdev_has_integrity_csum(bdev) ||
+ (bdev_limits(bdev)->features & BLK_FEAT_STABLE_WRITES);
}
static inline bool blk_queue_write_cache(struct request_queue *q)
@@ -1877,6 +1885,24 @@ static inline int bio_split_rw_at(struct bio *bio,
return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}
+/*
+ * Maximum contiguous integrity buffer allocation.
+ */
+#define BLK_INTEGRITY_MAX_SIZE SZ_2M
+
+/*
+ * Maximum size of I/O that needs a block layer integrity buffer. Limited
+ * by the number of intervals for which we can fit the integrity buffer into
+ * the buffer size. Because the buffer is a single segment it is also limited
+ * by the maximum segment size.
+ */
+static inline unsigned int max_integrity_io_size(struct queue_limits *lim)
+{
+ return min_t(unsigned int, lim->max_segment_size,
+ (BLK_INTEGRITY_MAX_SIZE / lim->integrity.metadata_size) <<
+ lim->integrity.interval_exp);
+}
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/bnxt/ulp.h b/include/linux/bnxt/ulp.h
new file mode 100644
index 000000000000..0851ad3394b0
--- /dev/null
+++ b/include/linux/bnxt/ulp.h
@@ -0,0 +1,144 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ULP_H
+#define BNXT_ULP_H
+
+#include <linux/auxiliary_bus.h>
+
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS 1
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
+
+struct hwrm_async_event_cmpl;
+struct bnxt;
+
+enum bnxt_auxdev_type {
+ BNXT_AUXDEV_RDMA = 0,
+ BNXT_AUXDEV_FWCTL,
+ __BNXT_AUXDEV_MAX
+};
+
+struct bnxt_aux_priv {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
+
+struct bnxt_msix_entry {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
+struct bnxt_ulp_ops {
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
+ void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
+};
+
+struct bnxt_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnxt_ulp {
+ void *handle;
+ struct bnxt_ulp_ops __rcu *ulp_ops;
+ unsigned long *async_events_bmap;
+ u16 max_async_event_id;
+ u16 msix_requested;
+};
+
+struct bnxt_en_dev {
+ struct net_device *net;
+ struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
+ u32 flags;
+ #define BNXT_EN_FLAG_ROCEV1_CAP 0x1
+ #define BNXT_EN_FLAG_ROCEV2_CAP 0x2
+ #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
+ BNXT_EN_FLAG_ROCEV2_CAP)
+ #define BNXT_EN_FLAG_ULP_STOPPED 0x8
+ #define BNXT_EN_FLAG_VF 0x10
+#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+ #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
+
+ struct bnxt_ulp *ulp_tbl;
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
+ int l2_db_offset; /* Doorbell offset in
+ * bytes within
+ * l2_db_size_nc.
+ */
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state; /* Could be checked in
+ * RoCE driver suspend
+ * mode only. Will be
+ * updated in resume.
+ */
+ void __iomem *bar0;
+
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
+};
+
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
+{
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
+ return true;
+ return false;
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_ulp_stop(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+void bnxt_ulp_irq_stop(struct bnxt *bp);
+void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
+void bnxt_aux_devices_uninit(struct bnxt *bp);
+void bnxt_aux_devices_del(struct bnxt *bp);
+void bnxt_aux_devices_add(struct bnxt *bp);
+void bnxt_aux_devices_init(struct bnxt *bp);
+int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
+ void *handle);
+void bnxt_unregister_dev(struct bnxt_en_dev *edev);
+int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
+int bnxt_auxdev_id_alloc(struct bnxt *bp);
+void bnxt_auxdev_id_free(struct bnxt *bp, int id);
+#endif
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 25df9260d206..692a5acc2ffc 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -36,9 +36,9 @@ bool __init cmdline_has_extra_options(void);
* The checksum will be used with the BOOTCONFIG_MAGIC and the size for
* embedding the bootconfig in the initrd image.
*/
-static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
+static inline __init uint32_t xbc_calc_checksum(const void *data, uint32_t size)
{
- unsigned char *p = data;
+ const unsigned char *p = data;
uint32_t ret = 0;
while (size--)
@@ -66,7 +66,7 @@ struct xbc_node {
/* Node tree access raw APIs */
struct xbc_node * __init xbc_root_node(void);
-int __init xbc_node_index(struct xbc_node *node);
+uint16_t __init xbc_node_index(struct xbc_node *node);
struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 2f535331f926..b2e79c2b41d5 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -184,7 +184,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
struct bpf_prog_array *array;
array = rcu_access_pointer(cgrp->bpf.effective[type]);
- return array != &bpf_empty_prog_array.hdr;
+ return array != &bpf_empty_prog_array;
}
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 05b34a6355b0..b4b703c90ca9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1541,6 +1541,8 @@ bool bpf_has_frame_pointer(unsigned long ip);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
+bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog,
+ int insn_idx);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
struct bpf_trampoline *tr,
@@ -1854,6 +1856,10 @@ struct bpf_link_ops {
* target hook is sleepable, we'll go through tasks trace RCU GP and
* then "classic" RCU GP; this need for chaining tasks trace and
* classic RCU GPs is designated by setting bpf_link->sleepable flag
+ *
+ * For non-sleepable tracepoint links we go through SRCU gp instead,
+ * since RCU is not used in that case. Sleepable tracepoints still
+ * follow the scheme above.
*/
void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
@@ -2365,18 +2371,13 @@ struct bpf_prog_array {
struct bpf_prog_array_item items[];
};
-struct bpf_empty_prog_array {
- struct bpf_prog_array hdr;
- struct bpf_prog *null_prog;
-};
-
/* to avoid allocating empty bpf_prog_array for cgroups that
* don't have bpf program attached use one global 'bpf_empty_prog_array'
* It will not be modified the caller of bpf_prog_array_alloc()
* (since caller requested prog_cnt == 0)
* that pointer should be 'freed' by bpf_prog_array_free()
*/
-extern struct bpf_empty_prog_array bpf_empty_prog_array;
+extern struct bpf_prog_array bpf_empty_prog_array;
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
@@ -3946,6 +3947,9 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
return prog->aux->func_idx != 0;
}
+const struct bpf_line_info *bpf_find_linfo(const struct bpf_prog *prog, u32 insn_off);
+void bpf_get_linfo_file_line(struct btf *btf, const struct bpf_line_info *linfo,
+ const char **filep, const char **linep, int *nump);
int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
const char **linep, int *nump);
struct bpf_prog *bpf_prog_find_from_stack(void);
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 8157e8da61d4..9e4f5c45c974 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -54,7 +54,6 @@ struct bpf_local_storage_map {
u32 bucket_log;
u16 elem_size;
u16 cache_idx;
- bool use_kmalloc_nolock;
};
struct bpf_local_storage_data {
@@ -86,8 +85,7 @@ struct bpf_local_storage_elem {
*/
};
atomic_t state;
- bool use_kmalloc_nolock;
- /* 3 bytes hole */
+ /* 4 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
@@ -104,7 +102,6 @@ struct bpf_local_storage {
rqspinlock_t lock; /* Protect adding/removing from the "list" */
u64 mem_charge; /* Copy of mem charged to owner. Protected by "lock" */
refcount_t owner_refcnt;/* Used to pin owner when map_free is uncharging */
- bool use_kmalloc_nolock;
};
/* U16_MAX is much more than enough for sk local storage
@@ -137,8 +134,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
- struct bpf_local_storage_cache *cache,
- bool use_kmalloc_nolock);
+ struct bpf_local_storage_cache *cache);
void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
struct bpf_local_storage_map *smap,
@@ -192,7 +188,7 @@ int bpf_selem_link_map(struct bpf_local_storage_map *smap,
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
- bool swap_uptrs, gfp_t gfp_flags);
+ bool swap_uptrs);
void bpf_selem_free(struct bpf_local_storage_elem *selem,
bool reuse_now);
@@ -200,12 +196,11 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem,
int
bpf_local_storage_alloc(void *owner,
struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *first_selem,
- gfp_t gfp_flags);
+ struct bpf_local_storage_elem *first_selem);
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
- void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
+ void *value, u64 map_flags, bool swap_uptrs);
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index ef8e45a362d9..b148f816f25b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -38,10 +38,9 @@ struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
/*
- * Fixed part of pointer offset, pointer types only.
- * Or constant delta between "linked" scalars with the same ID.
+ * Constant delta between "linked" scalars with the same ID.
*/
- s32 off;
+ s32 delta;
union {
/* valid when type == PTR_TO_PACKET */
int range;
@@ -146,9 +145,9 @@ struct bpf_reg_state {
* Upper bit of ID is used to remember relationship between "linked"
* registers. Example:
* r1 = r2; both will have r1->id == r2->id == N
- * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->delta == 10
* r3 = r2; both will have r3->id == r2->id == N
- * w3 += 10; r3->id == N | BPF_ADD_CONST32 and r3->off == 10
+ * w3 += 10; r3->id == N | BPF_ADD_CONST32 and r3->delta == 10
*/
#define BPF_ADD_CONST64 (1U << 31)
#define BPF_ADD_CONST32 (1U << 30)
@@ -221,14 +220,67 @@ enum bpf_stack_slot_type {
STACK_DYNPTR,
STACK_ITER,
STACK_IRQ_FLAG,
+ STACK_POISON,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+/* 4-byte stack slot granularity for liveness analysis */
+#define BPF_HALF_REG_SIZE 4
+#define STACK_SLOT_SZ 4
+#define STACK_SLOTS (MAX_BPF_STACK / BPF_HALF_REG_SIZE) /* 128 */
+
+typedef struct {
+ u64 v[2];
+} spis_t;
+
+#define SPIS_ZERO ((spis_t){})
+#define SPIS_ALL ((spis_t){{ U64_MAX, U64_MAX }})
+
+static inline bool spis_is_zero(spis_t s)
+{
+ return s.v[0] == 0 && s.v[1] == 0;
+}
+
+static inline bool spis_equal(spis_t a, spis_t b)
+{
+ return a.v[0] == b.v[0] && a.v[1] == b.v[1];
+}
+
+static inline spis_t spis_or(spis_t a, spis_t b)
+{
+ return (spis_t){{ a.v[0] | b.v[0], a.v[1] | b.v[1] }};
+}
+
+static inline spis_t spis_and(spis_t a, spis_t b)
+{
+ return (spis_t){{ a.v[0] & b.v[0], a.v[1] & b.v[1] }};
+}
+
+static inline spis_t spis_not(spis_t s)
+{
+ return (spis_t){{ ~s.v[0], ~s.v[1] }};
+}
+
+static inline bool spis_test_bit(spis_t s, u32 slot)
+{
+ return s.v[slot / 64] & BIT_ULL(slot % 64);
+}
+
+static inline void spis_or_range(spis_t *mask, u32 lo, u32 hi)
+{
+ u32 w;
+
+ for (w = lo; w <= hi && w < STACK_SLOTS; w++)
+ mask->v[w / 64] |= BIT_ULL(w % 64);
+}
+
#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
(1 << BPF_REG_3) | (1 << BPF_REG_4) | \
(1 << BPF_REG_5))
+#define BPF_MAIN_FUNC (-1)
+
#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
@@ -266,6 +318,7 @@ struct bpf_reference_state {
struct bpf_retval_range {
s32 minval;
s32 maxval;
+ bool return_32bit;
};
/* state of the program:
@@ -424,7 +477,6 @@ struct bpf_verifier_state {
bool speculative;
bool in_sleepable;
- bool cleaned;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
@@ -578,16 +630,17 @@ struct bpf_insn_aux_data {
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
- bool jmp_point;
- bool prune_point;
+ u32 jmp_point:1;
+ u32 prune_point:1;
/* ensure we check state equivalence and save state checkpoint and
* this instruction, regardless of any heuristics
*/
- bool force_checkpoint;
+ u32 force_checkpoint:1;
/* true if instruction is a call to a helper function that
* accepts callback function as a parameter.
*/
- bool calls_callback;
+ u32 calls_callback:1;
+ u32 indirect_target:1; /* if it is an indirect jump target */
/*
* CFG strongly connected component this instruction belongs to,
* zero if it is a singleton SCC.
@@ -595,6 +648,18 @@ struct bpf_insn_aux_data {
u32 scc;
/* registers alive before this instruction. */
u16 live_regs_before;
+ /*
+ * Bitmask of R0-R9 that hold known values at this instruction.
+ * const_reg_mask: scalar constants that fit in 32 bits.
+ * const_reg_map_mask: map pointers, val is map_index into used_maps[].
+ * const_reg_subprog_mask: subprog pointers, val is subprog number.
+ * const_reg_vals[i] holds the 32-bit value for register i.
+ * Populated by compute_const_regs() pre-pass.
+ */
+ u16 const_reg_mask;
+ u16 const_reg_map_mask;
+ u16 const_reg_subprog_mask;
+ u32 const_reg_vals[10];
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -652,7 +717,7 @@ enum priv_stack_mode {
};
struct bpf_subprog_info {
- /* 'start' has to be the first field otherwise find_subprog() won't work */
+ const char *name; /* name extracted from BTF */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
@@ -787,6 +852,8 @@ struct bpf_verifier_env {
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+ /* subprog indices sorted in topological order: leaves first, callers last */
+ int subprog_topo_order[BPF_MAX_SUBPROGS + 2];
union {
struct bpf_idmap idmap_scratch;
struct bpf_idset idset_scratch;
@@ -805,6 +872,8 @@ struct bpf_verifier_env {
} cfg;
struct backtrack_state bt;
struct bpf_jmp_history_entry *cur_hist_ent;
+ /* Per-callsite copy of parent's converged at_stack_in for cross-frame fills. */
+ struct arg_track **callsite_at_stack;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -837,7 +906,9 @@ struct bpf_verifier_env {
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
/* buffer used to temporary hold constants as scalar registers */
- struct bpf_reg_state fake_reg[2];
+ struct bpf_reg_state fake_reg[1];
+ /* buffers used to save updated reg states while simulating branches */
+ struct bpf_reg_state true_reg1, true_reg2, false_reg1, false_reg2;
/* buffer used to generate temporary string representations,
* e.g., in reg_type_str() to generate reg_type string
*/
@@ -863,6 +934,30 @@ static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env
return &env->subprog_info[subprog];
}
+struct bpf_call_summary {
+ u8 num_params;
+ bool is_void;
+ bool fastcall;
+};
+
+static inline bool bpf_helper_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == 0;
+}
+
+static inline bool bpf_pseudo_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_CALL;
+}
+
+static inline bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
@@ -891,6 +986,41 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
})
+static inline void mark_prune_point(struct bpf_verifier_env *env, int idx)
+{
+ env->insn_aux_data[idx].prune_point = true;
+}
+
+static inline bool bpf_is_prune_point(struct bpf_verifier_env *env, int insn_idx)
+{
+ return env->insn_aux_data[insn_idx].prune_point;
+}
+
+static inline void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
+{
+ env->insn_aux_data[idx].force_checkpoint = true;
+}
+
+static inline bool bpf_is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
+{
+ return env->insn_aux_data[insn_idx].force_checkpoint;
+}
+
+static inline void mark_calls_callback(struct bpf_verifier_env *env, int idx)
+{
+ env->insn_aux_data[idx].calls_callback = true;
+}
+
+static inline bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx)
+{
+ return env->insn_aux_data[insn_idx].calls_callback;
+}
+
+static inline void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+{
+ env->insn_aux_data[idx].jmp_point = true;
+}
+
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
@@ -932,6 +1062,11 @@ static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
*btf_id = key & 0x7FFFFFFF;
}
+int bpf_check_btf_info_early(struct bpf_verifier_env *env,
+ const union bpf_attr *attr, bpfptr_t uattr);
+int bpf_check_btf_info(struct bpf_verifier_env *env,
+ const union bpf_attr *attr, bpfptr_t uattr);
+
int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *prog,
const struct bpf_prog *tgt_prog,
@@ -941,6 +1076,93 @@ void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+int bpf_is_state_visited(struct bpf_verifier_env *env, int insn_idx);
+int bpf_update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
+
+void bpf_clear_jmp_history(struct bpf_verifier_state *state);
+int bpf_copy_verifier_state(struct bpf_verifier_state *dst_state,
+ const struct bpf_verifier_state *src);
+struct list_head *bpf_explored_state(struct bpf_verifier_env *env, int idx);
+void bpf_free_verifier_state(struct bpf_verifier_state *state, bool free_self);
+void bpf_free_backedges(struct bpf_scc_visit *visit);
+int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
+ int insn_flags, u64 linked_regs);
+void bpf_bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist);
+void bpf_mark_reg_not_init(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
+void bpf_mark_reg_unknown_imprecise(struct bpf_reg_state *reg);
+void bpf_mark_all_scalars_precise(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *st);
+void bpf_clear_singular_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
+int bpf_mark_chain_precision(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *starting_state,
+ int regno, bool *changed);
+
+static inline int bpf_get_spi(s32 off)
+{
+ return (-off - 1) / BPF_REG_SIZE;
+}
+
+static inline struct bpf_func_state *bpf_func(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[reg->frameno];
+}
+
+/* Return IP for a given frame in a call stack */
+static inline u32 bpf_frame_insn_idx(struct bpf_verifier_state *st, u32 frame)
+{
+ return frame == st->curframe
+ ? st->insn_idx
+ : st->frame[frame + 1]->callsite;
+}
+
+static inline bool bpf_is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+{
+ return env->insn_aux_data[insn_idx].jmp_point;
+}
+
+static inline bool bpf_is_spilled_reg(const struct bpf_stack_state *stack)
+{
+ return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
+}
+
+static inline bool bpf_is_spilled_scalar_reg(const struct bpf_stack_state *stack)
+{
+ return bpf_is_spilled_reg(stack) && stack->spilled_ptr.type == SCALAR_VALUE;
+}
+
+static inline bool bpf_register_is_null(struct bpf_reg_state *reg)
+{
+ return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
+}
+
+static inline void bpf_bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
+{
+ bt->reg_masks[frame] |= 1 << reg;
+}
+
+static inline void bpf_bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ bt->stack_masks[frame] |= 1ull << slot;
+}
+
+static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg)
+{
+ return bt->reg_masks[frame] & (1 << reg);
+}
+
+static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ return bt->stack_masks[frame] & (1ull << slot);
+}
+
+bool bpf_map_is_rdonly(const struct bpf_map *map);
+int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
+ bool is_ldsx);
+
#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
/* extract base type from bpf_{arg, return, reg}_type. */
@@ -1077,22 +1299,194 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno);
+u32 bpf_vlog_alignment(u32 pos);
struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
int bpf_jmp_offset(struct bpf_insn *insn);
struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
-bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
+bool bpf_subprog_is_global(const struct bpf_verifier_env *env, int subprog);
+
+int bpf_find_subprog(struct bpf_verifier_env *env, int off);
+int bpf_compute_const_regs(struct bpf_verifier_env *env);
+int bpf_prune_dead_branches(struct bpf_verifier_env *env);
+int bpf_check_cfg(struct bpf_verifier_env *env);
+int bpf_compute_postorder(struct bpf_verifier_env *env);
+int bpf_compute_scc(struct bpf_verifier_env *env);
+
+struct bpf_map_desc {
+ struct bpf_map *ptr;
+ int uid;
+};
+
+struct bpf_kfunc_call_arg_meta {
+ /* In parameters */
+ struct btf *btf;
+ u32 func_id;
+ u32 kfunc_flags;
+ const struct btf_type *func_proto;
+ const char *func_name;
+ /* Out parameters */
+ u32 ref_obj_id;
+ u8 release_regno;
+ bool r0_rdonly;
+ u32 ret_btf_id;
+ u64 r0_size;
+ u32 subprogno;
+ struct {
+ u64 value;
+ bool found;
+ } arg_constant;
+
+ /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
+ * generally to pass info about user-defined local kptr types to later
+ * verification logic
+ * bpf_obj_drop/bpf_percpu_obj_drop
+ * Record the local kptr type to be drop'd
+ * bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
+ * Record the local kptr type to be refcount_incr'd and use
+ * arg_owning_ref to determine whether refcount_acquire should be
+ * fallible
+ */
+ struct btf *arg_btf;
+ u32 arg_btf_id;
+ bool arg_owning_ref;
+ bool arg_prog;
+
+ struct {
+ struct btf_field *field;
+ } arg_list_head;
+ struct {
+ struct btf_field *field;
+ } arg_rbtree_root;
+ struct {
+ enum bpf_dynptr_type type;
+ u32 id;
+ u32 ref_obj_id;
+ } initialized_dynptr;
+ struct {
+ u8 spi;
+ u8 frameno;
+ } iter;
+ struct bpf_map_desc map;
+ u64 mem_size;
+};
+
+int bpf_get_helper_proto(struct bpf_verifier_env *env, int func_id,
+ const struct bpf_func_proto **ptr);
+int bpf_fetch_kfunc_arg_meta(struct bpf_verifier_env *env, s32 func_id,
+ s16 offset, struct bpf_kfunc_call_arg_meta *meta);
+bool bpf_is_async_callback_calling_insn(struct bpf_insn *insn);
+bool bpf_is_sync_callback_calling_insn(struct bpf_insn *insn);
+static inline bool bpf_is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
+{
+ return meta->kfunc_flags & KF_ITER_NEXT;
+}
+
+static inline bool bpf_is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
+{
+ return meta->kfunc_flags & KF_SLEEPABLE;
+}
+bool bpf_is_kfunc_pkt_changing(struct bpf_kfunc_call_arg_meta *meta);
+struct bpf_iarray *bpf_iarray_realloc(struct bpf_iarray *old, size_t n_elem);
+int bpf_copy_insn_array_uniq(struct bpf_map *map, u32 start, u32 end, u32 *off);
+bool bpf_insn_is_cond_jump(u8 code);
+bool bpf_is_may_goto_insn(struct bpf_insn *insn);
+
+void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
+bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
+ struct bpf_call_summary *cs);
+s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env,
+ struct bpf_insn *insn, int arg,
+ int insn_idx);
+s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env,
+ struct bpf_insn *insn, int arg,
+ int insn_idx);
+int bpf_compute_subprog_arg_access(struct bpf_verifier_env *env);
int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
-int bpf_update_live_stack(struct bpf_verifier_env *env);
-int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
-void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
-int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
-int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
-void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);
+int bpf_compute_live_registers(struct bpf_verifier_env *env);
+
+#define BPF_MAP_KEY_POISON (1ULL << 63)
+#define BPF_MAP_KEY_SEEN (1ULL << 62)
+
+static inline bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
+{
+ return aux->map_ptr_state.poison;
+}
+
+static inline bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
+{
+ return aux->map_ptr_state.unpriv;
+}
+
+static inline bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
+{
+ return aux->map_key_state & BPF_MAP_KEY_POISON;
+}
+
+static inline bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
+{
+ return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
+}
+
+static inline u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
+{
+ return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
+}
+
+#define MAX_PACKET_OFF 0xffff
+#define CALLER_SAVED_REGS 6
+
+enum bpf_reg_arg_type {
+ SRC_OP, /* register is used as source operand */
+ DST_OP, /* register is used as destination operand */
+ DST_OP_NO_MARK /* same as above, check only, don't mark */
+};
+
+#define MAX_KFUNC_DESCS 256
+
+struct bpf_kfunc_desc {
+ struct btf_func_model func_model;
+ u32 func_id;
+ s32 imm;
+ u16 offset;
+ unsigned long addr;
+};
+
+struct bpf_kfunc_desc_tab {
+ /* Sorted by func_id (BTF ID) and offset (fd_array offset) during
+ * verification. JITs do lookups by bpf_insn, where func_id may not be
+ * available, therefore at the end of verification do_misc_fixups()
+ * sorts this by imm and offset.
+ */
+ struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
+ u32 nr_descs;
+};
+
+/* Functions exported from verifier.c, used by fixups.c */
+bool bpf_is_reg64(struct bpf_insn *insn, u32 regno, struct bpf_reg_state *reg, enum bpf_reg_arg_type t);
+void bpf_clear_insn_aux_data(struct bpf_verifier_env *env, int start, int len);
+void bpf_mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog);
+bool bpf_allow_tail_call_in_subprogs(struct bpf_verifier_env *env);
+bool bpf_verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm);
+int bpf_add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, u16 offset);
+int bpf_fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ struct bpf_insn *insn_buf, int insn_idx, int *cnt);
+
+/* Functions in fixups.c, called from bpf_check() */
+int bpf_remove_fastcall_spills_fills(struct bpf_verifier_env *env);
+int bpf_optimize_bpf_loop(struct bpf_verifier_env *env);
+void bpf_opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env);
+int bpf_opt_remove_dead_code(struct bpf_verifier_env *env);
+int bpf_opt_remove_nops(struct bpf_verifier_env *env);
+int bpf_opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, const union bpf_attr *attr);
+int bpf_convert_ctx_accesses(struct bpf_verifier_env *env);
+int bpf_jit_subprogs(struct bpf_verifier_env *env);
+int bpf_fixup_call_args(struct bpf_verifier_env *env);
+int bpf_do_misc_fixups(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 115a964f3006..174687c4c80a 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -266,6 +266,9 @@
#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+#define BCM54XX_TOP_MISC_MII_BUF_CNTL0 (MII_BCM54XX_EXP_SEL_TOP + 0x00)
+#define BCM54XX_MII_BUF_CNTL0_AUTOGREEEN_EN BIT(0)
+
#define BCM54XX_TOP_MISC_LED_CTL (MII_BCM54XX_EXP_SEL_TOP + 0x0C)
#define BCM54XX_LED4_SEL_INTR BIT(1)
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index ee2df73edf83..162730bfc2d8 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -7,13 +7,17 @@
struct bsg_device;
struct device;
struct request_queue;
+struct io_uring_cmd;
typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
bool open_for_write, unsigned int timeout);
+typedef int (bsg_uring_cmd_fn)(struct request_queue *q, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags, bool open_for_write);
+
struct bsg_device *bsg_register_queue(struct request_queue *q,
struct device *parent, const char *name,
- bsg_sg_io_fn *sg_io_fn);
+ bsg_sg_io_fn *sg_io_fn, bsg_uring_cmd_fn *uring_cmd_fn);
void bsg_unregister_queue(struct bsg_device *bcd);
#endif /* _LINUX_BSG_H */
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 139bdececdcf..af011db39ab3 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -217,7 +217,7 @@ BTF_SET8_END(name)
#else
-#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[128];
#define BTF_ID(prefix, name)
#define BTF_ID_FLAGS(prefix, name, ...)
#define BTF_ID_UNUSED
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index b16b88bfbc3e..e4939e33b4b5 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -73,8 +73,8 @@ struct buffer_head {
bh_end_io_t *b_end_io; /* I/O completion */
void *b_private; /* reserved for b_end_io */
struct list_head b_assoc_buffers; /* associated with another mapping */
- struct address_space *b_assoc_map; /* mapping this buffer is
- associated with */
+ struct mapping_metadata_bhs *b_mmb; /* head of the list of metadata bhs
+ * this buffer is associated with */
atomic_t b_count; /* users using this buffer_head */
spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
* serialise IO completion of other
@@ -205,12 +205,12 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-/* Things to do with buffers at mapping->private_list */
-void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
- bool datasync);
-int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
- bool datasync);
+/* Things to do with metadata buffers list */
+void mmb_mark_buffer_dirty(struct buffer_head *bh, struct mapping_metadata_bhs *mmb);
+int mmb_fsync_noflush(struct file *file, struct mapping_metadata_bhs *mmb,
+ loff_t start, loff_t end, bool datasync);
+int mmb_fsync(struct file *file, struct mapping_metadata_bhs *mmb,
+ loff_t start, loff_t end, bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
@@ -515,10 +515,10 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
-int inode_has_buffers(struct inode *inode);
-void invalidate_inode_buffers(struct inode *inode);
-int remove_inode_buffers(struct inode *inode);
-int sync_mapping_buffers(struct address_space *mapping);
+void mmb_init(struct mapping_metadata_bhs *mmb, struct address_space *mapping);
+bool mmb_has_buffers(struct mapping_metadata_bhs *mmb);
+void mmb_invalidate(struct mapping_metadata_bhs *mmb);
+int mmb_sync(struct mapping_metadata_bhs *mmb);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
@@ -528,10 +528,7 @@ extern int buffer_heads_over_limit;
static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
-static inline int inode_has_buffers(struct inode *inode) { return 0; }
-static inline void invalidate_inode_buffers(struct inode *inode) {}
-static inline int remove_inode_buffers(struct inode *inode) { return 1; }
-static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline int mmb_sync(struct mapping_metadata_bhs *mmb) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
diff --git a/include/linux/bus/stm32_firewall.h b/include/linux/bus/stm32_firewall.h
new file mode 100644
index 000000000000..e5fac85fe346
--- /dev/null
+++ b/include/linux/bus/stm32_firewall.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef _STM32_FIREWALL_H
+#define _STM32_FIREWALL_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/**
+ * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals
+ * STM32_MEMORY_FIREWALL: This type of firewall protects memories/subsets of memory
+ * zones
+ * STM32_NOTYPE_FIREWALL: Undefined firewall type
+ */
+
+#define STM32_PERIPHERAL_FIREWALL BIT(1)
+#define STM32_MEMORY_FIREWALL BIT(2)
+#define STM32_NOTYPE_FIREWALL BIT(3)
+
+/**
+ * struct stm32_firewall_controller - Information on firewall controller supplying services
+ *
+ * @name: Name of the firewall controller
+ * @dev: Device reference of the firewall controller
+ * @mmio: Base address of the firewall controller
+ * @entry: List entry of the firewall controller list
+ * @type: Type of firewall
+ * @max_entries: Number of entries covered by the firewall
+ * @grant_access: Callback used to grant access for a device access against a
+ * firewall controller
+ * @release_access: Callback used to release resources taken by a device when access was
+ * granted
+ * @grant_memory_range_access: Callback used to grant access for a device to a given memory region
+ */
+struct stm32_firewall_controller {
+ const char *name;
+ struct device *dev;
+ void __iomem *mmio;
+ struct list_head entry;
+ unsigned int type;
+ unsigned int max_entries;
+
+ int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ void (*release_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr,
+ size_t size);
+};
+
+/**
+ * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall
+ * framework
+ * @firewall_controller: Firewall controller to register
+ *
+ * Returns 0 in case of success or -ENODEV if no controller was given.
+ */
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32
+ * firewall framework
+ * @firewall_controller: Firewall controller to unregister
+ */
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall
+ * configuration. This is used at boot-time only, as a sanity check
+ * between device tree and firewalls hardware configurations to
+ * prevent a kernel crash when a device driver is not granted access
+ *
+ * @firewall_controller: Firewall controller which nodes will be populated or not
+ *
+ * Returns 0 in case of success or appropriate errno code if error occurred.
+ */
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller);
+
+#endif /* _STM32_FIREWALL_H */
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
index eaa7a3f54450..6c878f3ca86f 100644
--- a/include/linux/bus/stm32_firewall_device.h
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -112,6 +112,25 @@ int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsy
*/
void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+/**
+ * stm32_firewall_get_grant_all_access - Allocate and get all the firewall(s) associated to given
+ * device. Then, try to grant access rights for each element.
+ * This function is basically a helper function that wraps
+ * both stm32_firewall_get_firewall() and
+ * stm32_firewall_grant_access() on all firewall references of
+ * a device along with the allocation of the array.
+ * Release access using stm32_firewall_release_access* APIs
+ * when done.
+ *
+ * @dev: Device performing the checks
+ * @firewall: Pointer to the array of firewall references to be allocated
+ * @nb_firewall: Number of allocated elements in @firewall
+ *
+ * Returns 0 on success, or appropriate errno code if error occurred.
+ */
+int stm32_firewall_get_grant_all_access(struct device *dev, struct stm32_firewall **firewall,
+ int *nb_firewall);
+
#else /* CONFIG_STM32_FIREWALL */
static inline int stm32_firewall_get_firewall(struct device_node *np,
@@ -141,5 +160,12 @@ static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *fi
{
}
+static inline int stm32_firewall_get_grant_all_access(struct device *dev,
+ struct stm32_firewall **firewall,
+ int *nb_firewall)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_STM32_FIREWALL */
#endif /* STM32_FIREWALL_DEVICE_H */
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 06fb60471aaf..d36dd476feda 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -203,15 +203,6 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
-/* for iterating one bio from start to end */
-#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
-{ \
- .bi_sector = 0, \
- .bi_size = UINT_MAX, \
- .bi_idx = 0, \
- .bi_bvec_done = 0, \
-}
-
static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
{
iter_all->done = 0;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 7f87399938fa..f42563739d2e 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -17,6 +17,7 @@
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
+#include <linux/sched.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
@@ -167,6 +168,7 @@ struct cgroup_file {
struct kernfs_node *kn;
unsigned long notified_at;
struct timer_list notify_timer;
+ spinlock_t lock;
};
/*
@@ -627,6 +629,9 @@ struct cgroup {
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif
+#ifdef CONFIG_EXT_SUB_SCHED
+ struct scx_sched __rcu *scx_sched;
+#endif
/* All ancestors including self */
union {
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index bc892e3b37ee..e52160e85af4 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -42,6 +42,14 @@ struct kernel_clone_args;
#ifdef CONFIG_CGROUPS
+/*
+ * To avoid confusing the compiler (and generating warnings) with code
+ * that attempts to access what would be a 0-element array (i.e. sized
+ * to a potentially empty array when CGROUP_SUBSYS_COUNT == 0), this
+ * constant expression can be added.
+ */
+#define CGROUP_HAS_SUBSYS_CONFIG (CGROUP_SUBSYS_COUNT > 0)
+
enum css_task_iter_flags {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
@@ -76,6 +84,7 @@ enum cgroup_lifetime_events {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct blocking_notifier_head cgroup_lifetime_notifier;
@@ -103,6 +112,8 @@ extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define cgroup_subsys_on_dfl(ss) \
static_branch_likely(&ss ## _on_dfl_key)
+bool cgroup_on_dfl(const struct cgroup *cgrp);
+
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
@@ -274,6 +285,32 @@ void css_task_iter_end(struct css_task_iter *it);
for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
(pos) = css_next_descendant_post((pos), (css)))
+/* iterate over child cgrps, lock should be held throughout iteration */
+#define cgroup_for_each_live_child(child, cgrp) \
+ list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
+ if (({ lockdep_assert_held(&cgroup_mutex); \
+ cgroup_is_dead(child); })) \
+ ; \
+ else
+
+/* walk live descendants in pre order */
+#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \
+ css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \
+ if (({ lockdep_assert_held(&cgroup_mutex); \
+ (dsct) = (d_css)->cgroup; \
+ cgroup_is_dead(dsct); })) \
+ ; \
+ else
+
+/* walk live descendants in postorder */
+#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) \
+ css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
+ if (({ lockdep_assert_held(&cgroup_mutex); \
+ (dsct) = (d_css)->cgroup; \
+ cgroup_is_dead(dsct); })) \
+ ; \
+ else
+
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
@@ -337,6 +374,27 @@ static inline u64 cgroup_id(const struct cgroup *cgrp)
}
/**
+ * cgroup_css - obtain a cgroup's css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest (%NULL returns @cgrp->self)
+ *
+ * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
+ * function must be called either under cgroup_mutex or rcu_read_lock() and
+ * the caller is responsible for pinning the returned css if it wants to
+ * keep accessing it outside the said locks. This function may return
+ * %NULL if @cgrp doesn't have @ss enabled.
+ */
+static inline struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ if (CGROUP_HAS_SUBSYS_CONFIG && ss)
+ return rcu_dereference_check(cgrp->subsys[ss->id],
+ lockdep_is_held(&cgroup_mutex));
+ else
+ return &cgrp->self;
+}
+
+/**
* css_is_dying - test whether the specified css is dying
* @css: target css
*
@@ -372,6 +430,11 @@ static inline bool css_is_self(struct cgroup_subsys_state *css)
return false;
}
+static inline bool cgroup_is_dead(const struct cgroup *cgrp)
+{
+ return !(cgrp->self.flags & CSS_ONLINE);
+}
+
static inline void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
@@ -387,8 +450,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
-extern struct mutex cgroup_mutex;
-
static inline void cgroup_lock(void)
{
mutex_lock(&cgroup_mutex);
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index dbc4162921e9..ea95ca4bc11c 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -286,15 +286,18 @@ static __always_inline _type class_##_name##_constructor(_init_args) \
__no_context_analysis \
{ _type t = _init; return t; }
-#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
-typedef lock_##_name##_t lock_##_name##ext##_t; \
+#define EXTEND_CLASS_COND(_name, ext, _cond, _init, _init_args...) \
+typedef lock_##_name##_t lock_##_name##ext##_t; \
typedef class_##_name##_t class_##_name##ext##_t; \
-static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
-{ class_##_name##_destructor(p); } \
+static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *_T) \
+{ if (_cond) return; class_##_name##_destructor(_T); } \
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
__no_context_analysis \
{ class_##_name##_t t = _init; return t; }
+#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+ EXTEND_CLASS_COND(_name, ext, 0, _init, _init_args)
+
#define CLASS(_name, var) \
class_##_name##_t var __cleanup(class_##_name##_destructor) = \
class_##_name##_constructor
@@ -394,12 +397,12 @@ static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
__DEFINE_GUARD_LOCK_PTR(_name, _T)
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
- DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
DEFINE_CLASS_IS_GUARD(_name)
#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
- EXTEND_CLASS(_name, _ext, \
+ EXTEND_CLASS_COND(_name, _ext, __GUARD_IS_ERR(*_T), \
({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
class_##_name##_t _T) \
static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
@@ -488,7 +491,7 @@ typedef struct { \
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
__no_context_analysis \
{ \
- if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
+ if (_T->lock) { _unlock; } \
} \
\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
@@ -565,7 +568,7 @@ __DEFINE_LOCK_GUARD_0(_name, _lock)
#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond) \
__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
- EXTEND_CLASS(_name, _ext, \
+ EXTEND_CLASS_COND(_name, _ext, __GUARD_IS_ERR(_T->lock), \
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
int _RET = (_lock); \
if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index b0df28ddd394..6adb72761246 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -43,9 +43,9 @@ enum clock_event_state {
/*
* Clock event features
*/
-# define CLOCK_EVT_FEAT_PERIODIC 0x000001
-# define CLOCK_EVT_FEAT_ONESHOT 0x000002
-# define CLOCK_EVT_FEAT_KTIME 0x000004
+# define CLOCK_EVT_FEAT_PERIODIC 0x000001
+# define CLOCK_EVT_FEAT_ONESHOT 0x000002
+# define CLOCK_EVT_FEAT_CLOCKSOURCE_COUPLED 0x000004
/*
* x86(64) specific (mis)features:
@@ -73,6 +73,7 @@ enum clock_event_state {
* level handler of the event source
* @set_next_event: set next event function using a clocksource delta
* @set_next_ktime: set next event function using a direct ktime value
+ * @set_next_coupled: set next event function for clocksource coupled mode
* @next_event: local storage for the next event in oneshot mode
* @max_delta_ns: maximum delta value in ns
* @min_delta_ns: minimum delta value in ns
@@ -80,6 +81,8 @@ enum clock_event_state {
* @shift: nanoseconds to cycles divisor (power of two)
* @state_use_accessors:current state of the device, assigned by the core code
* @features: features
+ * @cs_id: Clocksource ID to denote the clocksource for coupled mode
+ * @next_event_forced: True if the last programming was a forced event
* @retries: number of forced programming retries
* @set_state_periodic: switch state to periodic
* @set_state_oneshot: switch state to oneshot
@@ -101,6 +104,7 @@ struct clock_event_device {
void (*event_handler)(struct clock_event_device *);
int (*set_next_event)(unsigned long evt, struct clock_event_device *);
int (*set_next_ktime)(ktime_t expires, struct clock_event_device *);
+ void (*set_next_coupled)(u64 cycles, struct clock_event_device *);
ktime_t next_event;
u64 max_delta_ns;
u64 min_delta_ns;
@@ -108,6 +112,8 @@ struct clock_event_device {
u32 shift;
enum clock_event_state state_use_accessors;
unsigned int features;
+ enum clocksource_ids cs_id;
+ unsigned int next_event_forced;
unsigned long retries;
int (*set_state_periodic)(struct clock_event_device *);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 65b7c41471c3..7c38190b10bf 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -25,8 +25,7 @@ struct clocksource_base;
struct clocksource;
struct module;
-#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \
- defined(CONFIG_GENERIC_GETTIMEOFDAY)
+#if defined(CONFIG_GENERIC_GETTIMEOFDAY)
#include <asm/clocksource.h>
#endif
@@ -44,8 +43,6 @@ struct module;
* @shift: Cycle to nanosecond divisor (power of two)
* @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
* @maxadj: Maximum adjustment value to mult (~11%)
- * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
- * Zero says to use default WATCHDOG_THRESHOLD.
* @archdata: Optional arch-specific data
* @max_cycles: Maximum safe cycle value which won't overflow on
* multiplication
@@ -105,10 +102,6 @@ struct clocksource {
u32 shift;
u64 max_idle_ns;
u32 maxadj;
- u32 uncertainty_margin;
-#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
- struct arch_clocksource_data archdata;
-#endif
u64 max_cycles;
u64 max_raw_delta;
const char *name;
@@ -133,6 +126,7 @@ struct clocksource {
struct list_head wd_list;
u64 cs_last;
u64 wd_last;
+ unsigned int wd_cpu;
#endif
struct module *owner;
};
@@ -142,13 +136,19 @@ struct clocksource {
*/
#define CLOCK_SOURCE_IS_CONTINUOUS 0x01
#define CLOCK_SOURCE_MUST_VERIFY 0x02
+#define CLOCK_SOURCE_CALIBRATED 0x04
#define CLOCK_SOURCE_WATCHDOG 0x10
#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
+#define CLOCK_SOURCE_CAN_INLINE_READ 0x200
+#define CLOCK_SOURCE_HAS_COUPLED_CLOCK_EVENT 0x400
+
+#define CLOCK_SOURCE_WDTEST 0x800
+#define CLOCK_SOURCE_WDTEST_PERCPU 0x1000
+
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -298,21 +298,6 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
-static inline unsigned int clocksource_get_max_watchdog_retry(void)
-{
- /*
- * When system is in the boot phase or under heavy workload, there
- * can be random big latencies during the clocksource/watchdog
- * read, so allow retries to filter the noise latency. As the
- * latency's frequency and maximum value goes up with the number of
- * CPUs, scale the number of retries with the number of online
- * CPUs.
- */
- return (ilog2(num_online_cpus()) / 2) + 1;
-}
-
-void clocksource_verify_percpu(struct clocksource *cs);
-
/**
* struct clocksource_base - hardware abstraction for clock on which a clocksource
* is based
diff --git a/include/linux/cma.h b/include/linux/cma.h
index d0793eaaadaa..8555d38a97b1 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -61,14 +61,4 @@ extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
extern void cma_reserve_pages_on_error(struct cma *cma);
-
-#ifdef CONFIG_DMA_CMA
-extern bool cma_skip_dt_default_reserved_mem(void);
-#else
-static inline bool cma_skip_dt_default_reserved_mem(void)
-{
- return false;
-}
-#endif
-
#endif
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
index 00c074a2ccb0..a9317571e6af 100644
--- a/include/linux/compiler-context-analysis.h
+++ b/include/linux/compiler-context-analysis.h
@@ -320,6 +320,38 @@ static inline void _context_unsafe_alias(void **p) { }
*/
#define __releases(...) __releases_ctx_lock(__VA_ARGS__)
+/*
+ * Clang's analysis does not care precisely about the value, only that it is
+ * either zero or non-zero. So the __cond_acquires() interface might be
+ * misleading if we say that @ret is the value returned if acquired. Instead,
+ * provide symbolic variants which we translate.
+ */
+#define __cond_acquires_impl_not_true(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_not_false(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_not_nonzero(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_not_0(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_not_nonnull(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_not_NULL(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+
+/**
+ * __cond_releases() - function attribute, function conditionally
+ * releases a context lock exclusively
+ * @ret: abstract value returned by the function if the context lock is released
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally releases the
+ * given context lock instance @x exclusively. The associated context(s) must
+ * be active on entry. The function return value @ret denotes when the context
+ * lock is released.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ *
+ * NOTE: clang does not have a native attribute for this; instead implement
+ * it as an unconditional release and a conditional acquire for the
+ * inverted condition -- which is semantically equivalent.
+ */
+#define __cond_releases(ret, x) __releases(x) __cond_acquires_impl_not_##ret(x)
+
/**
* __acquire() - function to acquire context lock exclusively
* @x: context lock instance pointer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index af16624b29fd..cb2f6050bdf7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -149,10 +149,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#ifndef RELOC_HIDE
-# define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __ptr = (unsigned long) (ptr); \
- (typeof(ptr)) (__ptr + (off)); })
+# define RELOC_HIDE(ptr, off) ((typeof(ptr))((unsigned long)(ptr) + (off)))
#endif
#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 890076d0974b..e8fd77593b68 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -432,18 +432,11 @@ struct ftrace_likely_data {
#define at_least
#endif
-/* Do not trap wrapping arithmetic within an annotated function. */
-#ifdef CONFIG_UBSAN_INTEGER_WRAP
-# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
-#else
-# define __signed_wrap
-#endif
-
/* Section for code which can't be instrumented at all */
#define __noinstr_section(section) \
noinline notrace __attribute((__section__(section))) \
__no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
- __no_sanitize_memory __signed_wrap
+ __no_sanitize_memory
#define noinstr __noinstr_section(".noinstr.text")
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index d5ca855116df..fe915afdece5 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -13,8 +13,8 @@
#ifndef _LINUX_CONSOLE_STRUCT_H
#define _LINUX_CONSOLE_STRUCT_H
-#include <linux/wait.h>
#include <linux/vt.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
struct uni_pagedict;
@@ -58,6 +58,33 @@ struct vc_state {
bool reverse;
};
+/**
+ * struct vc_font - Describes a font
+ * @width: The width of a single glyph in bits
+ * @height: The height of a single glyph in scanlines
+ * @charcount: The number of glyphs in the font
+ * @data: The raw font data
+ *
+ * Font data is organized as an array of glyphs. Each glyph is a bitmap with
+ * set bits indicating the foreground color. Unset bits indicate background
+ * color. The fields @width and @height store a single glyph's number of
+ * horizontal bits and vertical scanlines. If width is not a multiple of 8,
+ * there are trailing bits to fill up the byte. These bits should not be drawn.
+ *
+ * The field @data points to the first glyph's first byte. The value @charcount
+ * gives the number of glyphs in the font. There are no empty scanlines between
+ * two adjacent glyphs.
+ */
+struct vc_font {
+ unsigned int width;
+ unsigned int height;
+ unsigned int charcount;
+ const unsigned char *data;
+};
+
+unsigned int vc_font_pitch(const struct vc_font *font);
+unsigned int vc_font_size(const struct vc_font *font);
+
/*
* Example: vc_data of a console that was scrolled 3 lines down.
*
@@ -120,9 +147,9 @@ struct vc_data {
unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */
unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */
unsigned long vc_pos; /* Cursor address */
- /* fonts */
+ /* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
- struct console_font vc_font; /* Current VC font set */
+ struct vc_font vc_font; /* Current VC font set */
unsigned short vc_video_erase_char; /* Background erase character */
/* VT terminal data */
unsigned int vc_state; /* Escape sequence parser state */
diff --git a/include/linux/coreboot.h b/include/linux/coreboot.h
new file mode 100644
index 000000000000..5d40ca7a1d89
--- /dev/null
+++ b/include/linux/coreboot.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * coreboot.h
+ *
+ * Coreboot device and driver interfaces.
+ *
+ * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _LINUX_COREBOOT_H
+#define _LINUX_COREBOOT_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+typedef __aligned(4) u64 cb_u64;
+
+/* List of coreboot entry structures that is used */
+
+#define CB_TAG_FRAMEBUFFER 0x12
+#define LB_TAG_CBMEM_ENTRY 0x31
+
+/* Generic */
+struct coreboot_table_entry {
+ u32 tag;
+ u32 size;
+};
+
+/* Points to a CBMEM entry */
+struct lb_cbmem_ref {
+ u32 tag;
+ u32 size;
+
+ cb_u64 cbmem_addr;
+};
+
+/* Corresponds to LB_TAG_CBMEM_ENTRY */
+struct lb_cbmem_entry {
+ u32 tag;
+ u32 size;
+
+ cb_u64 address;
+ u32 entry_size;
+ u32 id;
+};
+
+#define LB_FRAMEBUFFER_ORIENTATION_NORMAL 0
+#define LB_FRAMEBUFFER_ORIENTATION_BOTTOM_UP 1
+#define LB_FRAMEBUFFER_ORIENTATION_LEFT_UP 2
+#define LB_FRAMEBUFFER_ORIENTATION_RIGHT_UP 3
+
+/* Describes framebuffer setup by coreboot */
+struct lb_framebuffer {
+ u32 tag;
+ u32 size;
+
+ cb_u64 physical_address;
+ u32 x_resolution;
+ u32 y_resolution;
+ u32 bytes_per_line;
+ u8 bits_per_pixel;
+ u8 red_mask_pos;
+ u8 red_mask_size;
+ u8 green_mask_pos;
+ u8 green_mask_size;
+ u8 blue_mask_pos;
+ u8 blue_mask_size;
+ u8 reserved_mask_pos;
+ u8 reserved_mask_size;
+ u8 orientation;
+};
+
+/*
+ * True if the coreboot-provided data is large enough to hold information
+ * on the linear framebuffer. False otherwise.
+ */
+#define LB_FRAMEBUFFER_HAS_LFB(__fb) \
+ ((__fb)->size >= offsetofend(struct lb_framebuffer, reserved_mask_size))
+
+/*
+ * True if the coreboot-provided data is large enough to hold information
+ * on the display orientation. False otherwise.
+ */
+#define LB_FRAMEBUFFER_HAS_ORIENTATION(__fb) \
+ ((__fb)->size >= offsetofend(struct lb_framebuffer, orientation))
+
+#endif /* _LINUX_COREBOOT_H */
diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h
index 5b8ff5ac660d..b72ba3faa036 100644
--- a/include/linux/count_zeros.h
+++ b/include/linux/count_zeros.h
@@ -18,7 +18,7 @@
*
* If the MSB of @x is set, the result is 0.
* If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
- * If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
+ * If @x is 0 then the result is BITS_PER_LONG.
*/
static inline int count_leading_zeros(unsigned long x)
{
@@ -28,8 +28,6 @@ static inline int count_leading_zeros(unsigned long x)
return BITS_PER_LONG - fls64(x);
}
-#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG
-
/**
* count_trailing_zeros - Count the number of zeros from the LSB forwards
* @x: The value
@@ -38,16 +36,11 @@ static inline int count_leading_zeros(unsigned long x)
*
* If the LSB of @x is set, the result is 0.
* If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
- * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
+ * If @x is 0 then the result is BITS_PER_LONG.
*/
static inline int count_trailing_zeros(unsigned long x)
{
-#define COUNT_TRAILING_ZEROS_0 (-1)
-
- if (sizeof(x) == 4)
- return ffs(x);
- else
- return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
+ return x ? __ffs(x) : BITS_PER_LONG;
}
#endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 8239cd95a005..9b6b0d87fdb0 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -229,8 +229,8 @@ static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
#define smt_mitigations SMT_MITIGATIONS_OFF
#endif
-int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status);
-int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status);
-int arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status);
+int arch_prctl_get_branch_landing_pad_state(struct task_struct *t, unsigned long __user *state);
+int arch_prctl_set_branch_landing_pad_state(struct task_struct *t, unsigned long state);
+int arch_prctl_lock_branch_landing_pad_state(struct task_struct *t);
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index cc894fc38971..2ab691828e48 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -79,8 +79,9 @@ struct cpufreq_policy {
* called, but you're in IRQ context */
struct freq_constraints constraints;
- struct freq_qos_request *min_freq_req;
- struct freq_qos_request *max_freq_req;
+ struct freq_qos_request min_freq_req;
+ struct freq_qos_request max_freq_req;
+ struct freq_qos_request boost_freq_req;
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -232,7 +233,7 @@ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
- return cpumask_weight(policy->cpus) > 1;
+ return cpumask_nth(1, policy->cpus) < nr_cpumask_bits;
}
#ifdef CONFIG_CPU_FREQ
@@ -372,7 +373,7 @@ struct cpufreq_driver {
* conditions) scale invariance can be disabled, which causes the
* schedutil governor to fall back to the latter.
*/
- void (*adjust_perf)(unsigned int cpu,
+ void (*adjust_perf)(struct cpufreq_policy *policy,
unsigned long min_perf,
unsigned long target_perf,
unsigned long capacity);
@@ -617,7 +618,7 @@ struct cpufreq_governor {
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq);
-void cpufreq_driver_adjust_perf(unsigned int cpu,
+void cpufreq_driver_adjust_perf(struct cpufreq_policy *policy,
unsigned long min_perf,
unsigned long target_perf,
unsigned long capacity);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 62cd7b35a29c..22ba327ec227 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -92,7 +92,6 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
- CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4073690504a7..a2485348def3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -188,6 +188,7 @@ extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
+extern void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
@@ -226,6 +227,7 @@ static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
+static inline void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev) {}
static inline int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus)
{return -ENODEV; }
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index d35726d6a415..c1dee3f971a9 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -34,13 +34,6 @@ static inline void arch_kexec_protect_crashkres(void) { }
static inline void arch_kexec_unprotect_crashkres(void) { }
#endif
-#ifdef CONFIG_CRASH_DM_CRYPT
-int crash_load_dm_crypt_keys(struct kimage *image);
-ssize_t dm_crypt_keys_read(char *buf, size_t count, u64 *ppos);
-#else
-static inline int crash_load_dm_crypt_keys(struct kimage *image) {return 0; }
-#endif
-
#ifndef arch_crash_handle_hotplug_event
static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { }
#endif
@@ -96,4 +89,11 @@ static inline void crash_save_cpu(struct pt_regs *regs, int cpu) {};
static inline int kimage_crash_copy_vmcoreinfo(struct kimage *image) { return 0; };
#endif /* CONFIG_CRASH_DUMP*/
+#ifdef CONFIG_CRASH_DM_CRYPT
+int crash_load_dm_crypt_keys(struct kimage *image);
+ssize_t dm_crypt_keys_read(char *buf, size_t count, u64 *ppos);
+#else
+static inline int crash_load_dm_crypt_keys(struct kimage *image) {return 0; }
+#endif
+
#endif /* LINUX_CRASH_CORE_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index ed1609d78cd7..c6676265a985 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -33,12 +33,14 @@ struct group_info {
/**
* get_group_info - Get a reference to a group info structure
- * @group_info: The group info to reference
+ * @gi: The group info to reference
*
* This gets a reference to a set of supplementary groups.
*
* If the caller is accessing a task's credentials, they must hold the RCU read
* lock when reading.
+ *
+ * Returns: @gi
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
@@ -209,6 +211,8 @@ DEFINE_CLASS(override_creds,
* usage count. The purpose of this is to attempt to catch at compile time the
* accidental alteration of a set of credentials that should be considered
* immutable.
+ *
+ * Returns: @cred when the references are acquired, NULL otherwise.
*/
static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
@@ -246,8 +250,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
}
/**
- * put_cred - Release a reference to a set of credentials
- * @cred: The credentials to release
+ * put_cred_many - Release a reference to a set of credentials
+ * @_cred: The credentials to release
* @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
diff --git a/include/linux/damon.h b/include/linux/damon.h
index be3d198043ff..d9a3babbafc1 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -55,6 +55,8 @@ struct damon_size_range {
* @list: List head for siblings.
* @age: Age of this region.
*
+ * For any use case, @ar should be non-zero positive size.
+ *
* @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and be
* increased for every &damon_attrs->sample_interval if an access to the region
* during the last sampling interval is found. The update of this field should
@@ -214,11 +216,22 @@ struct damos_quota_goal {
};
/**
+ * enum damos_quota_goal_tuner - Goal-based quota tuning logic.
+ * @DAMOS_QUOTA_GOAL_TUNER_CONSIST: Aim long term consistent quota.
+ * @DAMOS_QUOTA_GOAL_TUNER_TEMPORAL: Aim zero quota asap.
+ */
+enum damos_quota_goal_tuner {
+ DAMOS_QUOTA_GOAL_TUNER_CONSIST,
+ DAMOS_QUOTA_GOAL_TUNER_TEMPORAL,
+};
+
+/**
* struct damos_quota - Controls the aggressiveness of the given scheme.
* @reset_interval: Charge reset interval in milliseconds.
* @ms: Maximum milliseconds that the scheme can use.
* @sz: Maximum bytes of memory that the action can be applied.
* @goals: Head of quota tuning goals (&damos_quota_goal) list.
+ * @goal_tuner: Goal-based @esz tuning algorithm to use.
* @esz: Effective size quota in bytes.
*
* @weight_sz: Weight of the region's size for prioritization.
@@ -260,6 +273,7 @@ struct damos_quota {
unsigned long ms;
unsigned long sz;
struct list_head goals;
+ enum damos_quota_goal_tuner goal_tuner;
unsigned long esz;
unsigned int weight_sz;
@@ -647,8 +661,7 @@ struct damon_operations {
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
int (*get_scheme_score)(struct damon_ctx *context,
- struct damon_target *t, struct damon_region *r,
- struct damos *scheme);
+ struct damon_region *r, struct damos *scheme);
unsigned long (*apply_scheme)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme, unsigned long *sz_filter_passed);
@@ -981,6 +994,7 @@ int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end,
+ unsigned long addr_unit,
unsigned long min_region_sz);
#endif /* CONFIG_DAMON */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index bf103f317cac..10a7cc79aea5 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -69,7 +69,7 @@ static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc,
const struct inode *inode,
struct dax_device *dax_dev)
{
- if (!vma_desc_test_flags(desc, VMA_SYNC_BIT))
+ if (!vma_desc_test(desc, VMA_SYNC_BIT))
return true;
if (!IS_DAX(inode))
return false;
@@ -115,7 +115,7 @@ static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc,
const struct inode *inode,
struct dax_device *dax_dev)
{
- return !vma_desc_test_flags(desc, VMA_SYNC_BIT);
+ return !vma_desc_test(desc, VMA_SYNC_BIT);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 898c60d21c92..c5bd5a74baba 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -264,6 +264,7 @@ extern void d_invalidate(struct dentry *);
extern struct dentry * d_make_root(struct inode *);
extern void d_mark_tmpfile(struct file *, struct inode *);
+int d_mark_tmpfile_name(struct file *file, const struct qstr *name);
extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 38f625af6ab4..cd4faaf5d427 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -755,4 +755,11 @@ static inline unsigned long to_bytes(sector_t n)
return (n << SECTOR_SHIFT);
}
+static inline void dm_stack_bs_limits(struct queue_limits *limits, unsigned int bs)
+{
+ limits->logical_block_size = max(limits->logical_block_size, bs);
+ limits->physical_block_size = max(limits->physical_block_size, bs);
+ limits->io_min = max(limits->io_min, bs);
+}
+
#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index e65d564f01cd..67cec9ec0cd0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -190,6 +190,22 @@ ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
/**
+ * DEVICE_ATTR_RW_NAMED - Define a read-write device attribute with a sysfs name
+ * that differs from the function name.
+ * @_name: Attribute function preface
+ * @_attrname: Attribute name as it will be exposed in the sysfs.
+ *
+ * Like DEVICE_ATTR_RW(), but allows for reusing names under separate paths in
+ * the same driver.
+ */
+#define DEVICE_ATTR_RW_NAMED(_name, _attrname) \
+ struct device_attribute dev_attr_##_name = { \
+ .attr = { .name = _attrname, .mode = 0644 }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+ }
+
+/**
* DEVICE_ATTR_RO - Define a readable device attribute.
* @_name: Attribute name.
*
@@ -208,6 +224,21 @@ ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
/**
+ * DEVICE_ATTR_RO_NAMED - Define a read-only device attribute with a sysfs name
+ * that differs from the function name.
+ * @_name: Attribute function preface
+ * @_attrname: Attribute name as it will be exposed in the sysfs.
+ *
+ * Like DEVICE_ATTR_RO(), but allows for reusing names under separate paths in
+ * the same driver.
+ */
+#define DEVICE_ATTR_RO_NAMED(_name, _attrname) \
+ struct device_attribute dev_attr_##_name = { \
+ .attr = { .name = _attrname, .mode = 0444 }, \
+ .show = _name##_show, \
+ }
+
+/**
* DEVICE_ATTR_WO - Define an admin-only writable device attribute.
* @_name: Attribute name.
*
@@ -217,6 +248,21 @@ ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
/**
+ * DEVICE_ATTR_WO_NAMED - Define a write-only device attribute with a sysfs name
+ * that differs from the function name.
+ * @_name: Attribute function preface
+ * @_attrname: Attribute name as it will be exposed in the sysfs.
+ *
+ * Like DEVICE_ATTR_WO(), but allows for reusing names under separate paths in
+ * the same driver.
+ */
+#define DEVICE_ATTR_WO_NAMED(_name, _attrname) \
+ struct device_attribute dev_attr_##_name = { \
+ .attr = { .name = _attrname, .mode = 0200 }, \
+ .store = _name##_store, \
+ }
+
+/**
* DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
* @_name: Attribute name.
* @_mode: File mode.
@@ -965,6 +1011,7 @@ static inline void device_unlock(struct device *dev)
}
DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+DEFINE_GUARD_COND(device, _intr, device_lock_interruptible(_T), _RET == 0)
static inline void device_lock_assert(struct device *dev)
{
@@ -1185,9 +1232,9 @@ device_create_with_groups(const struct class *cls, struct device *parent, dev_t
void device_destroy(const struct class *cls, dev_t devt);
int __must_check device_add_groups(struct device *dev,
- const struct attribute_group **groups);
+ const struct attribute_group *const *groups);
void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
+ const struct attribute_group *const *groups);
static inline int __must_check device_add_group(struct device *dev,
const struct attribute_group *grp)
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index 65880e60c720..78ab8d2b3e30 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -50,8 +50,8 @@ struct fwnode_handle;
struct class {
const char *name;
- const struct attribute_group **class_groups;
- const struct attribute_group **dev_groups;
+ const struct attribute_group *const *class_groups;
+ const struct attribute_group *const *dev_groups;
int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(const struct device *dev, umode_t *mode);
@@ -62,7 +62,7 @@ struct class {
int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
- const void *(*namespace)(const struct device *dev);
+ const struct ns_common *(*namespace)(const struct device *dev);
void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid);
@@ -180,9 +180,9 @@ struct class_attribute {
struct class_attribute class_attr_##_name = __ATTR_WO(_name)
int __must_check class_create_file_ns(const struct class *class, const struct class_attribute *attr,
- const void *ns);
+ const struct ns_common *ns);
void class_remove_file_ns(const struct class *class, const struct class_attribute *attr,
- const void *ns);
+ const struct ns_common *ns);
static inline int __must_check class_create_file(const struct class *class,
const struct class_attribute *attr)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 133b9e637b55..166933b82e27 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -407,7 +407,7 @@ struct dma_buf {
* through the device.
*
* - Dynamic importers should set fences for any access that they can't
- * disable immediately from their &dma_buf_attach_ops.move_notify
+ * disable immediately from their &dma_buf_attach_ops.invalidate_mappings
* callback.
*
* IMPORTANT:
@@ -446,7 +446,7 @@ struct dma_buf_attach_ops {
bool allow_peer2peer;
/**
- * @move_notify: [optional] notification that the DMA-buf is moving
+ * @invalidate_mappings: [optional] notification that the DMA-buf is moving
*
* If this callback is provided the framework can avoid pinning the
* backing store while mappings exists.
@@ -456,14 +456,10 @@ struct dma_buf_attach_ops {
* called with this lock held as well. This makes sure that no mapping
* is created concurrently with an ongoing move operation.
*
- * Mappings stay valid and are not directly affected by this callback.
- * But the DMA-buf can now be in a different physical location, so all
- * mappings should be destroyed and re-created as soon as possible.
- *
- * New mappings can be created after this callback returns, and will
- * point to the new location of the DMA-buf.
+ * See the kdoc for dma_buf_invalidate_mappings() for details on the
+ * required behavior.
*/
- void (*move_notify)(struct dma_buf_attachment *attach);
+ void (*invalidate_mappings)(struct dma_buf_attachment *attach);
};
/**
@@ -578,7 +574,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
-void dma_buf_move_notify(struct dma_buf *dma_buf);
+void dma_buf_invalidate_mappings(struct dma_buf *dma_buf);
+bool dma_buf_attach_revocable(struct dma_buf_attachment *attach);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
diff --git a/include/linux/dma-buf/heaps/cma.h b/include/linux/dma-buf/heaps/cma.h
deleted file mode 100644
index e751479e21e7..000000000000
--- a/include/linux/dma-buf/heaps/cma.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DMA_BUF_HEAP_CMA_H_
-#define DMA_BUF_HEAP_CMA_H_
-
-struct cma;
-
-#ifdef CONFIG_DMABUF_HEAPS_CMA
-int dma_heap_cma_register_heap(struct cma *cma);
-#else
-static inline int dma_heap_cma_register_heap(struct cma *cma)
-{
- return 0;
-}
-#endif // CONFIG_DMABUF_HEAPS_CMA
-
-#endif // DMA_BUF_HEAP_CMA_H_
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 079b3dec0a16..370b3d2bba37 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -38,7 +38,6 @@ struct dma_fence_array_cb {
struct dma_fence_array {
struct dma_fence base;
- spinlock_t lock;
unsigned num_fences;
atomic_t num_pending;
struct dma_fence **fences;
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 5cd3ba53b4a1..df3beadf1515 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -46,7 +46,6 @@ struct dma_fence_chain {
*/
struct irq_work work;
};
- spinlock_t lock;
};
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index d4c92fd35092..b52ab692b22e 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -34,7 +34,8 @@ struct seq_file;
* @ops: dma_fence_ops associated with this fence
* @rcu: used for releasing fence with kfree_rcu
* @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
+ * @extern_lock: external spin_lock_irqsave used for locking (deprecated)
+ * @inline_lock: alternative internal spin_lock_irqsave used for locking
* @context: execution context this fence belongs to, returned by
* dma_fence_context_alloc()
* @seqno: the sequence number of this fence inside the execution context,
@@ -48,6 +49,8 @@ struct seq_file;
* atomic ops (bit_*), so taking the spinlock will not be needed most
* of the time.
*
+ * DMA_FENCE_FLAG_INITIALIZED_BIT - fence was initialized
+ * DMA_FENCE_FLAG_INLINE_LOCK_BIT - use inline spinlock instead of external one
* DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
* DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
* DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
@@ -65,8 +68,11 @@ struct seq_file;
* been completed, or never called at all.
*/
struct dma_fence {
- spinlock_t *lock;
- const struct dma_fence_ops *ops;
+ union {
+ spinlock_t *extern_lock;
+ spinlock_t inline_lock;
+ };
+ const struct dma_fence_ops __rcu *ops;
/*
* We clear the callback list on kref_put so that by the time we
* release the fence it is unused. No one should be adding to the
@@ -98,6 +104,8 @@ struct dma_fence {
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_INITIALIZED_BIT,
+ DMA_FENCE_FLAG_INLINE_LOCK_BIT,
DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
@@ -218,6 +226,10 @@ struct dma_fence_ops {
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
+ *
+ * Implementing this callback prevents the fence from detaching after
+ * signaling and so it is necessary for the module providing the
+ * dma_fence_ops to stay loaded as long as the dma_fence exists.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -229,6 +241,13 @@ struct dma_fence_ops {
* Can be called from irq context. This callback is optional. If it is
* NULL, then dma_fence_free() is instead called as the default
* implementation.
+ *
+ * Implementing this callback prevents the fence from detaching after
+ * signaling and so it is necessary for the module providing the
+ * dma_fence_ops to stay loaded as long as the dma_fence exists.
+ *
+ * If the callback is implemented the memory backing the dma_fence
+ * object must be freed RCU safe.
*/
void (*release)(struct dma_fence *fence);
@@ -264,6 +283,19 @@ void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
+ * dma_fence_was_initialized - test if fence was initialized
+ * @fence: fence to test
+ *
+ * Return: True if fence was ever initialized, false otherwise. Works correctly
+ * only when memory backing the fence structure is zero initialized on
+ * allocation.
+ */
+static inline bool dma_fence_was_initialized(struct dma_fence *fence)
+{
+ return fence && test_bit(DMA_FENCE_FLAG_INITIALIZED_BIT, &fence->flags);
+}
+
+/**
* dma_fence_put - decreases refcount of the fence
* @fence: fence to reduce refcount of
*/
@@ -351,6 +383,45 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
} while (1);
}
+/**
+ * dma_fence_spinlock - return pointer to the spinlock protecting the fence
+ * @fence: the fence to get the lock from
+ *
+ * Return either the pointer to the embedded or the external spin lock.
+ */
+static inline spinlock_t *dma_fence_spinlock(struct dma_fence *fence)
+{
+ return test_bit(DMA_FENCE_FLAG_INLINE_LOCK_BIT, &fence->flags) ?
+ &fence->inline_lock : fence->extern_lock;
+}
+
+/**
+ * dma_fence_lock_irqsave - irqsave lock the fence
+ * @fence: the fence to lock
+ * @flags: where to store the CPU flags.
+ *
+ * Lock the fence, preventing it from changing to the signaled state.
+ */
+#define dma_fence_lock_irqsave(fence, flags) \
+ spin_lock_irqsave(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_unlock_irqrestore - unlock the fence and irqrestore
+ * @fence: the fence to unlock
+ * @flags: the CPU flags to restore
+ *
+ * Unlock the fence, allowing it to change its state to signaled again.
+ */
+#define dma_fence_unlock_irqrestore(fence, flags) \
+ spin_unlock_irqrestore(dma_fence_spinlock(fence), flags)
+
+/**
+ * dma_fence_assert_held - lockdep assertion that fence is locked
+ * @fence: the fence which should be locked
+ */
+#define dma_fence_assert_held(fence) \
+ lockdep_assert_held(dma_fence_spinlock(fence));
+
#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
@@ -439,13 +510,19 @@ dma_fence_test_signaled_flag(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
+ const struct dma_fence_ops *ops;
+
if (dma_fence_test_signaled_flag(fence))
return true;
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ rcu_read_lock();
+ ops = rcu_dereference(fence->ops);
+ if (ops && ops->signaled && ops->signaled(fence)) {
+ rcu_read_unlock();
dma_fence_signal_locked(fence);
return true;
}
+ rcu_read_unlock();
return false;
}
@@ -469,13 +546,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
+ const struct dma_fence_ops *ops;
+
if (dma_fence_test_signaled_flag(fence))
return true;
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ rcu_read_lock();
+ ops = rcu_dereference(fence->ops);
+ if (ops && ops->signaled && ops->signaled(fence)) {
+ rcu_read_unlock();
dma_fence_signal(fence);
return true;
}
+ rcu_read_unlock();
return false;
}
@@ -680,7 +763,7 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
*/
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
- return fence->ops == &dma_fence_array_ops;
+ return rcu_access_pointer(fence->ops) == &dma_fence_array_ops;
}
/**
@@ -691,7 +774,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
*/
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
- return fence->ops == &dma_fence_chain_ops;
+ return rcu_access_pointer(fence->ops) == &dma_fence_chain_ops;
}
/**
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 60b63756df82..6a1832a73cad 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -91,14 +91,8 @@ static inline void set_dma_ops(struct device *dev,
#endif /* CONFIG_ARCH_HAS_DMA_OPS */
#ifdef CONFIG_DMA_CMA
-extern struct cma *dma_contiguous_default_area;
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- if (dev && dev->cma_area)
- return dev->cma_area;
- return dma_contiguous_default_area;
-}
+struct cma *dev_get_cma_area(struct device *dev);
+struct cma *dma_contiguous_get_area_by_idx(unsigned int idx);
void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
@@ -117,6 +111,10 @@ static inline struct cma *dev_get_cma_area(struct device *dev)
{
return NULL;
}
+static inline struct cma *dma_contiguous_get_area_by_idx(unsigned int idx)
+{
+ return NULL;
+}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
@@ -147,9 +145,6 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
{
__free_pages(page, get_order(size));
}
-static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
-{
-}
#endif /* CONFIG_DMA_CMA*/
#ifdef CONFIG_DMA_DECLARE_COHERENT
@@ -361,6 +356,12 @@ static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+#ifndef CONFIG_ARCH_HAS_BATCHED_DMA_SYNC
+static inline void arch_sync_dma_flush(void)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 99ef042ecdb4..db8ab24a54f4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,7 +9,7 @@
#include <linux/bug.h>
#include <linux/cache.h>
-/**
+/*
* List of possible attributes associated with a DMA mapping. The semantics
* of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
*/
@@ -92,6 +92,16 @@
* flushing.
*/
#define DMA_ATTR_REQUIRE_COHERENT (1UL << 12)
+/*
+ * DMA_ATTR_CC_SHARED: Indicates the DMA mapping is shared (decrypted) for
+ * confidential computing guests. For normal system memory the caller must have
+ * called set_memory_decrypted(), and pgprot_decrypted must be used when
+ * creating CPU PTEs for the mapping. The same shared semantic may be passed
+ * to the vIOMMU when it sets up the IOPTE. For MMIO use together with
+ * DMA_ATTR_MMIO to indicate shared MMIO. Unless DMA_ATTR_MMIO is provided
+ * a struct page is required.
+ */
+#define DMA_ATTR_CC_SHARED (1UL << 13)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index 270b5458aecf..1fafd5b0e315 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -73,6 +73,8 @@ enum dw_edma_chip_flags {
* @ll_region_rd: DMA descriptor link list memory for read channel
* @dt_region_wr: DMA data memory for write channel
* @dt_region_rd: DMA data memory for read channel
+ * @db_irq: Virtual IRQ dedicated to interrupt emulation
+ * @db_offset: Offset from DMA register base
* @mf: DMA register map format
* @dw: struct dw_edma that is filled by dw_edma_probe()
*/
@@ -94,9 +96,14 @@ struct dw_edma_chip {
struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+ /* interrupt emulation */
+ int db_irq;
+ resource_size_t db_offset;
+
enum dw_edma_map_format mf;
struct dw_edma *dw;
+ bool cfg_non_ll;
};
/* Export to the platform drivers */
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
index 6680dd1a43c6..332be28427e4 100644
--- a/include/linux/dma/qcom-gpi-dma.h
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -8,6 +8,9 @@
/**
* enum spi_transfer_cmd - spi transfer commands
+ * @SPI_TX: SPI peripheral TX command
+ * @SPI_RX: SPI peripheral RX command
+ * @SPI_DUPLEX: SPI peripheral Duplex command
*/
enum spi_transfer_cmd {
SPI_TX = 1,
@@ -64,7 +67,7 @@ enum i2c_op {
* @set_config: set peripheral config
* @rx_len: receive length for buffer
* @op: i2c cmd
- * @muli-msg: is part of multi i2c r-w msgs
+ * @multi_msg: is part of multi i2c r-w msgs
*/
struct gpi_i2c_config {
u8 set_config;
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
index c53c0f6e3b1a..3fe19b75ddf7 100644
--- a/include/linux/dma/ti-cppi5.h
+++ b/include/linux/dma/ti-cppi5.h
@@ -16,8 +16,8 @@
* struct cppi5_desc_hdr_t - Descriptor header, present in all types of
* descriptors
* @pkt_info0: Packet info word 0 (n/a in Buffer desc)
- * @pkt_info0: Packet info word 1 (n/a in Buffer desc)
- * @pkt_info0: Packet info word 2 (n/a in Buffer desc)
+ * @pkt_info1: Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2: Packet info word 2 (n/a in Buffer desc)
* @src_dst_tag: Packet info word 3 (n/a in Buffer desc)
*/
struct cppi5_desc_hdr_t {
@@ -35,7 +35,7 @@ struct cppi5_desc_hdr_t {
* @buf_info1: word 8: Buffer valid data length
* @org_buf_len: word 9: Original buffer length
* @org_buf_ptr: word 10/11: Original buffer pointer
- * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
+ * @epib: Extended Packet Info Data (optional, 4 words), and/or
* Protocol Specific Data (optional, 0-128 bytes in
* multiples of 4), and/or
* Other Software Data (0-N bytes, optional)
@@ -132,7 +132,7 @@ struct cppi5_desc_epib_t {
/**
* struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
* @hdr: Descriptor header
- * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
+ * @epib: Extended Packet Info Data (optional, 4 words), and/or
* Protocol Specific Data (optional, 0-128 bytes in
* multiples of 4), and/or
* Other Software Data (0-N bytes, optional)
@@ -179,7 +179,7 @@ static inline void cppi5_desc_dump(void *desc, u32 size)
* cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
* @paddr: Physical address of the packet popped from the ring
*
- * Returns true if the address indicates TDCM
+ * Returns: true if the address indicates TDCM
*/
static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
{
@@ -190,7 +190,7 @@ static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
* cppi5_desc_get_type - get descriptor type
* @desc_hdr: packet descriptor/TR header
*
- * Returns descriptor type:
+ * Returns: descriptor type:
* CPPI5_INFO0_DESC_TYPE_VAL_HOST
* CPPI5_INFO0_DESC_TYPE_VAL_MONO
* CPPI5_INFO0_DESC_TYPE_VAL_TR
@@ -205,7 +205,7 @@ static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
* cppi5_desc_get_errflags - get Error Flags from Desc
* @desc_hdr: packet/TR descriptor header
*
- * Returns Error Flags from Packet/TR Descriptor
+ * Returns: Error Flags from Packet/TR Descriptor
*/
static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
{
@@ -307,7 +307,7 @@ static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
* @psdata_size: PSDATA size
* @sw_data_size: SWDATA size
*
- * Returns required Host Packet Descriptor size
+ * Returns: required Host Packet Descriptor size
* 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
*/
static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
@@ -381,6 +381,8 @@ cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
/**
* cppi5_hdesc_get_psdata_size - get PSdata size in bytes
* @desc: Host packet descriptor
+ *
+ * Returns: PSdata size in bytes
*/
static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
{
@@ -398,7 +400,7 @@ static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
* cppi5_hdesc_get_pktlen - get Packet Length from HDesc
* @desc: Host packet descriptor
*
- * Returns Packet Length from Host Packet Descriptor
+ * Returns: Packet Length from Host Packet Descriptor
*/
static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
{
@@ -408,6 +410,7 @@ static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
/**
* cppi5_hdesc_set_pktlen - set Packet Length in HDesc
* @desc: Host packet descriptor
+ * @pkt_len: Packet length to set
*/
static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
u32 pkt_len)
@@ -420,7 +423,7 @@ static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
* cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
* @desc: Host packet descriptor
*
- * Returns Protocol Specific Flags from Host Packet Descriptor
+ * Returns: Protocol Specific Flags from Host Packet Descriptor
*/
static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
{
@@ -431,6 +434,7 @@ static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
/**
* cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
* @desc: Host packet descriptor
+ * @ps_flags: Protocol Specific flags to set
*/
static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
u32 ps_flags)
@@ -442,8 +446,10 @@ static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
}
/**
- * cppi5_hdesc_get_errflags - get Packet Type from HDesc
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
* @desc: Host packet descriptor
+ *
+ * Returns: Packet type
*/
static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
{
@@ -452,7 +458,7 @@ static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
}
/**
- * cppi5_hdesc_get_errflags - set Packet Type in HDesc
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
* @desc: Host packet descriptor
* @pkt_type: Packet Type
*/
@@ -501,7 +507,7 @@ static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
/**
* cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
* @desc: Host Packet Descriptor
- * @buf_desc: Host Buffer Descriptor physical address
+ * @hbuf_desc: Host Buffer Descriptor physical address
*
* add and link Host Buffer Descriptor to HDesc
*/
@@ -527,7 +533,7 @@ static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
* cppi5_hdesc_epib_present - check if EPIB present
* @desc_hdr: packet descriptor/TR header
*
- * Returns true if EPIB present in the packet
+ * Returns: true if EPIB present in the packet
*/
static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
{
@@ -538,7 +544,7 @@ static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
* cppi5_hdesc_get_psdata - Get pointer on PSDATA
* @desc: Host packet descriptor
*
- * Returns pointer on PSDATA in HDesc.
+ * Returns: pointer on PSDATA in HDesc.
* NULL - if ps_data placed at the start of data buffer.
*/
static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
@@ -568,7 +574,7 @@ static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
* cppi5_hdesc_get_swdata - Get pointer on swdata
* @desc: Host packet descriptor
*
- * Returns pointer on SWDATA in HDesc.
+ * Returns: pointer on SWDATA in HDesc.
* NOTE. It's caller responsibility to be sure hdesc actually has swdata.
*/
static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
@@ -648,6 +654,7 @@ enum cppi5_tr_types {
CPPI5_TR_TYPE11,
/* type12-14: Reserved */
CPPI5_TR_TYPE15 = 15,
+ /* private: */
CPPI5_TR_TYPE_MAX
};
@@ -673,6 +680,7 @@ enum cppi5_tr_event_size {
CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+ /* private: */
CPPI5_TR_EVENT_SIZE_MAX
};
@@ -690,6 +698,7 @@ enum cppi5_tr_trigger {
CPPI5_TR_TRIGGER_GLOBAL0,
CPPI5_TR_TRIGGER_GLOBAL1,
CPPI5_TR_TRIGGER_LOCAL_EVENT,
+ /* private: */
CPPI5_TR_TRIGGER_MAX
};
@@ -711,6 +720,7 @@ enum cppi5_tr_trigger_type {
CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
CPPI5_TR_TRIGGER_TYPE_ALL,
+ /* private: */
CPPI5_TR_TRIGGER_TYPE_MAX
};
@@ -815,7 +825,7 @@ struct cppi5_tr_type3_t {
* destination
* @dicnt1: Total loop iteration count for level 1 for destination
* @dicnt2: Total loop iteration count for level 2 for destination
- * @sicnt3: Total loop iteration count for level 3 (outermost) for
+ * @dicnt3: Total loop iteration count for level 3 (outermost) for
* destination
*/
struct cppi5_tr_type15_t {
@@ -887,6 +897,7 @@ enum cppi5_tr_resp_status_type {
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+ /* private: */
CPPI5_TR_RESPONSE_STATUS_MAX
};
@@ -903,6 +914,7 @@ enum cppi5_tr_resp_status_submission {
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+ /* private: */
CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
};
@@ -931,6 +943,7 @@ enum cppi5_tr_resp_status_unsupported {
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+ /* private: */
CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
};
@@ -939,7 +952,7 @@ enum cppi5_tr_resp_status_unsupported {
* @tr_count: number of TR records
* @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
*
- * Returns required TR Descriptor size
+ * Returns: required TR Descriptor size
*/
static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
{
@@ -955,7 +968,7 @@ static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
/**
* cppi5_trdesc_init - Init TR Descriptor
- * @desc: TR Descriptor
+ * @desc_hdr: TR Descriptor
* @tr_count: number of TR records
* @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
* @reload_idx: Absolute index to jump to on the 2nd and following passes
@@ -1044,7 +1057,7 @@ static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
}
/**
- * cppi5_tr_cflag_set - Update the Configuration specific flags
+ * cppi5_tr_csf_set - Update the Configuration specific flags
* @flags: Pointer to the TR's flags
* @csf: Configuration specific flags
*
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 99efe2b9b4ea..b3d251c9734e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -996,7 +996,8 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
* @vecs: The array of DMA vectors that should be transferred
* @nents: The number of DMA vectors in the array
* @dir: Specifies the direction of the data transfer
- * @flags: DMA engine flags
+ * @flags: DMA engine flags - DMA_PREP_REPEAT can be used to mark a cyclic
+ * DMA transfer
*/
static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 927f8a8b7a1d..c8700e6a694d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -60,6 +60,7 @@ enum dmi_entry_type {
DMI_ENTRY_OOB_REMOTE_ACCESS,
DMI_ENTRY_BIS_ENTRY,
DMI_ENTRY_SYSTEM_BOOT,
+ DMI_ENTRY_64_MEM_ERROR,
DMI_ENTRY_MGMT_DEV,
DMI_ENTRY_MGMT_DEV_COMPONENT,
DMI_ENTRY_MGMT_DEV_THRES,
@@ -69,6 +70,10 @@ enum dmi_entry_type {
DMI_ENTRY_ADDITIONAL,
DMI_ENTRY_ONBOARD_DEV_EXT,
DMI_ENTRY_MGMT_CONTROLLER_HOST,
+ DMI_ENTRY_TPM_DEVICE,
+ DMI_ENTRY_PROCESSOR_ADDITIONAL,
+ DMI_ENTRY_FIRMWARE_INVENTORY,
+ DMI_ENTRY_STRING_PROPERTY,
DMI_ENTRY_INACTIVE = 126,
DMI_ENTRY_END_OF_TABLE = 127,
};
@@ -86,6 +91,21 @@ struct dmi_device {
void *device_data; /* Type specific data */
};
+#define DMI_A_INFO_ENT_MIN_SIZE 0x6
+struct dmi_a_info_entry {
+ u8 length;
+ u16 handle;
+ u8 offset;
+ u8 str_num;
+ u8 value[];
+} __packed;
+
+#define DMI_A_INFO_MIN_SIZE 0xB
+struct dmi_a_info {
+ struct dmi_header header;
+ u8 count;
+} __packed;
+
#ifdef CONFIG_DMI
struct dmi_dev_onboard {
@@ -115,6 +135,7 @@ extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
extern u64 dmi_memdev_size(u16 handle);
extern u8 dmi_memdev_type(u16 handle);
extern u16 dmi_memdev_handle(int slot);
+const char *dmi_string_nosave(const struct dmi_header *dm, u8 s);
#else
@@ -148,6 +169,8 @@ static inline u8 dmi_memdev_type(u16 handle) { return 0x0; }
static inline u16 dmi_memdev_handle(int slot) { return 0xffff; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
+static inline const char *
+ dmi_string_nosave(const struct dmi_header *dm, u8 s) { return ""; }
#endif
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 2ce295b46b8c..b7277a8b484d 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -52,6 +52,12 @@ struct dpll_device_ops {
int (*phase_offset_avg_factor_get)(const struct dpll_device *dpll,
void *dpll_priv, u32 *factor,
struct netlink_ext_ack *extack);
+ int (*freq_monitor_set)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack);
+ int (*freq_monitor_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_ops {
@@ -110,6 +116,10 @@ struct dpll_pin_ops {
int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *ffo, struct netlink_ext_ack *extack);
+ int (*measured_freq_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *measured_freq,
+ struct netlink_ext_ack *extack);
int (*esync_set)(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
u64 freq, struct netlink_ext_ack *extack);
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index 53f44b8cd75f..f53c534aba0c 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -87,7 +87,7 @@
*/
GENL_struct(DRBD_NLA_CFG_REPLY, 1, drbd_cfg_reply,
/* "arbitrary" size strings, nla_policy.len = 0 */
- __str_field(1, DRBD_GENLA_F_MANDATORY, info_text, 0)
+ __str_field(1, 0, info_text, 0)
)
/* Configuration requests typically need a context to operate on.
@@ -96,10 +96,10 @@ GENL_struct(DRBD_NLA_CFG_REPLY, 1, drbd_cfg_reply,
* and/or the replication group (aka resource) name,
* and the volume id within the resource. */
GENL_struct(DRBD_NLA_CFG_CONTEXT, 2, drbd_cfg_context,
- __u32_field(1, DRBD_GENLA_F_MANDATORY, ctx_volume)
- __str_field(2, DRBD_GENLA_F_MANDATORY, ctx_resource_name, 128)
- __bin_field(3, DRBD_GENLA_F_MANDATORY, ctx_my_addr, 128)
- __bin_field(4, DRBD_GENLA_F_MANDATORY, ctx_peer_addr, 128)
+ __u32_field(1, 0, ctx_volume)
+ __str_field(2, 0, ctx_resource_name, 128)
+ __bin_field(3, 0, ctx_my_addr, 128)
+ __bin_field(4, 0, ctx_peer_addr, 128)
)
GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
@@ -108,86 +108,86 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
__s32_field(3, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev_idx)
/* use the resize command to try and change the disk_size */
- __u64_field(4, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, disk_size)
+ __u64_field(4, DRBD_F_INVARIANT, disk_size)
/* we could change the max_bio_bvecs,
* but it won't propagate through the stack */
- __u32_field(5, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, max_bio_bvecs)
-
- __u32_field_def(6, DRBD_GENLA_F_MANDATORY, on_io_error, DRBD_ON_IO_ERROR_DEF)
- __u32_field_def(7, DRBD_GENLA_F_MANDATORY, fencing, DRBD_FENCING_DEF)
-
- __u32_field_def(8, DRBD_GENLA_F_MANDATORY, resync_rate, DRBD_RESYNC_RATE_DEF)
- __s32_field_def(9, DRBD_GENLA_F_MANDATORY, resync_after, DRBD_MINOR_NUMBER_DEF)
- __u32_field_def(10, DRBD_GENLA_F_MANDATORY, al_extents, DRBD_AL_EXTENTS_DEF)
- __u32_field_def(11, DRBD_GENLA_F_MANDATORY, c_plan_ahead, DRBD_C_PLAN_AHEAD_DEF)
- __u32_field_def(12, DRBD_GENLA_F_MANDATORY, c_delay_target, DRBD_C_DELAY_TARGET_DEF)
- __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF)
- __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF)
- __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF)
- __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
+ __u32_field(5, DRBD_F_INVARIANT, max_bio_bvecs)
+
+ __u32_field_def(6, 0, on_io_error, DRBD_ON_IO_ERROR_DEF)
+ __u32_field_def(7, 0, fencing, DRBD_FENCING_DEF)
+
+ __u32_field_def(8, 0, resync_rate, DRBD_RESYNC_RATE_DEF)
+ __s32_field_def(9, 0, resync_after, DRBD_MINOR_NUMBER_DEF)
+ __u32_field_def(10, 0, al_extents, DRBD_AL_EXTENTS_DEF)
+ __u32_field_def(11, 0, c_plan_ahead, DRBD_C_PLAN_AHEAD_DEF)
+ __u32_field_def(12, 0, c_delay_target, DRBD_C_DELAY_TARGET_DEF)
+ __u32_field_def(13, 0, c_fill_target, DRBD_C_FILL_TARGET_DEF)
+ __u32_field_def(14, 0, c_max_rate, DRBD_C_MAX_RATE_DEF)
+ __u32_field_def(15, 0, c_min_rate, DRBD_C_MIN_RATE_DEF)
+ __u32_field_def(20, 0, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
__u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF)
__u32_field_def(25, 0 /* OPTIONAL */, rs_discard_granularity, DRBD_RS_DISCARD_GRANULARITY_DEF)
- __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF)
- __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF)
- __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
- __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
+ __flg_field_def(16, 0, disk_barrier, DRBD_DISK_BARRIER_DEF)
+ __flg_field_def(17, 0, disk_flushes, DRBD_DISK_FLUSHES_DEF)
+ __flg_field_def(18, 0, disk_drain, DRBD_DISK_DRAIN_DEF)
+ __flg_field_def(19, 0, md_flushes, DRBD_MD_FLUSHES_DEF)
__flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
__flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF)
__flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF)
)
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
- __str_field_def(1, DRBD_GENLA_F_MANDATORY, cpu_mask, DRBD_CPU_MASK_SIZE)
- __u32_field_def(2, DRBD_GENLA_F_MANDATORY, on_no_data, DRBD_ON_NO_DATA_DEF)
+ __str_field_def(1, 0, cpu_mask, DRBD_CPU_MASK_SIZE)
+ __u32_field_def(2, 0, on_no_data, DRBD_ON_NO_DATA_DEF)
)
GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf,
- __str_field_def(1, DRBD_GENLA_F_MANDATORY | DRBD_F_SENSITIVE,
+ __str_field_def(1, DRBD_F_SENSITIVE,
shared_secret, SHARED_SECRET_MAX)
- __str_field_def(2, DRBD_GENLA_F_MANDATORY, cram_hmac_alg, SHARED_SECRET_MAX)
- __str_field_def(3, DRBD_GENLA_F_MANDATORY, integrity_alg, SHARED_SECRET_MAX)
- __str_field_def(4, DRBD_GENLA_F_MANDATORY, verify_alg, SHARED_SECRET_MAX)
- __str_field_def(5, DRBD_GENLA_F_MANDATORY, csums_alg, SHARED_SECRET_MAX)
- __u32_field_def(6, DRBD_GENLA_F_MANDATORY, wire_protocol, DRBD_PROTOCOL_DEF)
- __u32_field_def(7, DRBD_GENLA_F_MANDATORY, connect_int, DRBD_CONNECT_INT_DEF)
- __u32_field_def(8, DRBD_GENLA_F_MANDATORY, timeout, DRBD_TIMEOUT_DEF)
- __u32_field_def(9, DRBD_GENLA_F_MANDATORY, ping_int, DRBD_PING_INT_DEF)
- __u32_field_def(10, DRBD_GENLA_F_MANDATORY, ping_timeo, DRBD_PING_TIMEO_DEF)
- __u32_field_def(11, DRBD_GENLA_F_MANDATORY, sndbuf_size, DRBD_SNDBUF_SIZE_DEF)
- __u32_field_def(12, DRBD_GENLA_F_MANDATORY, rcvbuf_size, DRBD_RCVBUF_SIZE_DEF)
- __u32_field_def(13, DRBD_GENLA_F_MANDATORY, ko_count, DRBD_KO_COUNT_DEF)
- __u32_field_def(14, DRBD_GENLA_F_MANDATORY, max_buffers, DRBD_MAX_BUFFERS_DEF)
- __u32_field_def(15, DRBD_GENLA_F_MANDATORY, max_epoch_size, DRBD_MAX_EPOCH_SIZE_DEF)
- __u32_field_def(16, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF)
- __u32_field_def(17, DRBD_GENLA_F_MANDATORY, after_sb_0p, DRBD_AFTER_SB_0P_DEF)
- __u32_field_def(18, DRBD_GENLA_F_MANDATORY, after_sb_1p, DRBD_AFTER_SB_1P_DEF)
- __u32_field_def(19, DRBD_GENLA_F_MANDATORY, after_sb_2p, DRBD_AFTER_SB_2P_DEF)
- __u32_field_def(20, DRBD_GENLA_F_MANDATORY, rr_conflict, DRBD_RR_CONFLICT_DEF)
- __u32_field_def(21, DRBD_GENLA_F_MANDATORY, on_congestion, DRBD_ON_CONGESTION_DEF)
- __u32_field_def(22, DRBD_GENLA_F_MANDATORY, cong_fill, DRBD_CONG_FILL_DEF)
- __u32_field_def(23, DRBD_GENLA_F_MANDATORY, cong_extents, DRBD_CONG_EXTENTS_DEF)
- __flg_field_def(24, DRBD_GENLA_F_MANDATORY, two_primaries, DRBD_ALLOW_TWO_PRIMARIES_DEF)
- __flg_field(25, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, discard_my_data)
- __flg_field_def(26, DRBD_GENLA_F_MANDATORY, tcp_cork, DRBD_TCP_CORK_DEF)
- __flg_field_def(27, DRBD_GENLA_F_MANDATORY, always_asbp, DRBD_ALWAYS_ASBP_DEF)
- __flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative)
- __flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF)
- /* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */
- /* 9: __str_field_def(31, DRBD_GENLA_F_MANDATORY, name, SHARED_SECRET_MAX) */
+ __str_field_def(2, 0, cram_hmac_alg, SHARED_SECRET_MAX)
+ __str_field_def(3, 0, integrity_alg, SHARED_SECRET_MAX)
+ __str_field_def(4, 0, verify_alg, SHARED_SECRET_MAX)
+ __str_field_def(5, 0, csums_alg, SHARED_SECRET_MAX)
+ __u32_field_def(6, 0, wire_protocol, DRBD_PROTOCOL_DEF)
+ __u32_field_def(7, 0, connect_int, DRBD_CONNECT_INT_DEF)
+ __u32_field_def(8, 0, timeout, DRBD_TIMEOUT_DEF)
+ __u32_field_def(9, 0, ping_int, DRBD_PING_INT_DEF)
+ __u32_field_def(10, 0, ping_timeo, DRBD_PING_TIMEO_DEF)
+ __u32_field_def(11, 0, sndbuf_size, DRBD_SNDBUF_SIZE_DEF)
+ __u32_field_def(12, 0, rcvbuf_size, DRBD_RCVBUF_SIZE_DEF)
+ __u32_field_def(13, 0, ko_count, DRBD_KO_COUNT_DEF)
+ __u32_field_def(14, 0, max_buffers, DRBD_MAX_BUFFERS_DEF)
+ __u32_field_def(15, 0, max_epoch_size, DRBD_MAX_EPOCH_SIZE_DEF)
+ __u32_field_def(16, 0, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF)
+ __u32_field_def(17, 0, after_sb_0p, DRBD_AFTER_SB_0P_DEF)
+ __u32_field_def(18, 0, after_sb_1p, DRBD_AFTER_SB_1P_DEF)
+ __u32_field_def(19, 0, after_sb_2p, DRBD_AFTER_SB_2P_DEF)
+ __u32_field_def(20, 0, rr_conflict, DRBD_RR_CONFLICT_DEF)
+ __u32_field_def(21, 0, on_congestion, DRBD_ON_CONGESTION_DEF)
+ __u32_field_def(22, 0, cong_fill, DRBD_CONG_FILL_DEF)
+ __u32_field_def(23, 0, cong_extents, DRBD_CONG_EXTENTS_DEF)
+ __flg_field_def(24, 0, two_primaries, DRBD_ALLOW_TWO_PRIMARIES_DEF)
+ __flg_field(25, DRBD_F_INVARIANT, discard_my_data)
+ __flg_field_def(26, 0, tcp_cork, DRBD_TCP_CORK_DEF)
+ __flg_field_def(27, 0, always_asbp, DRBD_ALWAYS_ASBP_DEF)
+ __flg_field(28, DRBD_F_INVARIANT, tentative)
+ __flg_field_def(29, 0, use_rle, DRBD_USE_RLE_DEF)
+ /* 9: __u32_field_def(30, 0, fencing_policy, DRBD_FENCING_DEF) */
+ /* 9: __str_field_def(31, 0, name, SHARED_SECRET_MAX) */
/* 9: __u32_field(32, DRBD_F_REQUIRED | DRBD_F_INVARIANT, peer_node_id) */
__flg_field_def(33, 0 /* OPTIONAL */, csums_after_crash_only, DRBD_CSUMS_AFTER_CRASH_ONLY_DEF)
__u32_field_def(34, 0 /* OPTIONAL */, sock_check_timeo, DRBD_SOCKET_CHECK_TIMEO_DEF)
)
GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms,
- __flg_field(1, DRBD_GENLA_F_MANDATORY, assume_uptodate)
+ __flg_field(1, 0, assume_uptodate)
)
GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
- __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size)
- __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force)
- __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync)
+ __u64_field(1, 0, resize_size)
+ __flg_field(2, 0, resize_force)
+ __flg_field(3, 0, no_resync)
__u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF)
__u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF)
)
@@ -195,31 +195,31 @@ GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
/* the reason of the broadcast,
* if this is an event triggered broadcast. */
- __u32_field(1, DRBD_GENLA_F_MANDATORY, sib_reason)
+ __u32_field(1, 0, sib_reason)
__u32_field(2, DRBD_F_REQUIRED, current_state)
- __u64_field(3, DRBD_GENLA_F_MANDATORY, capacity)
- __u64_field(4, DRBD_GENLA_F_MANDATORY, ed_uuid)
+ __u64_field(3, 0, capacity)
+ __u64_field(4, 0, ed_uuid)
/* These are for broadcast from after state change work.
* prev_state and new_state are from the moment the state change took
* place, new_state is not neccessarily the same as current_state,
* there may have been more state changes since. Which will be
* broadcasted soon, in their respective after state change work. */
- __u32_field(5, DRBD_GENLA_F_MANDATORY, prev_state)
- __u32_field(6, DRBD_GENLA_F_MANDATORY, new_state)
+ __u32_field(5, 0, prev_state)
+ __u32_field(6, 0, new_state)
/* if we have a local disk: */
- __bin_field(7, DRBD_GENLA_F_MANDATORY, uuids, (UI_SIZE*sizeof(__u64)))
- __u32_field(8, DRBD_GENLA_F_MANDATORY, disk_flags)
- __u64_field(9, DRBD_GENLA_F_MANDATORY, bits_total)
- __u64_field(10, DRBD_GENLA_F_MANDATORY, bits_oos)
+ __bin_field(7, 0, uuids, (UI_SIZE*sizeof(__u64)))
+ __u32_field(8, 0, disk_flags)
+ __u64_field(9, 0, bits_total)
+ __u64_field(10, 0, bits_oos)
/* and in case resync or online verify is active */
- __u64_field(11, DRBD_GENLA_F_MANDATORY, bits_rs_total)
- __u64_field(12, DRBD_GENLA_F_MANDATORY, bits_rs_failed)
+ __u64_field(11, 0, bits_rs_total)
+ __u64_field(12, 0, bits_rs_failed)
/* for pre and post notifications of helper execution */
- __str_field(13, DRBD_GENLA_F_MANDATORY, helper, 32)
- __u32_field(14, DRBD_GENLA_F_MANDATORY, helper_exit_code)
+ __str_field(13, 0, helper, 32)
+ __u32_field(14, 0, helper_exit_code)
__u64_field(15, 0, send_cnt)
__u64_field(16, 0, recv_cnt)
@@ -233,12 +233,12 @@ GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
)
GENL_struct(DRBD_NLA_START_OV_PARMS, 9, start_ov_parms,
- __u64_field(1, DRBD_GENLA_F_MANDATORY, ov_start_sector)
- __u64_field(2, DRBD_GENLA_F_MANDATORY, ov_stop_sector)
+ __u64_field(1, 0, ov_start_sector)
+ __u64_field(2, 0, ov_stop_sector)
)
GENL_struct(DRBD_NLA_NEW_C_UUID_PARMS, 10, new_c_uuid_parms,
- __flg_field(1, DRBD_GENLA_F_MANDATORY, clear_bm)
+ __flg_field(1, 0, clear_bm)
)
GENL_struct(DRBD_NLA_TIMEOUT_PARMS, 11, timeout_parms,
@@ -246,11 +246,11 @@ GENL_struct(DRBD_NLA_TIMEOUT_PARMS, 11, timeout_parms,
)
GENL_struct(DRBD_NLA_DISCONNECT_PARMS, 12, disconnect_parms,
- __flg_field(1, DRBD_GENLA_F_MANDATORY, force_disconnect)
+ __flg_field(1, 0, force_disconnect)
)
GENL_struct(DRBD_NLA_DETACH_PARMS, 13, detach_parms,
- __flg_field(1, DRBD_GENLA_F_MANDATORY, force_detach)
+ __flg_field(1, 0, force_detach)
)
GENL_struct(DRBD_NLA_RESOURCE_INFO, 15, resource_info,
@@ -315,12 +315,12 @@ GENL_struct(DRBD_NLA_PEER_DEVICE_STATISTICS, 22, peer_device_statistics,
)
GENL_struct(DRBD_NLA_NOTIFICATION_HEADER, 23, drbd_notification_header,
- __u32_field(1, DRBD_GENLA_F_MANDATORY, nh_type)
+ __u32_field(1, 0, nh_type)
)
GENL_struct(DRBD_NLA_HELPER, 24, drbd_helper_info,
- __str_field(1, DRBD_GENLA_F_MANDATORY, helper_name, 32)
- __u32_field(2, DRBD_GENLA_F_MANDATORY, helper_status)
+ __str_field(1, 0, helper_name, 32)
+ __u32_field(2, 0, helper_status)
)
/*
@@ -333,9 +333,9 @@ GENL_notification(
DRBD_EVENT, 1, events,
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
GENL_tla_expected(DRBD_NLA_STATE_INFO, DRBD_F_REQUIRED)
- GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_SYNCER_CONF, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, 0)
+ GENL_tla_expected(DRBD_NLA_DISK_CONF, 0)
+ GENL_tla_expected(DRBD_NLA_SYNCER_CONF, 0)
)
/* query kernel for specific or all info */
@@ -349,7 +349,7 @@ GENL_op(
),
/* To select the object .doit.
* Or a subset of objects in .dumpit. */
- GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, 0)
)
/* add DRBD minor devices as volumes to resources */
@@ -367,7 +367,7 @@ GENL_op(DRBD_ADM_DEL_RESOURCE, 8, GENL_doit(drbd_adm_del_resource),
GENL_op(DRBD_ADM_RESOURCE_OPTS, 9,
GENL_doit(drbd_adm_resource_opts),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
- GENL_tla_expected(DRBD_NLA_RESOURCE_OPTS, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_OPTS, 0)
)
GENL_op(
@@ -403,7 +403,7 @@ GENL_op(
DRBD_ADM_RESIZE, 13,
GENL_doit(drbd_adm_resize),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
- GENL_tla_expected(DRBD_NLA_RESIZE_PARMS, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_RESIZE_PARMS, 0)
)
GENL_op(
@@ -424,18 +424,18 @@ GENL_op(
DRBD_ADM_NEW_C_UUID, 16,
GENL_doit(drbd_adm_new_c_uuid),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
- GENL_tla_expected(DRBD_NLA_NEW_C_UUID_PARMS, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_NEW_C_UUID_PARMS, 0)
)
GENL_op(
DRBD_ADM_START_OV, 17,
GENL_doit(drbd_adm_start_ov),
- GENL_tla_expected(DRBD_NLA_START_OV_PARMS, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_START_OV_PARMS, 0)
)
GENL_op(DRBD_ADM_DETACH, 18, GENL_doit(drbd_adm_detach),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
- GENL_tla_expected(DRBD_NLA_DETACH_PARMS, DRBD_GENLA_F_MANDATORY))
+ GENL_tla_expected(DRBD_NLA_DETACH_PARMS, 0))
GENL_op(DRBD_ADM_INVALIDATE, 19, GENL_doit(drbd_adm_invalidate),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
@@ -460,36 +460,36 @@ GENL_op(DRBD_ADM_GET_RESOURCES, 30,
GENL_op_init(
.dumpit = drbd_adm_dump_resources,
),
- GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, DRBD_GENLA_F_MANDATORY))
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, 0)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, 0)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, 0))
GENL_op(DRBD_ADM_GET_DEVICES, 31,
GENL_op_init(
.dumpit = drbd_adm_dump_devices,
.done = drbd_adm_dump_devices_done,
),
- GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_DEVICE_INFO, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY))
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, 0)
+ GENL_tla_expected(DRBD_NLA_DEVICE_INFO, 0)
+ GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, 0))
GENL_op(DRBD_ADM_GET_CONNECTIONS, 32,
GENL_op_init(
.dumpit = drbd_adm_dump_connections,
.done = drbd_adm_dump_connections_done,
),
- GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, DRBD_GENLA_F_MANDATORY))
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, 0)
+ GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, 0)
+ GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, 0))
GENL_op(DRBD_ADM_GET_PEER_DEVICES, 33,
GENL_op_init(
.dumpit = drbd_adm_dump_peer_devices,
.done = drbd_adm_dump_peer_devices_done,
),
- GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, DRBD_GENLA_F_MANDATORY)
- GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY))
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, 0)
+ GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, 0)
+ GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, 0))
GENL_notification(
DRBD_RESOURCE_STATE, 34, events,
@@ -524,7 +524,7 @@ GENL_op(
GENL_op_init(
.dumpit = drbd_adm_get_initial_state,
),
- GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY))
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, 0))
GENL_notification(
DRBD_HELPER, 40, events,
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
deleted file mode 100644
index b8fef35591aa..000000000000
--- a/include/linux/dsa/loop.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DSA_LOOP_H
-#define DSA_LOOP_H
-
-#include <linux/if_vlan.h>
-#include <linux/types.h>
-#include <linux/ethtool.h>
-#include <net/dsa.h>
-
-struct dsa_loop_vlan {
- u16 members;
- u16 untagged;
-};
-
-struct dsa_loop_mib_entry {
- char name[ETH_GSTRING_LEN];
- unsigned long val;
-};
-
-enum dsa_loop_mib_counters {
- DSA_LOOP_PHY_READ_OK,
- DSA_LOOP_PHY_READ_ERR,
- DSA_LOOP_PHY_WRITE_OK,
- DSA_LOOP_PHY_WRITE_ERR,
- __DSA_LOOP_CNT_MAX,
-};
-
-struct dsa_loop_port {
- struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
- u16 pvid;
- int mtu;
-};
-
-struct dsa_loop_priv {
- struct mii_bus *bus;
- unsigned int port_base;
- struct dsa_loop_vlan vlans[VLAN_N_VID];
- struct net_device *netdev;
- struct dsa_loop_port ports[DSA_MAX_PORTS];
-};
-
-#endif /* DSA_LOOP_H */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index fa32f2aca22f..deba46b3ee25 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -541,17 +541,6 @@ struct mem_ctl_info {
struct csrow_info **csrows;
unsigned int nr_csrows, num_cschannel;
- /*
- * Memory Controller hierarchy
- *
- * There are basically two types of memory controller: the ones that
- * sees memory sticks ("dimms"), and the ones that sees memory ranks.
- * All old memory controllers enumerate memories per rank, but most
- * of the recent drivers enumerate memories per DIMM, instead.
- * When the memory controller is per rank, csbased is true.
- */
- unsigned int n_layers;
- struct edac_mc_layer *layers;
bool csbased;
/*
@@ -609,6 +598,18 @@ struct mem_ctl_info {
u8 fake_inject_layer[EDAC_MAX_LAYERS];
bool fake_inject_ue;
u16 fake_inject_count;
+
+ /*
+ * Memory Controller hierarchy
+ *
+ * There are basically two types of memory controller: the ones that
+ * see memory sticks ("dimms"), and the ones that see memory ranks.
+ * All old memory controllers enumerate memories per rank, but most
+ * of the recent drivers enumerate memories per DIMM, instead.
+ * When the memory controller is per rank, csbased is true.
+ */
+ unsigned int n_layers;
+ struct edac_mc_layer layers[] __counted_by(n_layers);
};
#define mci_for_each_dimm(mci, dimm) \
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 664898d09ff5..72e76ec54641 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -832,27 +832,6 @@ extern int __init parse_efi_signature_list(
const void *data, size_t size,
efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *));
-/**
- * efi_range_is_wc - check the WC bit on an address range
- * @start: starting kvirt address
- * @len: length of range
- *
- * Consult the EFI memory map and make sure it's ok to set this range WC.
- * Returns true or false.
- */
-static inline int efi_range_is_wc(unsigned long start, unsigned long len)
-{
- unsigned long i;
-
- for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
- unsigned long paddr = __pa(start + i);
- if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC))
- return 0;
- }
- /* The range checked out */
- return 1;
-}
-
/*
* We play games with efi_enabled so that the compiler will, if
* possible, remove EFI-related code altogether.
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index e7497f804644..c909a8ba22e8 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -248,7 +248,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
struct em_perf_state *ps;
int i;
- WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
+ lockdep_assert(rcu_read_lock_any_held());
if (!sum_util)
return 0;
@@ -267,7 +267,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* Find the lowest performance state of the Energy Model above the
* requested performance.
*/
- em_table = rcu_dereference(pd->em_table);
+ em_table = rcu_dereference_all(pd->em_table);
i = em_pd_get_efficient_state(em_table->state, pd, max_util);
ps = &em_table->state[i];
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index f83ca0abf2cd..e04d67e999a1 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -48,6 +48,7 @@
/**
* arch_ptrace_report_syscall_entry - Architecture specific ptrace_report_syscall_entry() wrapper
+ * @regs: Pointer to the register state at syscall entry
*
* Invoked from syscall_trace_enter() to wrap ptrace_report_syscall_entry().
*
@@ -205,6 +206,8 @@ static __always_inline bool report_single_step(unsigned long work)
/**
* arch_ptrace_report_syscall_exit - Architecture specific ptrace_report_syscall_exit()
+ * @regs: Pointer to the register state at syscall exit
+ * @step: Indicates a single-step exit rather than a normal syscall exit
*
* This allows architecture specific ptrace_report_syscall_exit()
* implementations. If not defined by the architecture this falls back to
@@ -321,7 +324,7 @@ static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
{
instrumentation_begin();
syscall_exit_to_user_mode_work(regs);
- local_irq_disable_exit_to_user();
+ local_irq_disable();
syscall_exit_to_user_mode_prepare(regs);
instrumentation_end();
exit_to_user_mode();
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 798abec67a1b..1cb0740ba331 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -176,6 +176,8 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* struct ethtool_rxfh_context - a custom RSS context configuration
* @indir_size: Number of u32 entries in indirection table
* @key_size: Size of hash key, in bytes
+ * @indir_user_size: number of user provided entries for the
+ * indirection table
* @priv_size: Size of driver private data, in bytes
* @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_*
* @input_xfrm: Defines how the input data is transformed. Valid values are one
@@ -186,6 +188,7 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
struct ethtool_rxfh_context {
u32 indir_size;
u32 key_size;
+ u32 indir_user_size;
u16 priv_size;
u8 hfunc;
u8 input_xfrm;
@@ -214,6 +217,13 @@ static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
}
void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
+void ethtool_rxfh_indir_lost(struct net_device *dev);
+bool ethtool_rxfh_indir_can_resize(struct net_device *dev, const u32 *tbl,
+ u32 old_size, u32 new_size);
+void ethtool_rxfh_indir_resize(struct net_device *dev, u32 *tbl,
+ u32 old_size, u32 new_size);
+int ethtool_rxfh_ctxs_can_resize(struct net_device *dev, u32 new_indir_size);
+void ethtool_rxfh_ctxs_resize(struct net_device *dev, u32 new_indir_size);
struct link_mode_info {
int speed;
@@ -332,6 +342,8 @@ struct kernel_ethtool_coalesce {
u32 tx_aggr_max_bytes;
u32 tx_aggr_max_frames;
u32 tx_aggr_time_usecs;
+ u32 rx_cqe_frames;
+ u32 rx_cqe_nsecs;
};
/**
@@ -380,7 +392,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
#define ETHTOOL_COALESCE_RX_PROFILE BIT(27)
#define ETHTOOL_COALESCE_TX_PROFILE BIT(28)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0)
+#define ETHTOOL_COALESCE_RX_CQE_FRAMES BIT(29)
+#define ETHTOOL_COALESCE_RX_CQE_NSECS BIT(30)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(30, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -512,12 +526,14 @@ struct ethtool_eth_ctrl_stats {
*
* Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
* from the standard.
+ * @tx_pause_storm_events: TX pause storm event count (see ethtool.yaml).
*/
struct ethtool_pause_stats {
enum ethtool_mac_stats_src src;
struct_group(stats,
u64 tx_pause_frames;
u64 rx_pause_frames;
+ u64 tx_pause_storm_events;
);
};
@@ -1331,12 +1347,15 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
* @rss_ctx: XArray of custom RSS contexts
* @rss_lock: Protects entries in @rss_ctx. May be taken from
* within RTNL.
+ * @rss_indir_user_size: Number of user provided entries for the default
+ * (context 0) indirection table.
* @wol_enabled: Wake-on-LAN is enabled
* @module_fw_flash_in_progress: Module firmware flashing is in progress.
*/
struct ethtool_netdev_state {
struct xarray rss_ctx;
struct mutex rss_lock;
+ u32 rss_indir_user_size;
unsigned wol_enabled:1;
unsigned module_fw_flash_in_progress:1;
};
diff --git a/include/linux/evm.h b/include/linux/evm.h
index ddece4a6b25d..913f4573b203 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -18,6 +18,8 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
size_t xattr_value_len);
+int evm_fix_hmac(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len);
int evm_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, struct xattr *xattrs,
int *xattr_count);
@@ -51,6 +53,12 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
{
return INTEGRITY_UNKNOWN;
}
+
+static inline int evm_fix_hmac(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ return -EOPNOTSUPP;
+}
#endif
static inline int evm_inode_init_security(struct inode *inode, struct inode *dir,
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index d445705ac13c..726054614752 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -37,14 +37,14 @@
* section flag requires it. Use '%progbits' instead of '@progbits' since the
* former apparently works on all arches according to the binutils source.
*/
-#define __KSYMTAB(name, sym, sec, ns) \
+#define __KSYMTAB(name, sym, ns) \
asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1" "\n" \
"__kstrtab_" #name ":" "\n" \
" .asciz \"" #name "\"" "\n" \
"__kstrtabns_" #name ":" "\n" \
" .asciz \"" ns "\"" "\n" \
" .previous" "\n" \
- " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+ " .section \"___ksymtab+" #name "\", \"a\"" "\n" \
__KSYM_ALIGN "\n" \
"__ksymtab_" #name ":" "\n" \
__KSYM_REF(sym) "\n" \
@@ -59,14 +59,22 @@
#define KSYM_FUNC(name) name
#endif
-#define KSYMTAB_FUNC(name, sec, ns) __KSYMTAB(name, KSYM_FUNC(name), sec, ns)
-#define KSYMTAB_DATA(name, sec, ns) __KSYMTAB(name, name, sec, ns)
+#define KSYMTAB_FUNC(name, ns) __KSYMTAB(name, KSYM_FUNC(name), ns)
+#define KSYMTAB_DATA(name, ns) __KSYMTAB(name, name, ns)
-#define SYMBOL_CRC(sym, crc, sec) \
- asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
- ".balign 4" "\n" \
- "__crc_" #sym ":" "\n" \
- ".long " #crc "\n" \
- ".previous" "\n")
+#define SYMBOL_CRC(sym, crc) \
+ asm(" .section \"___kcrctab+" #sym "\",\"a\"" "\n" \
+ " .balign 4" "\n" \
+ "__crc_" #sym ":" "\n" \
+ " .long " #crc "\n" \
+ " .previous" "\n" \
+ )
+
+#define SYMBOL_FLAGS(sym, flags) \
+ asm(" .section \"___kflagstab+" #sym "\",\"a\"" "\n" \
+ "__flags_" #sym ":" "\n" \
+ " .byte " #flags "\n" \
+ " .previous" "\n" \
+ )
#endif /* __LINUX_EXPORT_INTERNAL_H__ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 6d4a58084fd5..5178a33c752c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -217,14 +217,12 @@ struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
bool sort_pagereflist; /* sort pagelist by offset */
- int open_count; /* number of opened files; protected by fb_info lock */
- struct mutex lock; /* mutex that protects the pageref list */
- struct list_head pagereflist; /* list of pagerefs for touched pages */
- struct address_space *mapping; /* page cache object for fb device */
/* callback */
struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
+
+struct fb_deferred_io_state;
#endif
/*
@@ -484,9 +482,8 @@ struct fb_info {
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
- unsigned long npagerefs;
- struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
+ struct fb_deferred_io_state *fbdefio_state;
#endif
const struct fb_ops *fbops;
@@ -605,9 +602,9 @@ extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
-extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
- u32 height, u32 shift_high, u32 shift_low, u32 mod);
-extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height);
+void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, const u8 *src, u32 idx, u32 height,
+ u32 shift_high, u32 shift_low, u32 mod);
+void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, const u8 *src, u32 s_pitch, u32 height);
extern void fb_set_suspend(struct fb_info *info, int state);
extern int fb_get_color_depth(struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix);
@@ -633,8 +630,8 @@ static inline struct device *dev_of_fbinfo(const struct fb_info *info)
#endif
}
-static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
- u8 *src, u32 s_pitch, u32 height)
+static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, const u8 *src, u32 s_pitch,
+ u32 height)
{
u32 i, j;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 44d7ae95ddbc..1ec6d5ba64cc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -21,7 +21,6 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
-#include <crypto/sha1.h>
#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
@@ -1092,22 +1091,25 @@ bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
- enum skb_drop_reason *reason);
+enum skb_drop_reason
+sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
- enum skb_drop_reason ignore_reason;
+ enum skb_drop_reason drop_reason;
- return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
+ drop_reason = sk_filter_trim_cap(sk, skb, 1);
+ return drop_reason ? -EPERM : 0;
}
-static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb,
- enum skb_drop_reason *reason)
+static inline enum skb_drop_reason
+sk_filter_reason(struct sock *sk, struct sk_buff *skb)
{
- return sk_filter_trim_cap(sk, skb, 1, reason);
+ return sk_filter_trim_cap(sk, skb, 1);
}
+struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp,
+ int *err);
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
@@ -1153,7 +1155,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
(void *)__bpf_call_base)
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_jit_inlines_helper_call(s32 imm);
@@ -1184,6 +1186,31 @@ static inline bool bpf_dump_raw_ok(const struct cred *cred)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+
+#ifdef CONFIG_BPF_SYSCALL
+struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+ const struct bpf_insn *patch, u32 len);
+struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env);
+void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
+ struct bpf_insn_aux_data *orig_insn_aux);
+#else
+static inline struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+ const struct bpf_insn *patch, u32 len)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env)
+{
+ return NULL;
+}
+
+static inline void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
+ struct bpf_insn_aux_data *orig_insn_aux)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
+
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
static inline bool xdp_return_frame_no_direct(void)
@@ -1310,9 +1337,14 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
-struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
+static inline bool bpf_prog_need_blind(const struct bpf_prog *prog)
+{
+ return prog->blinding_requested && !prog->blinded;
+}
+
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
@@ -1451,6 +1483,20 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
+static inline bool bpf_prog_need_blind(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog)
+{
+ return prog;
+}
+
+static inline void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
+{
+}
#endif /* CONFIG_BPF_JIT */
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
diff --git a/include/linux/find.h b/include/linux/find.h
index 9d720ad92bc1..6c2be8ca615d 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -22,8 +22,6 @@ extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long si
unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n);
-unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
- unsigned long size, unsigned long n);
unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size,
unsigned long n);
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
index 0ec1cdc5585d..4e3baa557068 100644
--- a/include/linux/firmware/cirrus/cs_dsp.h
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -179,6 +179,7 @@ struct cs_dsp {
bool booted;
bool running;
+ bool hibernating;
struct list_head ctl_list;
@@ -354,4 +355,6 @@ int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
+void cs_dsp_hibernate(struct cs_dsp *dsp, bool hibernating);
+
#endif
diff --git a/include/linux/firmware/samsung/exynos-acpm-protocol.h b/include/linux/firmware/samsung/exynos-acpm-protocol.h
index 2091da965a5a..13f17dc4443b 100644
--- a/include/linux/firmware/samsung/exynos-acpm-protocol.h
+++ b/include/linux/firmware/samsung/exynos-acpm-protocol.h
@@ -14,30 +14,24 @@ struct acpm_handle;
struct device_node;
struct acpm_dvfs_ops {
- int (*set_rate)(const struct acpm_handle *handle,
- unsigned int acpm_chan_id, unsigned int clk_id,
- unsigned long rate);
- unsigned long (*get_rate)(const struct acpm_handle *handle,
+ int (*set_rate)(struct acpm_handle *handle, unsigned int acpm_chan_id,
+ unsigned int clk_id, unsigned long rate);
+ unsigned long (*get_rate)(struct acpm_handle *handle,
unsigned int acpm_chan_id,
unsigned int clk_id);
};
struct acpm_pmic_ops {
- int (*read_reg)(const struct acpm_handle *handle,
- unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
- u8 *buf);
- int (*bulk_read)(const struct acpm_handle *handle,
- unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
- u8 count, u8 *buf);
- int (*write_reg)(const struct acpm_handle *handle,
- unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
- u8 value);
- int (*bulk_write)(const struct acpm_handle *handle,
- unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
- u8 count, const u8 *buf);
- int (*update_reg)(const struct acpm_handle *handle,
- unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
- u8 value, u8 mask);
+ int (*read_reg)(struct acpm_handle *handle, unsigned int acpm_chan_id,
+ u8 type, u8 reg, u8 chan, u8 *buf);
+ int (*bulk_read)(struct acpm_handle *handle, unsigned int acpm_chan_id,
+ u8 type, u8 reg, u8 chan, u8 count, u8 *buf);
+ int (*write_reg)(struct acpm_handle *handle, unsigned int acpm_chan_id,
+ u8 type, u8 reg, u8 chan, u8 value);
+ int (*bulk_write)(struct acpm_handle *handle, unsigned int acpm_chan_id,
+ u8 type, u8 reg, u8 chan, u8 count, const u8 *buf);
+ int (*update_reg)(struct acpm_handle *handle, unsigned int acpm_chan_id,
+ u8 type, u8 reg, u8 chan, u8 value, u8 mask);
};
struct acpm_ops {
@@ -56,12 +50,12 @@ struct acpm_handle {
struct device;
#if IS_ENABLED(CONFIG_EXYNOS_ACPM_PROTOCOL)
-const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
- struct device_node *np);
+struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np);
#else
-static inline const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
- struct device_node *np)
+static inline struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
return NULL;
}
diff --git a/include/linux/firmware/thead/thead,th1520-aon.h b/include/linux/firmware/thead/thead,th1520-aon.h
index dae132b66873..d81f5f6f5b90 100644
--- a/include/linux/firmware/thead/thead,th1520-aon.h
+++ b/include/linux/firmware/thead/thead,th1520-aon.h
@@ -97,80 +97,6 @@ struct th1520_aon_rpc_ack_common {
#define RPC_GET_SVC_FLAG_ACK_TYPE(MESG) (((MESG)->svc & 0x40) >> 6)
#define RPC_SET_SVC_FLAG_ACK_TYPE(MESG, ACK) ((MESG)->svc |= (ACK) << 6)
-#define RPC_SET_BE64(MESG, OFFSET, SET_DATA) \
- do { \
- u8 *data = (u8 *)(MESG); \
- u64 _offset = (OFFSET); \
- u64 _set_data = (SET_DATA); \
- data[_offset + 7] = _set_data & 0xFF; \
- data[_offset + 6] = (_set_data & 0xFF00) >> 8; \
- data[_offset + 5] = (_set_data & 0xFF0000) >> 16; \
- data[_offset + 4] = (_set_data & 0xFF000000) >> 24; \
- data[_offset + 3] = (_set_data & 0xFF00000000) >> 32; \
- data[_offset + 2] = (_set_data & 0xFF0000000000) >> 40; \
- data[_offset + 1] = (_set_data & 0xFF000000000000) >> 48; \
- data[_offset + 0] = (_set_data & 0xFF00000000000000) >> 56; \
- } while (0)
-
-#define RPC_SET_BE32(MESG, OFFSET, SET_DATA) \
- do { \
- u8 *data = (u8 *)(MESG); \
- u64 _offset = (OFFSET); \
- u64 _set_data = (SET_DATA); \
- data[_offset + 3] = (_set_data) & 0xFF; \
- data[_offset + 2] = (_set_data & 0xFF00) >> 8; \
- data[_offset + 1] = (_set_data & 0xFF0000) >> 16; \
- data[_offset + 0] = (_set_data & 0xFF000000) >> 24; \
- } while (0)
-
-#define RPC_SET_BE16(MESG, OFFSET, SET_DATA) \
- do { \
- u8 *data = (u8 *)(MESG); \
- u64 _offset = (OFFSET); \
- u64 _set_data = (SET_DATA); \
- data[_offset + 1] = (_set_data) & 0xFF; \
- data[_offset + 0] = (_set_data & 0xFF00) >> 8; \
- } while (0)
-
-#define RPC_SET_U8(MESG, OFFSET, SET_DATA) \
- do { \
- u8 *data = (u8 *)(MESG); \
- data[OFFSET] = (SET_DATA) & 0xFF; \
- } while (0)
-
-#define RPC_GET_BE64(MESG, OFFSET, PTR) \
- do { \
- u8 *data = (u8 *)(MESG); \
- u64 _offset = (OFFSET); \
- *(u32 *)(PTR) = \
- (data[_offset + 7] | data[_offset + 6] << 8 | \
- data[_offset + 5] << 16 | data[_offset + 4] << 24 | \
- data[_offset + 3] << 32 | data[_offset + 2] << 40 | \
- data[_offset + 1] << 48 | data[_offset + 0] << 56); \
- } while (0)
-
-#define RPC_GET_BE32(MESG, OFFSET, PTR) \
- do { \
- u8 *data = (u8 *)(MESG); \
- u64 _offset = (OFFSET); \
- *(u32 *)(PTR) = \
- (data[_offset + 3] | data[_offset + 2] << 8 | \
- data[_offset + 1] << 16 | data[_offset + 0] << 24); \
- } while (0)
-
-#define RPC_GET_BE16(MESG, OFFSET, PTR) \
- do { \
- u8 *data = (u8 *)(MESG); \
- u64 _offset = (OFFSET); \
- *(u16 *)(PTR) = (data[_offset + 1] | data[_offset + 0] << 8); \
- } while (0)
-
-#define RPC_GET_U8(MESG, OFFSET, PTR) \
- do { \
- u8 *data = (u8 *)(MESG); \
- *(u8 *)(PTR) = (data[OFFSET]); \
- } while (0)
-
/*
* Defines for SC PM Power Mode
*/
diff --git a/include/linux/pagevec.h b/include/linux/folio_batch.h
index 63be5a451627..b45946adc50b 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/folio_batch.h
@@ -1,18 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * include/linux/pagevec.h
+ * include/linux/folio_batch.h
*
* In many places it is efficient to batch an operation up against multiple
* folios. A folio_batch is a container which is used for that.
*/
-#ifndef _LINUX_PAGEVEC_H
-#define _LINUX_PAGEVEC_H
+#ifndef _LINUX_FOLIO_BATCH_H
+#define _LINUX_FOLIO_BATCH_H
#include <linux/types.h>
/* 31 pointers + header align the folio_batch structure to a power of two */
-#define PAGEVEC_SIZE 31
+#define FOLIO_BATCH_SIZE 31
struct folio;
@@ -29,7 +29,7 @@ struct folio_batch {
unsigned char nr;
unsigned char i;
bool percpu_pvec_drained;
- struct folio *folios[PAGEVEC_SIZE];
+ struct folio *folios[FOLIO_BATCH_SIZE];
};
/**
@@ -58,7 +58,7 @@ static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
- return PAGEVEC_SIZE - fbatch->nr;
+ return FOLIO_BATCH_SIZE - fbatch->nr;
}
/**
@@ -93,7 +93,7 @@ static inline struct folio *folio_batch_next(struct folio_batch *fbatch)
return fbatch->folios[fbatch->i++];
}
-void __folio_batch_release(struct folio_batch *pvec);
+void __folio_batch_release(struct folio_batch *fbatch);
static inline void folio_batch_release(struct folio_batch *fbatch)
{
@@ -102,4 +102,4 @@ static inline void folio_batch_release(struct folio_batch *fbatch)
}
void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
-#endif /* _LINUX_PAGEVEC_H */
+#endif /* _LINUX_FOLIO_BATCH_H */
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index adab609c972e..f6d5f1f127c9 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -14,7 +14,7 @@
#ifndef _LINUX_FOLIO_QUEUE_H
#define _LINUX_FOLIO_QUEUE_H
-#include <linux/pagevec.h>
+#include <linux/folio_batch.h>
#include <linux/mm.h>
/*
@@ -29,12 +29,12 @@
*/
struct folio_queue {
struct folio_batch vec; /* Folios in the queue segment */
- u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ u8 orders[FOLIO_BATCH_SIZE]; /* Order of each folio */
struct folio_queue *next; /* Next queue segment or NULL */
struct folio_queue *prev; /* Previous queue segment of NULL */
unsigned long marks; /* 1-bit mark per folio */
unsigned long marks2; /* Second 1-bit mark per folio */
-#if PAGEVEC_SIZE > BITS_PER_LONG
+#if FOLIO_BATCH_SIZE > BITS_PER_LONG
#error marks is not big enough
#endif
unsigned int rreq_id;
@@ -70,7 +70,7 @@ static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
*/
static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
{
- return PAGEVEC_SIZE;
+ return FOLIO_BATCH_SIZE;
}
/**
diff --git a/include/linux/font.h b/include/linux/font.h
index fd8625cd76b2..6845f02d739a 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -11,47 +11,123 @@
#ifndef _VIDEO_FONT_H
#define _VIDEO_FONT_H
+#include <linux/math.h>
#include <linux/types.h>
+struct console_font;
+
+/*
+ * Glyphs
+ */
+
+/**
+ * font_glyph_pitch - Calculates the number of bytes per scanline
+ * @width: The glyph width in bits per scanline
+ *
+ * A glyph's pitch is the number of bytes in a single scanline, rounded
+ * up to the next full byte. The parameter @width receives the number
+ * of visible bits per scanline. For example, if width is 14 bytes per
+ * scanline, the pitch is 2 bytes per scanline. If width is 8 bits per
+ * scanline, the pitch is 1 byte per scanline.
+ *
+ * Returns:
+ * The number of bytes in a single scanline of the glyph
+ */
+static inline unsigned int font_glyph_pitch(unsigned int width)
+{
+ return DIV_ROUND_UP(width, 8);
+}
+
+/**
+ * font_glyph_size - Calculates the number of bytes per glyph
+ * @width: The glyph width in bits per scanline
+ * @vpitch: The number of scanlines in the glyph
+ *
+ * The number of bytes in a glyph depends on the pitch and the number
+ * of scanlines. font_glyph_size automatically calculates the pitch
+ * from the given width. The parameter @vpitch gives the number of
+ * scanlines, which is usually the glyph's height in scanlines. Fonts
+ * coming from user space can sometimes have a different vertical pitch
+ * with empty scanlines between two adjacent glyphs.
+ */
+static inline unsigned int font_glyph_size(unsigned int width, unsigned int vpitch)
+{
+ return font_glyph_pitch(width) * vpitch;
+}
+
+/*
+ * font_data_t and helpers
+ */
+
+/**
+ * font_data_t - Raw font data
+ *
+ * Values of type font_data_t store a pointer to raw font data. The format
+ * is monochrome. Each bit sets a pixel of a stored glyph. Font data does
+ * not store geometry information for the individual glyphs. Users of the
+ * font have to store glyph size, pitch and character count separately.
+ *
+ * Font data in font_data_t is not equivalent to raw u8. Each pointer stores
+ * an additional hidden header before the font data. The layout is
+ *
+ * +------+-----------------------------+
+ * | -16 | CRC32 Checksum (optional) |
+ * | -12 | <Unused> |
+ * | -8 | Number of data bytes |
+ * | -4 | Reference count |
+ * +------+-----------------------------+
+ * | 0 | Data buffer |
+ * | ... | |
+ * +------+-----------------------------+
+ *
+ * Use helpers to access font_data_t. Use font_data_buf() to get the stored data.
+ */
+typedef const unsigned char font_data_t;
+
+/**
+ * font_data_buf() - Returns the font data as raw bytes
+ * @fd: The font data
+ *
+ * Returns:
+ * The raw font data. The provided buffer is read-only.
+ */
+static inline const unsigned char *font_data_buf(font_data_t *fd)
+{
+ return (const unsigned char *)fd;
+}
+
+font_data_t *font_data_import(const struct console_font *font, unsigned int vpitch,
+ u32 (*calc_csum)(u32, const void *, size_t));
+void font_data_get(font_data_t *fd);
+bool font_data_put(font_data_t *fd);
+unsigned int font_data_size(font_data_t *fd);
+bool font_data_is_equal(font_data_t *lhs, font_data_t *rhs);
+int font_data_export(font_data_t *fd, struct console_font *font, unsigned int vpitch);
+
+/* font_rotate.c */
+void font_glyph_rotate_90(const unsigned char *glyph, unsigned int width, unsigned int height,
+ unsigned char *out);
+void font_glyph_rotate_180(const unsigned char *glyph, unsigned int width, unsigned int height,
+ unsigned char *out);
+void font_glyph_rotate_270(const unsigned char *glyph, unsigned int width, unsigned int height,
+ unsigned char *out);
+unsigned char *font_data_rotate(font_data_t *fd, unsigned int width, unsigned int height,
+ unsigned int charcount, unsigned int steps,
+ unsigned char *buf, size_t *bufsize);
+
+/*
+ * Font description
+ */
+
struct font_desc {
int idx;
const char *name;
unsigned int width, height;
unsigned int charcount;
- const void *data;
+ font_data_t *data;
int pref;
};
-#define VGA8x8_IDX 0
-#define VGA8x16_IDX 1
-#define PEARL8x8_IDX 2
-#define VGA6x11_IDX 3
-#define FONT7x14_IDX 4
-#define FONT10x18_IDX 5
-#define SUN8x16_IDX 6
-#define SUN12x22_IDX 7
-#define ACORN8x8_IDX 8
-#define MINI4x6_IDX 9
-#define FONT6x10_IDX 10
-#define TER16x32_IDX 11
-#define FONT6x8_IDX 12
-#define TER10x18_IDX 13
-
-extern const struct font_desc font_vga_8x8,
- font_vga_8x16,
- font_pearl_8x8,
- font_vga_6x11,
- font_7x14,
- font_10x18,
- font_sun_8x16,
- font_sun_12x22,
- font_acorn_8x8,
- font_mini_4x6,
- font_6x10,
- font_ter_16x32,
- font_6x8,
- font_ter_10x18;
-
/* Find a font with a specific name */
extern const struct font_desc *find_font(const char *name);
@@ -65,17 +141,23 @@ extern const struct font_desc *get_default_font(int xres, int yres,
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
-/* Extra word getters */
-#define REFCOUNT(fd) (((int *)(fd))[-1])
-#define FNTSIZE(fd) (((int *)(fd))[-2])
-#define FNTCHARCNT(fd) (((int *)(fd))[-3])
-#define FNTSUM(fd) (((int *)(fd))[-4])
-
-#define FONT_EXTRA_WORDS 4
+/*
+ * Built-in fonts
+ */
-struct font_data {
- unsigned int extra[FONT_EXTRA_WORDS];
- const unsigned char data[];
-} __packed;
+extern const struct font_desc font_10x18;
+extern const struct font_desc font_6x10;
+extern const struct font_desc font_6x8;
+extern const struct font_desc font_7x14;
+extern const struct font_desc font_acorn_8x8;
+extern const struct font_desc font_mini_4x6;
+extern const struct font_desc font_pearl_8x8;
+extern const struct font_desc font_sun_12x22;
+extern const struct font_desc font_sun_8x16;
+extern const struct font_desc font_ter_10x18;
+extern const struct font_desc font_ter_16x32;
+extern const struct font_desc font_vga_6x11;
+extern const struct font_desc font_vga_8x16;
+extern const struct font_desc font_vga_8x8;
#endif /* _VIDEO_FONT_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8b3dd145b25e..e1d257e6da68 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,8 +55,6 @@ struct bdi_writeback;
struct bio;
struct io_comp_batch;
struct fiemap_extent_info;
-struct hd_geometry;
-struct iovec;
struct kiocb;
struct kobject;
struct pipe_inode_info;
@@ -445,6 +443,13 @@ struct address_space_operations {
extern const struct address_space_operations empty_aops;
+/* Structure for tracking metadata buffer heads associated with the mapping */
+struct mapping_metadata_bhs {
+ struct address_space *mapping; /* Mapping bhs are associated with */
+ spinlock_t lock; /* Lock protecting bh list */
+ struct list_head list; /* The list of bhs (b_assoc_buffers) */
+};
+
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
@@ -464,8 +469,6 @@ extern const struct address_space_operations empty_aops;
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
* @i_private_lock: For use by the owner of the address_space.
- * @i_private_list: For use by the owner of the address_space.
- * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
@@ -484,9 +487,7 @@ struct address_space {
unsigned long flags;
errseq_t wb_err;
spinlock_t i_private_lock;
- struct list_head i_private_list;
struct rw_semaphore i_mmap_rwsem;
- void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -783,7 +784,7 @@ struct inode {
#endif
/* Stat data, not accessed from path walking */
- unsigned long i_ino;
+ u64 i_ino;
/*
* Filesystems may only read i_nlink directly. They shall use the
* following functions for modification:
@@ -1917,7 +1918,6 @@ struct dir_context {
*/
#define COPY_FILE_SPLICE (1 << 0)
-struct iov_iter;
struct io_uring_cmd;
struct offset_ctx;
@@ -2058,16 +2058,24 @@ static inline bool can_mmap_file(struct file *file)
return true;
}
-int __compat_vma_mmap(const struct file_operations *f_op,
- struct file *file, struct vm_area_struct *vma);
+void compat_set_desc_from_vma(struct vm_area_desc *desc, const struct file *file,
+ const struct vm_area_struct *vma);
+int __compat_vma_mmap(struct vm_area_desc *desc, struct vm_area_struct *vma);
int compat_vma_mmap(struct file *file, struct vm_area_struct *vma);
+int __vma_check_mmap_hook(struct vm_area_struct *vma);
static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
+ int err;
+
if (file->f_op->mmap_prepare)
return compat_vma_mmap(file, vma);
- return file->f_op->mmap(file, vma);
+ err = file->f_op->mmap(file, vma);
+ if (err)
+ return err;
+
+ return __vma_check_mmap_hook(vma);
}
static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
@@ -2475,6 +2483,19 @@ struct file *dentry_create(struct path *path, int flags, umode_t mode,
const struct cred *cred);
const struct path *backing_file_user_path(const struct file *f);
+#ifdef CONFIG_SECURITY
+void *backing_file_security(const struct file *f);
+void backing_file_set_security(struct file *f, void *security);
+#else
+static inline void *backing_file_security(const struct file *f)
+{
+ return NULL;
+}
+static inline void backing_file_set_security(struct file *f, void *security)
+{
+}
+#endif /* CONFIG_SECURITY */
+
/*
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
* stored in ->vm_file is a backing file whose f_inode is on the underlying
@@ -2912,65 +2933,63 @@ static inline bool name_contains_dotdot(const char *name)
#include <linux/err.h>
/* needed for stackable file system support */
-extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
+loff_t default_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
+loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
-extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
+int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp);
static inline int inode_init_always(struct super_block *sb, struct inode *inode)
{
return inode_init_always_gfp(sb, inode, GFP_NOFS);
}
-extern void inode_init_once(struct inode *);
-extern void address_space_init_once(struct address_space *mapping);
-extern struct inode * igrab(struct inode *);
-extern ino_t iunique(struct super_block *, ino_t);
-extern int inode_needs_sync(struct inode *inode);
-extern int inode_just_drop(struct inode *inode);
+void inode_init_once(struct inode *inode);
+void address_space_init_once(struct address_space *mapping);
+struct inode *igrab(struct inode *inode);
+ino_t iunique(struct super_block *sb, ino_t max_reserved);
+int inode_needs_sync(struct inode *inode);
+int inode_just_drop(struct inode *inode);
static inline int inode_generic_drop(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
-extern void d_mark_dontcache(struct inode *inode);
+void d_mark_dontcache(struct inode *inode);
-extern struct inode *ilookup5_nowait(struct super_block *sb,
- unsigned long hashval, int (*test)(struct inode *, void *),
- void *data, bool *isnew);
-extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
- int (*test)(struct inode *, void *), void *data);
-extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+struct inode *ilookup5_nowait(struct super_block *sb, u64 hashval,
+ int (*test)(struct inode *, void *), void *data,
+ bool *isnew);
+struct inode *ilookup5(struct super_block *sb, u64 hashval,
+ int (*test)(struct inode *, void *), void *data);
+struct inode *ilookup(struct super_block *sb, u64 ino);
-extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
- int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *),
- void *data);
-struct inode *iget5_locked(struct super_block *, unsigned long,
+struct inode *inode_insert5(struct inode *inode, u64 hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data);
+struct inode *iget5_locked(struct super_block *sb, u64 hashval,
int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *), void *);
-struct inode *iget5_locked_rcu(struct super_block *, unsigned long,
+ int (*set)(struct inode *, void *), void *data);
+struct inode *iget5_locked_rcu(struct super_block *sb, u64 hashval,
int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *), void *);
-extern struct inode * iget_locked(struct super_block *, unsigned long);
-extern struct inode *find_inode_nowait(struct super_block *,
- unsigned long,
- int (*match)(struct inode *,
- unsigned long, void *),
- void *data);
-extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
- int (*)(struct inode *, void *), void *);
-extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
-extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
-extern int insert_inode_locked(struct inode *);
+ int (*set)(struct inode *, void *), void *data);
+struct inode *iget_locked(struct super_block *sb, u64 ino);
+struct inode *find_inode_nowait(struct super_block *sb, u64 hashval,
+ int (*match)(struct inode *, u64, void *),
+ void *data);
+struct inode *find_inode_rcu(struct super_block *sb, u64 hashval,
+ int (*test)(struct inode *, void *), void *data);
+struct inode *find_inode_by_ino_rcu(struct super_block *sb, u64 ino);
+int insert_inode_locked4(struct inode *inode, u64 hashval,
+ int (*test)(struct inode *, void *), void *data);
+int insert_inode_locked(struct inode *inode);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
+void lockdep_annotate_inode_mutex_key(struct inode *inode);
#else
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
-extern void unlock_new_inode(struct inode *);
-extern void discard_new_inode(struct inode *);
-extern unsigned int get_next_ino(void);
-extern void evict_inodes(struct super_block *sb);
+void unlock_new_inode(struct inode *inode);
+void discard_new_inode(struct inode *inode);
+unsigned int get_next_ino(void);
+void evict_inodes(struct super_block *sb);
void dump_mapping(const struct address_space *);
/*
@@ -3015,21 +3034,21 @@ int setattr_should_drop_sgid(struct mnt_idmap *idmap,
*/
#define alloc_inode_sb(_sb, _cache, _gfp) kmem_cache_alloc_lru(_cache, &_sb->s_inode_lru, _gfp)
-extern void __insert_inode_hash(struct inode *, unsigned long hashval);
+void __insert_inode_hash(struct inode *inode, u64 hashval);
static inline void insert_inode_hash(struct inode *inode)
{
__insert_inode_hash(inode, inode->i_ino);
}
-extern void __remove_inode_hash(struct inode *);
+void __remove_inode_hash(struct inode *inode);
static inline void remove_inode_hash(struct inode *inode)
{
if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}
-extern void inode_sb_list_add(struct inode *inode);
-extern void inode_lru_list_add(struct inode *inode);
+void inode_sb_list_add(struct inode *inode);
+void inode_lru_list_add(struct inode *inode);
int generic_file_mmap(struct file *, struct vm_area_struct *);
int generic_file_mmap_prepare(struct vm_area_desc *desc);
@@ -3295,8 +3314,8 @@ void simple_offset_destroy(struct offset_ctx *octx);
extern const struct file_operations simple_offset_dir_operations;
-extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
-extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
+extern int simple_fsync_noflush(struct file *, loff_t, loff_t, int);
+extern int simple_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 5e8a3b546033..98b83708f92b 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -27,8 +27,8 @@ typedef int fs_param_type(struct p_log *,
* The type of parameter expected.
*/
fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
- fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
- fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid,
+ fs_param_is_enum, fs_param_is_string, fs_param_is_blockdev,
+ fs_param_is_fd, fs_param_is_uid, fs_param_is_gid,
fs_param_is_file_or_string;
/*
@@ -84,8 +84,6 @@ extern int fs_lookup_param(struct fs_context *fc,
extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
-extern const struct constant_table bool_names[];
-
#ifdef CONFIG_VALIDATE_FS_PARSER
extern bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc);
@@ -127,9 +125,7 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
#define fsparam_string(NAME, OPT) \
__fsparam(fs_param_is_string, NAME, OPT, 0, NULL)
-#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0, NULL)
#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
-#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
#define fsparam_file_or_string(NAME, OPT) \
__fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL)
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 516aba5b858b..54712ec61ffb 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -450,8 +450,8 @@ u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
/* bio.c */
bool fscrypt_decrypt_bio(struct bio *bio);
-int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len);
+int fscrypt_zeroout_range(const struct inode *inode, loff_t pos,
+ sector_t sector, u64 len);
/* hooks.c */
int fscrypt_file_open(struct inode *inode, struct file *filp);
@@ -755,8 +755,8 @@ static inline bool fscrypt_decrypt_bio(struct bio *bio)
return true;
}
-static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len)
+static inline int fscrypt_zeroout_range(const struct inode *inode, loff_t pos,
+ sector_t sector, u64 len)
{
return -EOPNOTSUPP;
}
@@ -865,19 +865,11 @@ static inline void fscrypt_set_ops(struct super_block *sb,
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
-void fscrypt_set_bio_crypt_ctx(struct bio *bio,
- const struct inode *inode, u64 first_lblk,
- gfp_t gfp_mask);
-
-void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
- const struct buffer_head *first_bh,
- gfp_t gfp_mask);
+void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
+ loff_t pos, gfp_t gfp_mask);
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
- u64 next_lblk);
-
-bool fscrypt_mergeable_bio_bh(struct bio *bio,
- const struct buffer_head *next_bh);
+ loff_t pos);
bool fscrypt_dio_supported(struct inode *inode);
@@ -892,22 +884,11 @@ static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
const struct inode *inode,
- u64 first_lblk, gfp_t gfp_mask) { }
-
-static inline void fscrypt_set_bio_crypt_ctx_bh(
- struct bio *bio,
- const struct buffer_head *first_bh,
- gfp_t gfp_mask) { }
+ loff_t pos, gfp_t gfp_mask) { }
static inline bool fscrypt_mergeable_bio(struct bio *bio,
const struct inode *inode,
- u64 next_lblk)
-{
- return true;
-}
-
-static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
- const struct buffer_head *next_bh)
+ loff_t pos)
{
return true;
}
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 897d6211c163..1da63f2d7040 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -178,9 +178,6 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
- * @driver_override: driver name to force a match; do not set directly,
- * because core frees it; use driver_set_override() to
- * set or clear it.
*
* Generic device object for MC object devices that are "attached" to a
* MC bus.
@@ -214,7 +211,6 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
- const char *driver_override;
};
#define to_fsl_mc_device(_dev) \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index c242fe49af4c..28b30c6f1031 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -31,7 +31,7 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
-#ifdef CONFIG_TRACING
+#ifdef CONFIG_TRACER_SNAPSHOT
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 097be89487bf..80b38fbf2121 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
#define _LINUX_FWNODE_H_
#include <linux/bits.h>
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -42,12 +43,12 @@ struct device;
* suppliers. Only enforce ordering with suppliers that have
* drivers.
*/
-#define FWNODE_FLAG_LINKS_ADDED BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE BIT(1)
-#define FWNODE_FLAG_INITIALIZED BIT(2)
-#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
-#define FWNODE_FLAG_BEST_EFFORT BIT(4)
-#define FWNODE_FLAG_VISITED BIT(5)
+#define FWNODE_FLAG_LINKS_ADDED 0
+#define FWNODE_FLAG_NOT_DEVICE 1
+#define FWNODE_FLAG_INITIALIZED 2
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD 3
+#define FWNODE_FLAG_BEST_EFFORT 4
+#define FWNODE_FLAG_VISITED 5
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -57,7 +58,7 @@ struct fwnode_handle {
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
- u8 flags;
+ unsigned long flags;
};
/*
@@ -212,16 +213,37 @@ static inline void fwnode_init(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&fwnode->suppliers);
}
+static inline void fwnode_set_flag(struct fwnode_handle *fwnode,
+ unsigned int bit)
+{
+ set_bit(bit, &fwnode->flags);
+}
+
+static inline void fwnode_clear_flag(struct fwnode_handle *fwnode,
+ unsigned int bit)
+{
+ clear_bit(bit, &fwnode->flags);
+}
+
+static inline void fwnode_assign_flag(struct fwnode_handle *fwnode,
+ unsigned int bit, bool value)
+{
+ assign_bit(bit, &fwnode->flags, value);
+}
+
+static inline bool fwnode_test_flag(struct fwnode_handle *fwnode,
+ unsigned int bit)
+{
+ return test_bit(bit, &fwnode->flags);
+}
+
static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
bool initialized)
{
if (IS_ERR_OR_NULL(fwnode))
return;
- if (initialized)
- fwnode->flags |= FWNODE_FLAG_INITIALIZED;
- else
- fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+ fwnode_assign_flag(fwnode, FWNODE_FLAG_INITIALIZED, initialized);
}
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h
index 6a9a1acb5aad..fc5d0b5edadc 100644
--- a/include/linux/generic_pt/common.h
+++ b/include/linux/generic_pt/common.h
@@ -175,6 +175,22 @@ enum {
PT_FEAT_VTDSS_FORCE_WRITEABLE,
};
+struct pt_riscv_32 {
+ struct pt_common common;
+};
+
+struct pt_riscv_64 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * Support the 64k contiguous page size following the Svnapot extension.
+ */
+ PT_FEAT_RISCV_SVNAPOT_64K = PT_FEAT_FMT_START,
+
+};
+
struct pt_x86_64 {
struct pt_common common;
};
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index 9eefbb74efd0..dd0edd02a48a 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -66,6 +66,13 @@ struct pt_iommu {
struct device *iommu_device;
};
+static inline struct pt_iommu *iommupt_from_domain(struct iommu_domain *domain)
+{
+ if (!IS_ENABLED(CONFIG_IOMMU_PT) || !domain->is_iommupt)
+ return NULL;
+ return container_of(domain, struct pt_iommu, domain);
+}
+
/**
* struct pt_iommu_info - Details about the IOMMU page table
*
@@ -81,6 +88,56 @@ struct pt_iommu_info {
struct pt_iommu_ops {
/**
+ * @map_range: Install translation for an IOVA range
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @paddr: Physical/Output address to start
+ * @len: Length of the range starting from @iova
+ * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
+ * @gfp: GFP flags for any memory allocations
+ *
+ * The range starting at IOVA will have paddr installed into it. The
+ * range is automatically segmented into optimally sized table entries,
+ * and can have any valid alignment.
+ *
+ * On error the caller will probably want to invoke unmap on the range
+ * from iova up to the amount indicated by @mapped to return the table
+ * back to an unchanged state.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA
+ * that were mapped are added to @mapped, @mapped is not zeroed first.
+ */
+ int (*map_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ phys_addr_t paddr, dma_addr_t len, unsigned int prot,
+ gfp_t gfp, size_t *mapped);
+
+ /**
+ * @unmap_range: Make a range of IOVA empty/not present
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @len: Length of the range starting from @iova
+ * @iotlb_gather: Gather struct that must be flushed on return
+ *
+ * unmap_range() will remove a translation created by map_range(). It
+ * cannot subdivide a mapping created by map_range(), so it should be
+ * called with IOVA ranges that match those passed to map_range(). The
+ * IOVA range can aggregate contiguous map_range() calls so long as no
+ * individual range is split.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: Number of bytes of VA unmapped. iova + res will be the
+ * point unmapping stopped.
+ */
+ size_t (*unmap_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ dma_addr_t len,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ /**
* @set_dirty: Make the iova write dirty
* @iommu_table: Table to manipulate
* @iova: IO virtual address to start
@@ -194,14 +251,6 @@ struct pt_iommu_cfg {
#define IOMMU_PROTOTYPES(fmt) \
phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
dma_addr_t iova); \
- int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \
- unsigned long iova, phys_addr_t paddr, \
- size_t pgsize, size_t pgcount, \
- int prot, gfp_t gfp, size_t *mapped); \
- size_t pt_iommu_##fmt##_unmap_pages( \
- struct iommu_domain *domain, unsigned long iova, \
- size_t pgsize, size_t pgcount, \
- struct iommu_iotlb_gather *iotlb_gather); \
int pt_iommu_##fmt##_read_and_clear_dirty( \
struct iommu_domain *domain, unsigned long iova, size_t size, \
unsigned long flags, struct iommu_dirty_bitmap *dirty); \
@@ -222,9 +271,7 @@ struct pt_iommu_cfg {
* iommu_pt
*/
#define IOMMU_PT_DOMAIN_OPS(fmt) \
- .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
- .map_pages = &pt_iommu_##fmt##_map_pages, \
- .unmap_pages = &pt_iommu_##fmt##_unmap_pages
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys
#define IOMMU_PT_DIRTY_OPS(fmt) \
.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
@@ -275,6 +322,17 @@ struct pt_iommu_vtdss_hw_info {
IOMMU_FORMAT(vtdss, vtdss_pt);
+struct pt_iommu_riscv_64_cfg {
+ struct pt_iommu_cfg common;
+};
+
+struct pt_iommu_riscv_64_hw_info {
+ u64 ppn;
+ u8 fsc_iosatp_mode;
+};
+
+IOMMU_FORMAT(riscv_64, riscv_64pt);
+
struct pt_iommu_x86_64_cfg {
struct pt_iommu_cfg common;
/* 4 is a 57 bit 5 level table */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index d4da060b7532..a7d36c9ea924 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -149,7 +149,8 @@ static int __ ## s_name ## _from_attrs(struct s_name *s, \
if (!tla) \
return -ENOMSG; \
DPRINT_TLA(#s_name, "<=-", #tag_name); \
- err = drbd_nla_parse_nested(ntb, maxtype, tla, s_name ## _nl_policy); \
+ err = nla_parse_nested_deprecated(ntb, maxtype, tla, \
+ s_name ## _nl_policy, NULL); \
if (err) \
return err; \
\
@@ -292,6 +293,10 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#endif
.maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
.policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy),
+#ifdef GENL_MAGIC_FAMILY_PRE_DOIT
+ .pre_doit = GENL_MAGIC_FAMILY_PRE_DOIT,
+ .post_doit = GENL_MAGIC_FAMILY_POST_DOIT,
+#endif
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index 621b87a87d74..2200cedd160a 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -26,16 +26,6 @@ extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
*/
/*
- * @DRBD_GENLA_F_MANDATORY: By default, netlink ignores attributes it does not
- * know about. This flag can be set in nlattr->nla_type to indicate that this
- * attribute must not be ignored.
- *
- * We check and remove this flag in drbd_nla_check_mandatory() before
- * validating the attribute types and lengths via nla_parse_nested().
- */
-#define DRBD_GENLA_F_MANDATORY (1 << 14)
-
-/*
* Flags specific to drbd and not visible at the netlink layer, used in
* <struct>_from_attrs and <struct>_to_skb:
*
@@ -52,7 +42,6 @@ extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
#define DRBD_F_SENSITIVE (1 << 1)
#define DRBD_F_INVARIANT (1 << 2)
-#define __nla_type(x) ((__u16)((x) & NLA_TYPE_MASK & ~DRBD_GENLA_F_MANDATORY))
/* }}}1
* MAGIC
@@ -158,12 +147,12 @@ enum { \
#undef __field
#define __field(attr_nr, attr_flag, name, nla_type, type, \
__get, __put, __is_signed) \
- T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)),
+ T_ ## name = (__u16)(attr_nr),
#undef __array
#define __array(attr_nr, attr_flag, name, nla_type, type, \
maxlen, __get, __put, __is_signed) \
- T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)),
+ T_ ## name = (__u16)(attr_nr),
#include GENL_MAGIC_INCLUDE_FILE
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 0d8408582918..3efb5cb1e1d1 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -6,6 +6,8 @@
#include <linux/err.h>
#include <linux/types.h>
+#include "defs.h"
+
struct acpi_device;
struct device;
struct fwnode_handle;
diff --git a/include/linux/gpio/defs.h b/include/linux/gpio/defs.h
new file mode 100644
index 000000000000..b69fd7c041b2
--- /dev/null
+++ b/include/linux/gpio/defs.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_GPIO_DEFS_H
+#define __LINUX_GPIO_DEFS_H
+
+#define GPIO_LINE_DIRECTION_IN 1
+#define GPIO_LINE_DIRECTION_OUT 0
+
+#endif /* __LINUX_GPIO_DEFS_H */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index fabe2baf7b50..17511434ed07 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,8 @@
#include <asm/msi.h>
#endif
+#include "defs.h"
+
struct device;
struct irq_chip;
struct irq_data;
@@ -42,9 +44,6 @@ union gpio_irq_fwspec {
#endif
};
-#define GPIO_LINE_DIRECTION_IN 1
-#define GPIO_LINE_DIRECTION_OUT 0
-
/**
* struct gpio_irq_chip - GPIO interrupt controller
*/
@@ -344,11 +343,17 @@ struct gpio_irq_chip {
* @direction_output: configures signal "offset" as output, returns 0 on
* success or a negative error number. This can be omitted on input-only
* or output-only gpio chips.
- * @get: returns value for signal "offset", 0=low, 1=high, or negative error
+ * @get: returns value for signal "offset", 0=low, 1=high, or negative error.
+ * The low and high values are defined as physical low/high on the line
+ * in/out to the connector such as a physical pad, pin or rail. The GPIO
+ * library has internal logic to handle lines that are active low, such
+ * as indicated by overstrike or #name in a schematic, and the driver
+ * should not try to second-guess the logic value of a line.
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
* @set: assigns output value for signal "offset", returns 0 on success or
- * negative error value
+ * negative error value. The output value follows the same semantic
+ * rules as for @get.
* @set_multiple: assigns output values for multiple signals defined by
* "mask", returns 0 on success or negative error value
* @set_config: optional hook for all kinds of settings. Uses the same
diff --git a/include/linux/gpio/generic.h b/include/linux/gpio/generic.h
index ff566dc9c3cb..de43c06c83ef 100644
--- a/include/linux/gpio/generic.h
+++ b/include/linux/gpio/generic.h
@@ -3,9 +3,15 @@
#ifndef __LINUX_GPIO_GENERIC_H
#define __LINUX_GPIO_GENERIC_H
+#include <linux/bits.h>
+#include <linux/bug.h>
#include <linux/cleanup.h>
-#include <linux/gpio/driver.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
struct device;
diff --git a/include/linux/gpio/gpio-nomadik.h b/include/linux/gpio/gpio-nomadik.h
index 592a774a53cd..8061b9826361 100644
--- a/include/linux/gpio/gpio-nomadik.h
+++ b/include/linux/gpio/gpio-nomadik.h
@@ -114,8 +114,7 @@ struct nmk_gpio_chip {
}
/**
- * enum prcm_gpiocr_reg_index
- * Used to reference an PRCM GPIOCR register address.
+ * enum prcm_gpiocr_reg_index - Used to reference a PRCM GPIOCR register address.
*/
enum prcm_gpiocr_reg_index {
PRCM_IDX_GPIOCR1,
@@ -123,8 +122,7 @@ enum prcm_gpiocr_reg_index {
PRCM_IDX_GPIOCR3
};
/**
- * enum prcm_gpiocr_altcx_index
- * Used to reference an Other alternate-C function.
+ * enum prcm_gpiocr_altcx_index - Used to reference an Other alternate-C function.
*/
enum prcm_gpiocr_altcx_index {
PRCM_IDX_GPIOCR_ALTC1,
@@ -135,7 +133,7 @@ enum prcm_gpiocr_altcx_index {
};
/**
- * struct prcm_gpio_altcx - Other alternate-C function
+ * struct prcm_gpiocr_altcx - Other alternate-C function
* @used: other alternate-C function availability
* @reg_index: PRCM GPIOCR register index used to control the function
* @control_bit: PRCM GPIOCR bit used to control the function
@@ -147,7 +145,7 @@ struct prcm_gpiocr_altcx {
} __packed;
/**
- * struct prcm_gpio_altcx_pin_desc - Other alternate-C pin
+ * struct prcm_gpiocr_altcx_pin_desc - Other alternate-C pin
* @pin: The pin number
* @altcx: array of other alternate-C[1-4] functions
*/
@@ -193,7 +191,7 @@ struct nmk_pingroup {
* numbering.
* @npins: The number of entries in @pins.
* @functions: The functions supported on this SoC.
- * @nfunction: The number of entries in @functions.
+ * @nfunctions: The number of entries in @functions.
* @groups: An array describing all pin groups the pin SoC supports.
* @ngroups: The number of entries in @groups.
* @altcx_pins: The pins that support Other alternate-C function on this SoC
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 44e5f162973e..5eb88f5d0630 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -46,23 +46,6 @@ struct gpiod_lookup_table {
struct gpiod_lookup table[];
};
-/**
- * struct gpiod_hog - GPIO line hog table
- * @chip_label: name of the chip the GPIO belongs to
- * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
- * @line_name: consumer name for the hogged line
- * @lflags: bitmask of gpio_lookup_flags GPIO_* values
- * @dflags: GPIO flags used to specify the direction and value
- */
-struct gpiod_hog {
- struct list_head list;
- const char *chip_label;
- u16 chip_hwnum;
- const char *line_name;
- unsigned long lflags;
- int dflags;
-};
-
/*
* Helper for lookup tables with just one single lookup for a device.
*/
@@ -95,24 +78,10 @@ static struct gpiod_lookup_table _name = { \
.flags = _flags, \
}
-/*
- * Simple definition of a single GPIO hog in an array.
- */
-#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
-(struct gpiod_hog) { \
- .chip_label = _chip_label, \
- .chip_hwnum = _chip_hwnum, \
- .line_name = _line_name, \
- .lflags = _lflags, \
- .dflags = _dflags, \
-}
-
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
-void gpiod_add_hogs(struct gpiod_hog *hogs);
-void gpiod_remove_hogs(struct gpiod_hog *hogs);
#else /* ! CONFIG_GPIOLIB */
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -120,8 +89,6 @@ static inline
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
-static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
-static inline void gpiod_remove_hogs(struct gpiod_hog *hogs) {}
#endif /* CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/gpu_buddy.h b/include/linux/gpu_buddy.h
new file mode 100644
index 000000000000..5fa917ba5450
--- /dev/null
+++ b/include/linux/gpu_buddy.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __GPU_BUDDY_H__
+#define __GPU_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/rbtree.h>
+#include <linux/rbtree_augmented.h>
+
+/**
+ * GPU_BUDDY_RANGE_ALLOCATION - Allocate within a specific address range
+ *
+ * When set, allocation is restricted to the range [start, end) specified
+ * in gpu_buddy_alloc_blocks(). Without this flag, start/end are ignored
+ * and allocation can use any free space.
+ */
+#define GPU_BUDDY_RANGE_ALLOCATION BIT(0)
+
+/**
+ * GPU_BUDDY_TOPDOWN_ALLOCATION - Allocate from top of address space
+ *
+ * Allocate starting from high addresses and working down. Useful for
+ * separating different allocation types (e.g., kernel vs userspace)
+ * to reduce fragmentation.
+ */
+#define GPU_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+
+/**
+ * GPU_BUDDY_CONTIGUOUS_ALLOCATION - Require physically contiguous blocks
+ *
+ * The allocation must be satisfied with a single contiguous block.
+ * If the requested size cannot be allocated contiguously, the
+ * allocation fails with -ENOSPC.
+ */
+#define GPU_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
+
+/**
+ * GPU_BUDDY_CLEAR_ALLOCATION - Prefer pre-cleared (zeroed) memory
+ *
+ * Attempt to allocate from the clear tree first. If insufficient clear
+ * memory is available, falls back to dirty memory. Useful when the
+ * caller needs zeroed memory and wants to avoid GPU clear operations.
+ */
+#define GPU_BUDDY_CLEAR_ALLOCATION BIT(3)
+
+/**
+ * GPU_BUDDY_CLEARED - Mark returned blocks as cleared
+ *
+ * Used with gpu_buddy_free_list() to indicate that the memory being
+ * freed has been cleared (zeroed). The blocks will be placed in the
+ * clear tree for future GPU_BUDDY_CLEAR_ALLOCATION requests.
+ */
+#define GPU_BUDDY_CLEARED BIT(4)
+
+/**
+ * GPU_BUDDY_TRIM_DISABLE - Disable automatic block trimming
+ *
+ * By default, if an allocation is smaller than the allocated block,
+ * excess memory is trimmed and returned to the free pool. This flag
+ * disables trimming, keeping the full power-of-two block size.
+ */
+#define GPU_BUDDY_TRIM_DISABLE BIT(5)
+
+enum gpu_buddy_free_tree {
+ GPU_BUDDY_CLEAR_TREE = 0,
+ GPU_BUDDY_DIRTY_TREE,
+ GPU_BUDDY_MAX_FREE_TREES,
+};
+
+#define for_each_free_tree(tree) \
+ for ((tree) = 0; (tree) < GPU_BUDDY_MAX_FREE_TREES; (tree)++)
+
+/**
+ * struct gpu_buddy_block - Block within a buddy allocator
+ *
+ * Each block in the buddy allocator is represented by this structure.
+ * Blocks are organized in a binary tree where each parent block can be
+ * split into two children (left and right buddies). The allocator manages
+ * blocks at various orders (power-of-2 sizes) from chunk_size up to the
+ * largest contiguous region.
+ *
+ * @private: Private data owned by the allocator user (e.g., driver-specific data)
+ * @link: List node for user ownership while block is allocated
+ */
+struct gpu_buddy_block {
+/* private: */
+ /*
+ * Header bit layout:
+ * - Bits 63:12: block offset within the address space
+ * - Bits 11:10: state (ALLOCATED, FREE, or SPLIT)
+ * - Bit 9: clear bit (1 if memory is zeroed)
+ * - Bits 8:6: reserved
+ * - Bits 5:0: order (log2 of size relative to chunk_size)
+ */
+#define GPU_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define GPU_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define GPU_BUDDY_ALLOCATED (1 << 10)
+#define GPU_BUDDY_FREE (2 << 10)
+#define GPU_BUDDY_SPLIT (3 << 10)
+#define GPU_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9)
+/* Free to be used, if needed in the future */
+#define GPU_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6)
+#define GPU_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
+ u64 header;
+
+ struct gpu_buddy_block *left;
+ struct gpu_buddy_block *right;
+ struct gpu_buddy_block *parent;
+/* public: */
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through gpu_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * gpu_buddy_free* ownership is given back to the mm.
+ */
+ union {
+/* private: */
+ struct rb_node rb;
+/* public: */
+ struct list_head link;
+ };
+/* private: */
+ struct list_head tmp_link;
+ unsigned int subtree_max_alignment;
+};
+
+/* Order-zero must be at least SZ_4K */
+#define GPU_BUDDY_MAX_ORDER (63 - 12)
+
+/**
+ * struct gpu_buddy - GPU binary buddy allocator
+ *
+ * The buddy allocator provides efficient power-of-two memory allocation
+ * with fast allocation and free operations. It is commonly used for GPU
+ * memory management where allocations can be split into power-of-two
+ * block sizes.
+ *
+ * Locking should be handled by the user; a simple mutex around
+ * gpu_buddy_alloc_blocks() and gpu_buddy_free_block()/gpu_buddy_free_list()
+ * should suffice.
+ *
+ * @n_roots: Number of root blocks in the roots array.
+ * @max_order: Maximum block order (log2 of largest block size / chunk_size).
+ * @chunk_size: Minimum allocation granularity in bytes. Must be at least SZ_4K.
+ * @size: Total size of the address space managed by this allocator in bytes.
+ * @avail: Total free space currently available for allocation in bytes.
+ * @clear_avail: Free space available in the clear tree (zeroed memory) in bytes.
+ * This is a subset of @avail.
+ */
+struct gpu_buddy {
+/* private: */
+ /*
+ * Array of red-black trees for free block management.
+ * Indexed as free_trees[clear/dirty][order] where:
+ * - Index 0 (GPU_BUDDY_CLEAR_TREE): blocks with zeroed content
+ * - Index 1 (GPU_BUDDY_DIRTY_TREE): blocks with unknown content
+ * Each tree holds free blocks of the corresponding order.
+ */
+ struct rb_root **free_trees;
+ /*
+ * Array of root blocks representing the top-level blocks of the
+ * binary tree(s). Multiple roots exist when the total size is not
+ * a power of two, with each root being the largest power-of-two
+ * that fits in the remaining space.
+ */
+ struct gpu_buddy_block **roots;
+/* public: */
+ unsigned int n_roots;
+ unsigned int max_order;
+ u64 chunk_size;
+ u64 size;
+ u64 avail;
+ u64 clear_avail;
+};
+
+static inline u64
+gpu_buddy_block_offset(const struct gpu_buddy_block *block)
+{
+ return block->header & GPU_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+gpu_buddy_block_order(struct gpu_buddy_block *block)
+{
+ return block->header & GPU_BUDDY_HEADER_ORDER;
+}
+
+static inline bool
+gpu_buddy_block_is_free(struct gpu_buddy_block *block)
+{
+ return (block->header & GPU_BUDDY_HEADER_STATE) == GPU_BUDDY_FREE;
+}
+
+static inline bool
+gpu_buddy_block_is_clear(struct gpu_buddy_block *block)
+{
+ return block->header & GPU_BUDDY_HEADER_CLEAR;
+}
+
+static inline u64
+gpu_buddy_block_size(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block)
+{
+ return mm->chunk_size << gpu_buddy_block_order(block);
+}
+
+int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size);
+
+void gpu_buddy_fini(struct gpu_buddy *mm);
+
+int gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags);
+
+int gpu_buddy_block_trim(struct gpu_buddy *mm,
+ u64 *start,
+ u64 new_size,
+ struct list_head *blocks);
+
+void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear);
+
+void gpu_buddy_free_block(struct gpu_buddy *mm, struct gpu_buddy_block *block);
+
+void gpu_buddy_free_list(struct gpu_buddy *mm,
+ struct list_head *objects,
+ unsigned int flags);
+
+void gpu_buddy_print(struct gpu_buddy *mm);
+void gpu_buddy_block_print(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block);
+#endif
diff --git a/include/linux/hfs_common.h b/include/linux/hfs_common.h
index dadb5e0aa8a3..07dfc39630ab 100644
--- a/include/linux/hfs_common.h
+++ b/include/linux/hfs_common.h
@@ -166,6 +166,11 @@ struct hfsplus_attr_unistr {
hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
} __packed;
+enum {
+ HFS_REGULAR_NAME,
+ HFS_XATTR_NAME,
+};
+
struct hfs_extent {
__be16 block;
__be16 count;
@@ -510,7 +515,11 @@ struct hfs_btree_header_rec {
#define HFSPLUS_NODE_MXSZ 32768
#define HFSPLUS_ATTR_TREE_NODE_SIZE 8192
#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT 3
+#define HFSPLUS_BTREE_HDR_MAP_REC_INDEX 2 /* Map (bitmap) record in Header node */
+#define HFSPLUS_BTREE_MAP_NODE_REC_INDEX 0 /* Map record in Map Node */
#define HFSPLUS_BTREE_HDR_USER_BYTES 128
+#define HFSPLUS_BTREE_MAP_NODE_RECS_COUNT 2
+#define HFSPLUS_BTREE_MAP_NODE_RESERVED_BYTES 2
/* btree key type */
#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 31324609af4d..442a80d79e89 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -634,6 +634,38 @@ enum hid_battery_status {
HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
};
+/**
+ * struct hid_battery - represents a single battery power supply
+ * @dev: pointer to the parent hid_device
+ * @ps: the power supply instance
+ * @min: minimum battery value from HID descriptor
+ * @max: maximum battery value from HID descriptor
+ * @report_type: HID report type (input/feature)
+ * @report_id: HID report ID for this battery
+ * @charge_status: current charging status
+ * @status: battery reporting status
+ * @capacity: current battery capacity (0-100)
+ * @avoid_query: if true, avoid querying battery (e.g., for stylus)
+ * @present: if true, battery is present (may be dynamic)
+ * @ratelimit_time: rate limiting for battery reports
+ * @list: list node for linking into hid_device's battery list
+ */
+struct hid_battery {
+ struct hid_device *dev;
+ struct power_supply *ps;
+ __s32 min;
+ __s32 max;
+ __s32 report_type;
+ __s32 report_id;
+ __s32 charge_status;
+ enum hid_battery_status status;
+ __s32 capacity;
+ bool avoid_query;
+ bool present;
+ ktime_t ratelimit_time;
+ struct list_head list;
+};
+
struct hid_driver;
struct hid_ll_driver;
@@ -670,20 +702,10 @@ struct hid_device {
#ifdef CONFIG_HID_BATTERY_STRENGTH
/*
* Power supply information for HID devices which report
- * battery strength. power_supply was successfully registered if
- * battery is non-NULL.
+ * battery strength. Each battery is tracked separately in the
+ * batteries list.
*/
- struct power_supply *battery;
- __s32 battery_capacity;
- __s32 battery_min;
- __s32 battery_max;
- __s32 battery_report_type;
- __s32 battery_report_id;
- __s32 battery_charge_status;
- enum hid_battery_status battery_status;
- bool battery_avoid_query;
- bool battery_present;
- ktime_t battery_ratelimit_time;
+ struct list_head batteries;
#endif
unsigned long status; /* see STAT flags above */
@@ -699,6 +721,7 @@ struct hid_device {
char name[128]; /* Device name */
char phys[64]; /* Device physical location */
char uniq[64]; /* Device unique identifier (serial #) */
+ u64 firmware_version; /* Firmware version */
void *driver_data;
@@ -744,6 +767,15 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
dev_set_drvdata(&hdev->dev, data);
}
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+static inline struct hid_battery *hid_get_battery(struct hid_device *hdev)
+{
+ if (list_empty(&hdev->batteries))
+ return NULL;
+ return list_first_entry(&hdev->batteries, struct hid_battery, list);
+}
+#endif
+
#define HID_GLOBAL_STACK_SIZE 4
#define HID_COLLECTION_STACK_SIZE 4
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 51a6dc2b97e9..a6268dc4f7cb 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -102,6 +102,12 @@
#define QM_MIG_REGION_SEL 0x100198
#define QM_MIG_REGION_EN BIT(0)
+#define QM_MAX_CHANNEL_NUM 8
+#define QM_CHANNEL_USAGE_OFFSET 0x1100
+#define QM_MAX_DEV_USAGE 100
+#define QM_DEV_USAGE_RATE 100
+#define QM_CHANNEL_ADDR_INTRVL 0x4
+
/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA 1 /* use uacce sva mode */
@@ -359,6 +365,11 @@ struct qm_rsv_buf {
struct qm_dma qcdma;
};
+struct qm_channel {
+ int channel_num;
+ const char *channel_name[QM_MAX_CHANNEL_NUM];
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -433,6 +444,7 @@ struct hisi_qm {
struct qm_err_isolate isolate_data;
struct hisi_qm_cap_tables cap_tables;
+ struct qm_channel channel_data;
};
struct hisi_qp_status {
@@ -546,8 +558,6 @@ int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
-int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
-void hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 5e7a63143a4a..1f5f55917d1c 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -453,16 +453,6 @@ void host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);
-struct tegra_mipi_device;
-
-struct tegra_mipi_device *tegra_mipi_request(struct device *device,
- struct device_node *np);
-void tegra_mipi_free(struct tegra_mipi_device *device);
-int tegra_mipi_enable(struct tegra_mipi_device *device);
-int tegra_mipi_disable(struct tegra_mipi_device *device);
-int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
-int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
-
/* host1x memory contexts */
struct host1x_memory_context {
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 74adbd4e7003..9ced498fefaa 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -13,6 +13,7 @@
#define _LINUX_HRTIMER_H
#include <linux/hrtimer_defs.h>
+#include <linux/hrtimer_rearm.h>
#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -31,6 +32,13 @@
* soft irq context
* HRTIMER_MODE_HARD - Timer callback function will be executed in
* hard irq context even on PREEMPT_RT.
+ * HRTIMER_MODE_LAZY_REARM - Avoid reprogramming if the timer was the
+ * first expiring timer and is moved into the
+ * future. Special mode for the HRTICK timer to
+ * avoid extensive reprogramming of the hardware,
+ * which is expensive in virtual machines. Risks
+ * a pointless expiry, but that's better than
+ * reprogramming on every context switch.
*/
enum hrtimer_mode {
HRTIMER_MODE_ABS = 0x00,
@@ -38,6 +46,7 @@ enum hrtimer_mode {
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_SOFT = 0x04,
HRTIMER_MODE_HARD = 0x08,
+ HRTIMER_MODE_LAZY_REARM = 0x10,
HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
@@ -55,33 +64,6 @@ enum hrtimer_mode {
HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
-/*
- * Values to track state of the timer
- *
- * Possible states:
- *
- * 0x00 inactive
- * 0x01 enqueued into rbtree
- *
- * The callback state is not part of the timer->state because clearing it would
- * mean touching the timer after the callback, this makes it impossible to free
- * the timer from the callback function.
- *
- * Therefore we track the callback state in:
- *
- * timer->base->cpu_base->running == timer
- *
- * On SMP it is possible to have a "callback function running and enqueued"
- * status. It happens for example when a posix timer expired and the callback
- * queued a signal. Between dropping the lock which protects the posix timer
- * and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer.
- *
- * All state transitions are protected by cpu_base->lock.
- */
-#define HRTIMER_STATE_INACTIVE 0x00
-#define HRTIMER_STATE_ENQUEUED 0x01
-
/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
@@ -134,11 +116,6 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
return timer->_softexpires;
}
-static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
-{
- return ktime_to_ns(timer->node.expires);
-}
-
ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
@@ -146,24 +123,23 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
- timer->base->cpu_base->hres_active : 0;
-}
-
#ifdef CONFIG_HIGH_RES_TIMERS
+extern unsigned int hrtimer_resolution;
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-extern unsigned int hrtimer_resolution;
+extern struct static_key_false hrtimer_highres_enabled_key;
-#else
+static inline bool hrtimer_highres_enabled(void)
+{
+ return static_branch_likely(&hrtimer_highres_enabled_key);
+}
+#else /* CONFIG_HIGH_RES_TIMERS */
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-
-#endif
+static inline bool hrtimer_highres_enabled(void) { return false; }
+#endif /* !CONFIG_HIGH_RES_TIMERS */
static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
@@ -293,8 +269,8 @@ extern bool hrtimer_active(const struct hrtimer *timer);
*/
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- /* The READ_ONCE pairs with the update functions of timer->state */
- return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
+ /* The READ_ONCE pairs with the update functions of timer->is_queued */
+ return READ_ONCE(timer->is_queued);
}
/*
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index 02b010df6570..52ed9e46ff13 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -19,21 +19,23 @@
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @seq: seqcount around __run_hrtimer
+ * @expires_next: Absolute time of the next event in this clock base
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
- struct hrtimer_cpu_base *cpu_base;
- unsigned int index;
- clockid_t clockid;
- seqcount_raw_spinlock_t seq;
- struct hrtimer *running;
- struct timerqueue_head active;
- ktime_t offset;
+ struct hrtimer_cpu_base *cpu_base;
+ const unsigned int index;
+ const clockid_t clockid;
+ seqcount_raw_spinlock_t seq;
+ ktime_t expires_next;
+ struct hrtimer *running;
+ struct timerqueue_linked_head active;
+ ktime_t offset;
} __hrtimer_clock_base_align;
-enum hrtimer_base_type {
+enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
@@ -42,37 +44,36 @@ enum hrtimer_base_type {
HRTIMER_BASE_REALTIME_SOFT,
HRTIMER_BASE_BOOTTIME_SOFT,
HRTIMER_BASE_TAI_SOFT,
- HRTIMER_MAX_CLOCK_BASES,
+ HRTIMER_MAX_CLOCK_BASES
};
/**
* struct hrtimer_cpu_base - the per cpu clock bases
- * @lock: lock protecting the base and associated clock bases
- * and timers
- * @cpu: cpu number
- * @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set_seq: Sequence counter of clock was set events
- * @hres_active: State of high resolution mode
- * @in_hrtirq: hrtimer_interrupt() is currently executing
- * @hang_detected: The last hrtimer interrupt detected a hang
- * @softirq_activated: displays, if the softirq is raised - update of softirq
- * related settings is not required then.
- * @nr_events: Total number of hrtimer interrupt events
- * @nr_retries: Total number of hrtimer interrupt retries
- * @nr_hangs: Total number of hrtimer interrupt hangs
- * @max_hang_time: Maximum time spent in hrtimer_interrupt
- * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
- * expired
- * @online: CPU is online from an hrtimers point of view
- * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
- * callback to finish.
- * @expires_next: absolute time of the next event, is required for remote
- * hrtimer enqueue; it is the total first expiry time (hard
- * and soft hrtimer are taken into account)
- * @next_timer: Pointer to the first expiring timer
- * @softirq_expires_next: Time to check, if soft queues needs also to be expired
- * @softirq_next_timer: Pointer to the first expiring softirq based timer
- * @clock_base: array of clock bases for this cpu
+ * @lock: lock protecting the base and associated clock bases and timers
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @hres_active: State of high resolution mode
+ * @deferred_rearm: A deferred rearm is pending
+ * @deferred_needs_update: The deferred rearm must re-evaluate the first timer
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated: displays, if the softirq is raised - update of softirq
+ * related settings is not required then.
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are expired
+ * @online: CPU is online from an hrtimers point of view
+ * @timer_waiters: A hrtimer_cancel() invocation waits for the timer callback to finish.
+ * @expires_next: Absolute time of the next event, is required for remote
+ * hrtimer enqueue; it is the total first expiry time (hard
+ * and soft hrtimer are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time to check if soft queues also need to be expired
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
+ * @deferred_expires_next: Cached expires next value for deferred rearm
+ * @clock_base: Array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
* Do not dereference the pointer because it is not reliable on
@@ -83,11 +84,12 @@ struct hrtimer_cpu_base {
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
- unsigned int hres_active : 1,
- in_hrtirq : 1,
- hang_detected : 1,
- softirq_activated : 1,
- online : 1;
+ bool hres_active;
+ bool deferred_rearm;
+ bool deferred_needs_update;
+ bool hang_detected;
+ bool softirq_activated;
+ bool online;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int nr_events;
unsigned short nr_retries;
@@ -102,6 +104,7 @@ struct hrtimer_cpu_base {
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
+ ktime_t deferred_expires_next;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
call_single_data_t csd;
} ____cacheline_aligned;
diff --git a/include/linux/hrtimer_rearm.h b/include/linux/hrtimer_rearm.h
new file mode 100644
index 000000000000..a6f2e5d5e1c7
--- /dev/null
+++ b/include/linux/hrtimer_rearm.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_REARM_H
+#define _LINUX_HRTIMER_REARM_H
+
+#ifdef CONFIG_HRTIMER_REARM_DEFERRED
+#include <linux/thread_info.h>
+
+void __hrtimer_rearm_deferred(void);
+
+/*
+ * This is purely CPU local, so check the TIF bit first to avoid the overhead of
+ * the atomic test_and_clear_bit() operation for the common case where the bit
+ * is not set.
+ */
+static __always_inline bool hrtimer_test_and_clear_rearm_deferred_tif(unsigned long tif_work)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (unlikely(tif_work & _TIF_HRTIMER_REARM)) {
+ clear_thread_flag(TIF_HRTIMER_REARM);
+ return true;
+ }
+ return false;
+}
+
+#define TIF_REARM_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_HRTIMER_REARM)
+
+/* Invoked from the exit to user before invoking exit_to_user_mode_loop() */
+static __always_inline bool
+hrtimer_rearm_deferred_user_irq(unsigned long *tif_work, const unsigned long tif_mask)
+{
+ /* Help the compiler to optimize the function out for syscall returns */
+ if (!(tif_mask & _TIF_HRTIMER_REARM))
+ return false;
+ /*
+ * Rearm the timer if none of the resched flags is set before going into
+ * the loop which re-enables interrupts.
+ */
+ if (unlikely((*tif_work & TIF_REARM_MASK) == _TIF_HRTIMER_REARM)) {
+ clear_thread_flag(TIF_HRTIMER_REARM);
+ __hrtimer_rearm_deferred();
+ /* Don't go into the loop if HRTIMER_REARM was the only flag */
+ *tif_work &= ~TIF_HRTIMER_REARM;
+ return !*tif_work;
+ }
+ return false;
+}
+
+/* Invoked from the time slice extension decision function */
+static __always_inline void hrtimer_rearm_deferred_tif(unsigned long tif_work)
+{
+ if (hrtimer_test_and_clear_rearm_deferred_tif(tif_work))
+ __hrtimer_rearm_deferred();
+}
+
+/*
+ * This is to be called on all irqentry_exit() paths that will enable
+ * interrupts.
+ */
+static __always_inline void hrtimer_rearm_deferred(void)
+{
+ hrtimer_rearm_deferred_tif(read_thread_flags());
+}
+
+/*
+ * Invoked from the scheduler on entry to __schedule() so it can defer
+ * rearming after the load balancing callbacks which might change hrtick.
+ */
+static __always_inline bool hrtimer_test_and_clear_rearm_deferred(void)
+{
+ return hrtimer_test_and_clear_rearm_deferred_tif(read_thread_flags());
+}
+
+#else /* CONFIG_HRTIMER_REARM_DEFERRED */
+static __always_inline void __hrtimer_rearm_deferred(void) { }
+static __always_inline void hrtimer_rearm_deferred(void) { }
+static __always_inline void hrtimer_rearm_deferred_tif(unsigned long tif_work) { }
+static __always_inline bool
+hrtimer_rearm_deferred_user_irq(unsigned long *tif_work, const unsigned long tif_mask) { return false; }
+static __always_inline bool hrtimer_test_and_clear_rearm_deferred(void) { return false; }
+#endif /* !CONFIG_HRTIMER_REARM_DEFERRED */
+
+#endif /* _LINUX_HRTIMER_REARM_H */
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
index 8fbbb6bdf7a1..b5dacc8271a4 100644
--- a/include/linux/hrtimer_types.h
+++ b/include/linux/hrtimer_types.h
@@ -17,7 +17,7 @@ enum hrtimer_restart {
/**
* struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
+ * @node: Linked timerqueue node, which also manages node.expires,
* the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on
* which the timer is based. Is setup by adding
@@ -28,23 +28,26 @@ enum hrtimer_restart {
* was armed.
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
+ * @is_queued: Indicates whether a timer is enqueued or not
* @is_rel: Set if the timer was armed relative
* @is_soft: Set if hrtimer will be expired in soft interrupt context.
* @is_hard: Set if hrtimer will be expired in hard interrupt context
* even on RT.
+ * @is_lazy: Set if the timer is frequently rearmed to avoid updates
+ * of the clock event device
*
* The hrtimer structure must be initialized by hrtimer_setup()
*/
struct hrtimer {
- struct timerqueue_node node;
+ struct timerqueue_linked_node node;
+ struct hrtimer_clock_base *base;
+ bool is_queued;
+ bool is_rel;
+ bool is_soft;
+ bool is_hard;
+ bool is_lazy;
ktime_t _softexpires;
enum hrtimer_restart (*__private function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- u8 is_rel;
- u8 is_soft;
- u8 is_hard;
};
#endif /* _LINUX_HRTIMER_TYPES_H */
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 6ca92bff02c6..ea6bef9b6012 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -271,7 +271,7 @@ struct hsi_controller {
struct module *owner;
unsigned int id;
unsigned int num_ports;
- struct hsi_port **port;
+ struct hsi_port *port[] __counted_by(num_ports);
};
#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4d9f964dfde..2949e5acff35 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -27,8 +27,8 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
-int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr);
+bool zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
@@ -83,7 +83,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
* file is never split and the MAX_PAGECACHE_ORDER limit does not apply to
* it. Same to PFNMAPs where there's neither page* nor pagecache.
*/
-#define THP_ORDERS_ALL_SPECIAL \
+#define THP_ORDERS_ALL_SPECIAL_DAX \
(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
@@ -92,7 +92,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
* Mask of all large folio orders supported for THP.
*/
#define THP_ORDERS_ALL \
- (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
enum tva_type {
TVA_SMAPS, /* Exposing "THPeligible:" in smaps. */
@@ -771,6 +771,11 @@ static inline bool pmd_is_huge(pmd_t pmd)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool is_pmd_order(unsigned int order)
+{
+ return order == HPAGE_PMD_ORDER;
+}
+
static inline int split_folio_to_list_to_order(struct folio *folio,
struct list_head *list, int new_order)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65910437be1c..93418625d3c5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -518,6 +518,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
struct hugetlbfs_inode_info {
struct inode vfs_inode;
+ struct resv_map *resv_map;
unsigned int seals;
};
@@ -777,10 +778,6 @@ static inline unsigned long huge_page_size(const struct hstate *h)
return (unsigned long)PAGE_SIZE << h->order;
}
-extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
-
-extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
-
static inline unsigned long huge_page_mask(struct hstate *h)
{
return h->mask;
@@ -796,6 +793,23 @@ static inline unsigned huge_page_shift(struct hstate *h)
return h->order + PAGE_SHIFT;
}
+/**
+ * hugetlb_linear_page_index() - linear_page_index() but in hugetlb
+ * page size granularity.
+ * @vma: the hugetlb VMA
+ * @address: the virtual address within the VMA
+ *
+ * Return: the page offset within the mapping in huge page units.
+ */
+static inline pgoff_t hugetlb_linear_page_index(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ struct hstate *h = hstate_vma(vma);
+
+ return ((address - vma->vm_start) >> huge_page_shift(h)) +
+ (vma->vm_pgoff >> huge_page_order(h));
+}
+
static inline bool order_is_gigantic(unsigned int order)
{
return order > MAX_PAGE_ORDER;
@@ -1177,16 +1191,6 @@ static inline unsigned long huge_page_mask(struct hstate *h)
return PAGE_MASK;
}
-static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
-{
- return PAGE_SIZE;
-}
-
-static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
- return PAGE_SIZE;
-}
-
static inline unsigned int huge_page_order(struct hstate *h)
{
return 0;
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 593f5d4e108b..565b473fd135 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -13,7 +13,7 @@ static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
static inline bool is_vma_hugetlb_flags(const vma_flags_t *flags)
{
- return vma_flags_test(flags, VMA_HUGETLB_BIT);
+ return vma_flags_test_any(flags, VMA_HUGETLB_BIT);
}
#else
@@ -30,7 +30,7 @@ static inline bool is_vma_hugetlb_flags(const vma_flags_t *flags)
#endif
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(const struct vm_area_struct *vma)
{
return is_vm_hugetlb_flags(vma->vm_flags);
}
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index b77bc55a4cf3..1d3c1927986e 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -46,7 +46,7 @@ struct hwrng {
unsigned long priv;
unsigned short quality;
- /* internal. */
+ /* private: internal. */
struct list_head list;
struct kref ref;
struct work_struct cleanup_work;
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index f35b42e8c5de..74b91244fe0e 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -25,34 +25,6 @@ struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;
-/**
- * struct hwspinlock_pdata - platform data for hwspinlock drivers
- * @base_id: base id for this hwspinlock device
- *
- * hwspinlock devices provide system-wide hardware locks that are used
- * by remote processors that have no other way to achieve synchronization.
- *
- * To achieve that, each physical lock must have a system-wide id number
- * that is agreed upon, otherwise remote processors can't possibly assume
- * they're using the same hardware lock.
- *
- * Usually boards have a single hwspinlock device, which provides several
- * hwspinlocks, and in this case, they can be trivially numbered 0 to
- * (num-of-locks - 1).
- *
- * In case boards have several hwspinlocks devices, a different base id
- * should be used for each hwspinlock device (they can't all use 0 as
- * a starting id!).
- *
- * This platform data structure should be used to provide the base id
- * for each device (which is trivially 0 when only a single hwspinlock
- * device exists). It can be shared between different platforms, hence
- * its location.
- */
-struct hwspinlock_pdata {
- int base_id;
-};
-
#ifdef CONFIG_HWSPINLOCK
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index dfc516c1c719..a26fb8e7cedf 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1015,8 +1015,8 @@ struct vmbus_channel {
/* The max size of a packet on this channel */
u32 max_pkt_size;
- /* function to mmap ring buffer memory to the channel's sysfs ring attribute */
- int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
+ /* function to prepare mapping of ring buffer memory for the channel's sysfs ring attribute */
+ int (*mmap_prepare_ring_buffer)(struct vmbus_channel *channel, struct vm_area_desc *desc);
/* boolean to control visibility of sysfs for ring buffer */
bool ring_sysfs_visible;
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index e3b3b0fa2a8f..2bd9f2157e6c 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -15,38 +15,13 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
-typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr,
- const struct inet6_skb_parm *parm);
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr,
const struct inet6_skb_parm *parm);
-#if IS_BUILTIN(CONFIG_IPV6)
-static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct inet6_skb_parm *parm)
-{
- icmp6_send(skb, type, code, info, NULL, parm);
-}
-static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
-{
- BUILD_BUG_ON(fn != icmp6_send);
- return 0;
-}
-static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
-{
- BUILD_BUG_ON(fn != icmp6_send);
- return 0;
-}
-#else
-extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct inet6_skb_parm *parm);
-extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
-extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
-#endif
static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
- __icmpv6_send(skb, type, code, info, IP6CB(skb));
+ icmp6_send(skb, type, code, info, NULL, IP6CB(skb));
}
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
@@ -58,7 +33,7 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
{
struct inet6_skb_parm parm = { 0 };
- __icmpv6_send(skb_in, type, code, info, &parm);
+ icmp6_send(skb_in, type, code, info, NULL, &parm);
}
#endif
diff --git a/include/linux/ieee80211-eht.h b/include/linux/ieee80211-eht.h
index f8e9f5d36d2a..a97b1d01f3ac 100644
--- a/include/linux/ieee80211-eht.h
+++ b/include/linux/ieee80211-eht.h
@@ -251,8 +251,8 @@ struct ieee80211_eht_operation_info {
#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
-#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
-#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x20
#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
diff --git a/include/linux/ieee80211-ht.h b/include/linux/ieee80211-ht.h
index 21bbf470540f..7612b72f9c7c 100644
--- a/include/linux/ieee80211-ht.h
+++ b/include/linux/ieee80211-ht.h
@@ -281,6 +281,9 @@ enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
WLAN_ACTION_ADDBA_RESP = 1,
WLAN_ACTION_DELBA = 2,
+ WLAN_ACTION_NDP_ADDBA_REQ = 128,
+ WLAN_ACTION_NDP_ADDBA_RESP = 129,
+ WLAN_ACTION_NDP_DELBA = 130,
};
/* BACK (block-ack) parties */
diff --git a/include/linux/ieee80211-nan.h b/include/linux/ieee80211-nan.h
index d07959bf8a90..455033955e54 100644
--- a/include/linux/ieee80211-nan.h
+++ b/include/linux/ieee80211-nan.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2025 Intel Corporation
+ * Copyright (c) 2018 - 2026 Intel Corporation
*/
#ifndef LINUX_IEEE80211_NAN_H
@@ -23,6 +23,11 @@
#define NAN_OP_MODE_160MHZ 0x04
#define NAN_OP_MODE_PNDL_SUPPRTED 0x08
+#define NAN_DEV_CAPA_NUM_TX_ANT_POS 0
+#define NAN_DEV_CAPA_NUM_TX_ANT_MASK 0x0f
+#define NAN_DEV_CAPA_NUM_RX_ANT_POS 4
+#define NAN_DEV_CAPA_NUM_RX_ANT_MASK 0xf0
+
/* NAN Device capabilities, as defined in Wi-Fi Aware (TM) specification
* Table 79
*/
@@ -32,4 +37,41 @@
#define NAN_DEV_CAPA_NDPE_SUPPORTED 0x08
#define NAN_DEV_CAPA_S3_SUPPORTED 0x10
+/* NAN attributes, as defined in Wi-Fi Aware (TM) specification 4.0 Table 42 */
+#define NAN_ATTR_MASTER_INDICATION 0x00
+#define NAN_ATTR_CLUSTER_INFO 0x01
+
+struct ieee80211_nan_attr {
+ u8 attr;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+struct ieee80211_nan_master_indication {
+ u8 master_pref;
+ u8 random_factor;
+} __packed;
+
+struct ieee80211_nan_anchor_master_info {
+ union {
+ __le64 master_rank;
+ struct {
+ u8 master_addr[ETH_ALEN];
+ u8 random_factor;
+ u8 master_pref;
+ } __packed;
+ } __packed;
+ u8 hop_count;
+ __le32 ambtt;
+} __packed;
+
+#define for_each_nan_attr(_attr, _data, _datalen) \
+ for (_attr = (const struct ieee80211_nan_attr *)(_data); \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_attr >= \
+ (int)sizeof(*_attr) && \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_attr >= \
+ (int)sizeof(*_attr) + le16_to_cpu(_attr->length); \
+ _attr = (const struct ieee80211_nan_attr *) \
+ (_attr->data + le16_to_cpu(_attr->length)))
+
#endif /* LINUX_IEEE80211_NAN_H */
diff --git a/include/linux/ieee80211-uhr.h b/include/linux/ieee80211-uhr.h
index 132acced7d79..d199f3ebdba0 100644
--- a/include/linux/ieee80211-uhr.h
+++ b/include/linux/ieee80211-uhr.h
@@ -12,8 +12,8 @@
#define IEEE80211_UHR_OPER_PARAMS_DPS_ENA 0x0001
#define IEEE80211_UHR_OPER_PARAMS_NPCA_ENA 0x0002
-#define IEEE80211_UHR_OPER_PARAMS_DBE_ENA 0x0004
-#define IEEE80211_UHR_OPER_PARAMS_PEDCA_ENA 0x0008
+#define IEEE80211_UHR_OPER_PARAMS_PEDCA_ENA 0x0004
+#define IEEE80211_UHR_OPER_PARAMS_DBE_ENA 0x0008
struct ieee80211_uhr_operation {
__le16 params;
@@ -29,11 +29,216 @@ struct ieee80211_uhr_operation {
#define IEEE80211_UHR_NPCA_PARAMS_MOPLEN 0x00400000
#define IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES 0x00800000
+/**
+ * struct ieee80211_uhr_npca_info - npca operation information
+ *
+ * This structure is the "NPCA Operation Parameters field format" of "UHR
+ * Operation Element" fields as described in P802.11bn_D1.3
+ * subclause 9.4.2.353. See Figure 9-aa4.
+ *
+ * Refer to IEEE80211_UHR_NPCA*
+ * @params:
+ * NPCA Primary Channel - NPCA primary channel
+ * NPCA_Min Duration Threshold - Minimum duration of inter-BSS activity
+ * NPCA Switching Delay -
+ * Time needed by an NPCA AP to switch from the
+ * BSS primary channel to the NPCA primary channel
+ * in the unit of 4 µs.
+ * NPCA Switching Back Delay -
+ * Time to switch from the NPCA primary channel
+ * to the BSS primary channel in the unit of 4 µs.
+ * NPCA Initial QSRC -
+ * Initialize the EDCAF QSRC[AC] variables
+ * when an NPCA STA in the BSS
+ * switches to NPCA operation.
+ * NPCA MOPLEN -
+ * Indicates which conditions can be used to
+ * initiate an NPCA operation,
+ * 1 -> both PHYLEN NPCA operation and MOPLEN
+ * NPCA operation are
+ * permitted in the BSS
+ * 0 -> only PHYLEN NPCA operation is allowed in the BSS.
+ * NPCA Disabled Subchannel Bitmap Present -
+ * Indicates whether the NPCA Disabled Subchannel
+ * Bitmap field is present. A 1 in this field indicates that
+ * the NPCA Disabled Subchannel Bitmap field is present
+ * @dis_subch_bmap:
+ * A bit in the bitmap that lies within the BSS bandwidth is set
+ * to 1 to indicate that the corresponding 20 MHz subchannel is
+ * punctured and is set to 0 to indicate that the corresponding
+ * 20 MHz subchannel is not punctured. A bit in the bitmap that
+ * falls outside of the BSS bandwidth is reserved. This field is
+ * present when the value of the NPCA Disabled Subchannel Bitmap
+ * Field Present field is equal to 1, and not present, otherwise
+ */
struct ieee80211_uhr_npca_info {
__le32 params;
__le16 dis_subch_bmap[];
} __packed;
+#define IEEE80211_UHR_DPS_PADDING_DELAY 0x0000003F
+#define IEEE80211_UHR_DPS_TRANSITION_DELAY 0x00003F00
+#define IEEE80211_UHR_DPS_ICF_REQUIRED 0x00010000
+#define IEEE80211_UHR_DPS_PARAMETERIZED_FLAG 0x00020000
+#define IEEE80211_UHR_DPS_LC_MODE_BW 0x001C0000
+#define IEEE80211_UHR_DPS_LC_MODE_NSS 0x01E00000
+#define IEEE80211_UHR_DPS_LC_MODE_MCS 0x1E000000
+#define IEEE80211_UHR_DPS_MOBILE_AP_DPS_STATIC_HCM 0x20000000
+
+/**
+ * struct ieee80211_uhr_dps_info - DPS operation information
+ *
+ * This structure is the "DPS Operation Parameter field" of "UHR
+ * Operation Element" fields as described in P802.11bn_D1.3
+ * subclause 9.4.1.87. See Figure 9-207u.
+ *
+ * Refer to IEEE80211_UHR_DPS*
+ * @params:
+ * DPS Padding Delay -
+ * Indicates the minimum MAC padding
+ * duration that is required by a DPS STA
+ * in an ICF to cause the STA to transition
+ * from the lower capability mode to the
+ * higher capability mode. The DPS Padding
+ * Delay field is in units of 4 µs.
+ * DPS Transition Delay -
+ * Indicates the amount of time required by a
+ * DPS STA to transition from the higher
+ * capability mode to the lower capability
+ * mode. The DPS Transition Delay field is in
+ * units of 4 µs.
+ * ICF Required -
+ * Indicates when the DPS assisting STA needs
+ * to transmit an ICF frame to the peer DPS STA
+ * before performing the frame exchanges with
+ * the peer DPS STA in a TXOP.
+ * 1 -> indicates that the transmission of the
+ * ICF frame to the peer DPS STA prior to
+ * any frame exchange is needed.
+ * 0 -> ICF transmission before the frame
+ * exchanges with the peer DPS STA is only
+ * needed if the frame exchange is performed
+ * in the HC mode.
+ * Parameterized Flag -
+ * 0 -> indicates that only 20 MHz, 1 SS,
+ * non-HT PPDU format with the data
+ * rate of 6, 12, and 24 Mb/s as the
+ * default mode are supported by the
+ * DPS STA in the LC mode
+ * 1 -> indicates that a bandwidth up to the
+ * bandwidth indicated in the LC Mode
+ * Bandwidth field, a number of spatial
+ * streams up to the NSS indicated in
+ * the LC Mode Nss field, and an MCS up
+ * to the MCS indicated in the LC Mode
+ * MCS fields are supported by the DPS
+ * STA in the LC mode as the
+ * parameterized mode.
+ * LC Mode Bandwidth -
+ * Indicates the maximum bandwidth supported
+ * by the STA in the LC mode.
+ * LC Mode NSS -
+ * Indicates the maximum number of the spatial
+ * streams supported by the STA in the LC mode.
+ * LC Mode MCS -
+ * Indicates the highest MCS supported by the STA
+ * in the LC mode.
+ * Mobile AP DPS Static HCM -
+ * 1 -> indicates that it will remain in the DPS high
+ * capability mode until the next TBTT on that
+ * link.
+ * 0 -> otherwise.
+ */
+struct ieee80211_uhr_dps_info {
+ __le32 params;
+} __packed;
+
+#define IEEE80211_UHR_DBE_OPER_BANDWIDTH 0x07
+#define IEEE80211_UHR_DBE_OPER_DIS_SUBCHANNEL_BITMAP_PRES 0x08
+
+/**
+ * enum ieee80211_uhr_dbe_oper_bw - DBE Operational Bandwidth
+ *
+ * Encoding for the DBE Operational Bandwidth field in the UHR Operation
+ * element (DBE Operation Parameters).
+ *
+ * @IEEE80211_UHR_DBE_OPER_BW_40: 40 MHz operational DBE bandwidth
+ * @IEEE80211_UHR_DBE_OPER_BW_80: 80 MHz operational DBE bandwidth
+ * @IEEE80211_UHR_DBE_OPER_BW_160: 160 MHz operational DBE bandwidth
+ * @IEEE80211_UHR_DBE_OPER_BW_320_1: 320-1 MHz operational DBE bandwidth
+ * @IEEE80211_UHR_DBE_OPER_BW_320_2: 320-2 MHz operational DBE bandwidth
+ */
+enum ieee80211_uhr_dbe_oper_bw {
+ IEEE80211_UHR_DBE_OPER_BW_40 = 1,
+ IEEE80211_UHR_DBE_OPER_BW_80 = 2,
+ IEEE80211_UHR_DBE_OPER_BW_160 = 3,
+ IEEE80211_UHR_DBE_OPER_BW_320_1 = 4,
+ IEEE80211_UHR_DBE_OPER_BW_320_2 = 5,
+};
+
+/**
+ * struct ieee80211_uhr_dbe_info - DBE operation information
+ *
+ * This structure is the "DBE Operation Parameters field" of
+ * "UHR Operation Element" fields as described in P802.11bn_D1.3
+ * subclause 9.4.2.353. See Figure 9-aa6.
+ *
+ * Refer to IEEE80211_UHR_DBE_OPER*
+ * @params:
+ * B0-B2 - DBE Operational Bandwidth field, see
+ * "enum ieee80211_uhr_dbe_oper_bw" for values.
+ * Value 0 is reserved.
+ * Value 1 indicates 40 MHz operational DBE bandwidth.
+ * Value 2 indicates 80 MHz operational DBE bandwidth.
+ * Value 3 indicates 160 MHz operational DBE bandwidth.
+ * Value 4 indicates 320-1 MHz operational DBE bandwidth.
+ * Value 5 indicates 320-2 MHz operational DBE bandwidth.
+ * Values 6 to 7 are reserved.
+ * B3 - DBE Disabled Subchannel Bitmap Present.
+ * @dis_subch_bmap: DBE Disabled Subchannel Bitmap field is set to indicate
+ * disabled 20 MHz subchannels within the DBE Bandwidth.
+ */
+struct ieee80211_uhr_dbe_info {
+ u8 params;
+ __le16 dis_subch_bmap[];
+} __packed;
+
+#define IEEE80211_UHR_P_EDCA_ECWMIN 0x0F
+#define IEEE80211_UHR_P_EDCA_ECWMAX 0xF0
+#define IEEE80211_UHR_P_EDCA_AIFSN 0x000F
+#define IEEE80211_UHR_P_EDCA_CW_DS 0x0030
+#define IEEE80211_UHR_P_EDCA_PSRC_THRESHOLD 0x01C0
+#define IEEE80211_UHR_P_EDCA_QSRC_THRESHOLD 0x0600
+
+/**
+ * struct ieee80211_uhr_p_edca_info - P-EDCA operation information
+ *
+ * This structure is the "P-EDCA Operation Parameters field" of
+ * "UHR Operation Element" fields as described in P802.11bn_D1.3
+ * subclause 9.4.2.353. See Figure 9-aa5.
+ *
+ * Refer to IEEE80211_UHR_P_EDCA*
+ * @p_edca_ec: P-EDCA ECWmin and ECWmax.
+ * These fields indicate the CWmin and CWmax values used by a
+ * P-EDCA STA during P-EDCA contention.
+ * @params: AIFSN, CW DS, PSRC threshold, and QSRC threshold.
+ * - The AIFSN field indicates the AIFSN value used by a P-EDCA STA
+ * during P-EDCA contention.
+ * - The CW DS field indicates the value used for randomization of the
+ * transmission slot of the DS-CTS frame. The value 3 is reserved.
+ * The value 0 indicates that randomization is not enabled.
+ * - The P-EDCA PSRC threshold field indicates the maximum number of
+ * allowed consecutive DS-CTS transmissions. The value 0 and values
+ * greater than 4 are reserved.
+ * - The P-EDCA QSRC threshold field indicates the value of the
+ * QSRC[AC_VO] counter required to start P-EDCA contention. The
+ * value 0 is reserved.
+ */
+struct ieee80211_uhr_p_edca_info {
+ u8 p_edca_ec;
+ __le16 params;
+} __packed;
+
static inline bool ieee80211_uhr_oper_size_ok(const u8 *data, u8 len,
bool beacon)
{
@@ -47,19 +252,52 @@ static inline bool ieee80211_uhr_oper_size_ok(const u8 *data, u8 len,
if (beacon)
return true;
- /* FIXME: DPS, DBE, P-EDCA (consider order, also relative to NPCA) */
+ /* DPS Operation Parameters (fixed 4 bytes) */
+ if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_DPS_ENA)) {
+ needed += sizeof(struct ieee80211_uhr_dps_info);
+ if (len < needed)
+ return false;
+ }
+ /* NPCA Operation Parameters (fixed 4 bytes + optional 2 bytes) */
if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_NPCA_ENA)) {
const struct ieee80211_uhr_npca_info *npca =
- (const void *)oper->variable;
+ (const void *)(data + needed);
needed += sizeof(*npca);
-
if (len < needed)
return false;
- if (npca->params & cpu_to_le32(IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES))
+ if (npca->params &
+ cpu_to_le32(IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES)) {
needed += sizeof(npca->dis_subch_bmap[0]);
+ if (len < needed)
+ return false;
+ }
+ }
+
+ /* P-EDCA Operation Parameters (fixed 3 bytes) */
+ if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_PEDCA_ENA)) {
+ needed += sizeof(struct ieee80211_uhr_p_edca_info);
+ if (len < needed)
+ return false;
+ }
+
+ /* DBE Operation Parameters (fixed 1 byte + optional 2 bytes) */
+ if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_DBE_ENA)) {
+ const struct ieee80211_uhr_dbe_info *dbe =
+ (const void *)(data + needed);
+
+ needed += sizeof(*dbe);
+ if (len < needed)
+ return false;
+
+ if (dbe->params &
+ IEEE80211_UHR_DBE_OPER_DIS_SUBCHANNEL_BITMAP_PRES) {
+ needed += sizeof(dbe->dis_subch_bmap[0]);
+ if (len < needed)
+ return false;
+ }
}
return len >= needed;
@@ -72,12 +310,15 @@ static inline bool ieee80211_uhr_oper_size_ok(const u8 *data, u8 len,
static inline const struct ieee80211_uhr_npca_info *
ieee80211_uhr_npca_info(const struct ieee80211_uhr_operation *oper)
{
+ const u8 *pos = oper->variable;
+
if (!(oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_NPCA_ENA)))
return NULL;
- /* FIXME: DPS */
+ if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_DPS_ENA))
+ pos += sizeof(struct ieee80211_uhr_dps_info);
- return (const void *)oper->variable;
+ return (const void *)pos;
}
static inline const __le16 *
@@ -131,6 +372,24 @@ ieee80211_uhr_npca_dis_subch_bitmap(const struct ieee80211_uhr_operation *oper)
#define IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES 0x08
#define IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES 0x10
+/**
+ * enum ieee80211_uhr_dbe_max_supported_bw - DBE Maximum Supported Bandwidth
+ *
+ * As per spec P802.11bn_D1.3 "Table 9-bb5—Encoding of the DBE Maximum
+ * Supported Bandwidth field".
+ *
+ * @IEEE80211_UHR_DBE_MAX_BW_40: Indicates 40 MHz DBE max supported bw
+ * @IEEE80211_UHR_DBE_MAX_BW_80: Indicates 80 MHz DBE max supported bw
+ * @IEEE80211_UHR_DBE_MAX_BW_160: Indicates 160 MHz DBE max supported bw
+ * @IEEE80211_UHR_DBE_MAX_BW_320: Indicates 320 MHz DBE max supported bw
+ */
+enum ieee80211_uhr_dbe_max_supported_bw {
+ IEEE80211_UHR_DBE_MAX_BW_40 = 1,
+ IEEE80211_UHR_DBE_MAX_BW_80 = 2,
+ IEEE80211_UHR_DBE_MAX_BW_160 = 3,
+ IEEE80211_UHR_DBE_MAX_BW_320 = 4,
+};
+
struct ieee80211_uhr_cap_mac {
u8 mac_cap[5];
} __packed;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0aa2fb8f88de..23f9df9be837 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1046,31 +1046,28 @@ struct ieee80211_mgmt {
} __packed probe_resp;
struct {
u8 category;
+ u8 action_code;
union {
struct {
- u8 action_code;
u8 dialog_token;
u8 status_code;
u8 variable[];
} __packed wme_action;
struct{
- u8 action_code;
+ u8 no_fixed_fields[0];
u8 variable[];
} __packed chan_switch;
struct{
- u8 action_code;
struct ieee80211_ext_chansw_ie data;
u8 variable[];
} __packed ext_chan_switch;
struct{
- u8 action_code;
u8 dialog_token;
u8 element_id;
u8 length;
struct ieee80211_msrment_ie msr_elem;
} __packed measurement;
struct{
- u8 action_code;
u8 dialog_token;
__le16 capab;
__le16 timeout;
@@ -1079,7 +1076,6 @@ struct ieee80211_mgmt {
u8 variable[];
} __packed addba_req;
struct{
- u8 action_code;
u8 dialog_token;
__le16 status;
__le16 capab;
@@ -1088,54 +1084,45 @@ struct ieee80211_mgmt {
u8 variable[];
} __packed addba_resp;
struct{
- u8 action_code;
__le16 params;
__le16 reason_code;
} __packed delba;
struct {
- u8 action_code;
+ u8 no_fixed_fields[0];
u8 variable[];
} __packed self_prot;
struct{
- u8 action_code;
+ u8 no_fixed_fields[0];
u8 variable[];
} __packed mesh_action;
struct {
- u8 action;
u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
} __packed sa_query;
struct {
- u8 action;
u8 smps_control;
} __packed ht_smps;
struct {
- u8 action_code;
u8 chanwidth;
} __packed ht_notify_cw;
struct {
- u8 action_code;
u8 dialog_token;
__le16 capability;
u8 variable[];
} __packed tdls_discover_resp;
struct {
- u8 action_code;
u8 operating_mode;
} __packed vht_opmode_notif;
struct {
- u8 action_code;
u8 membership[WLAN_MEMBERSHIP_LEN];
u8 position[WLAN_USER_POSITION_LEN];
} __packed vht_group_notif;
struct {
- u8 action_code;
u8 dialog_token;
u8 tpc_elem_id;
u8 tpc_elem_length;
struct ieee80211_tpc_report_ie tpc;
} __packed tpc_report;
struct {
- u8 action_code;
u8 dialog_token;
u8 follow_up;
u8 tod[6];
@@ -1145,11 +1132,10 @@ struct ieee80211_mgmt {
u8 variable[];
} __packed ftm;
struct {
- u8 action_code;
+ u8 no_fixed_fields[0];
u8 variable[];
} __packed s1g;
struct {
- u8 action_code;
u8 dialog_token;
u8 follow_up;
u32 tod;
@@ -1158,41 +1144,37 @@ struct ieee80211_mgmt {
u8 max_toa_error;
} __packed wnm_timing_msr;
struct {
- u8 action_code;
u8 dialog_token;
u8 variable[];
} __packed ttlm_req;
struct {
- u8 action_code;
u8 dialog_token;
__le16 status_code;
u8 variable[];
} __packed ttlm_res;
struct {
- u8 action_code;
+ u8 no_fixed_fields[0];
+ /* no variable fields either */
} __packed ttlm_tear_down;
struct {
- u8 action_code;
u8 dialog_token;
u8 variable[];
} __packed ml_reconf_req;
struct {
- u8 action_code;
u8 dialog_token;
u8 count;
u8 variable[];
} __packed ml_reconf_resp;
struct {
- u8 action_code;
+ u8 no_fixed_fields[0];
u8 variable[];
} __packed epcs;
struct {
- u8 action_code;
u8 dialog_token;
u8 control;
u8 variable[];
} __packed eml_omn;
- } u;
+ };
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
@@ -1210,9 +1192,15 @@ struct ieee80211_mgmt {
#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_UHR_PHY
-/* mgmt header + 1 byte category code */
-#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
+#define IEEE80211_MIN_ACTION_SIZE(type) offsetofend(struct ieee80211_mgmt, u.action.type)
+/* Link Reconfiguration Status Duple field */
+struct ieee80211_ml_reconf_status {
+ u8 info;
+ __le16 status;
+} __packed;
+
+#define IEEE80211_ML_RECONF_LINK_ID_MASK 0xf
/* Management MIC information element (IEEE 802.11w) for CMAC */
struct ieee80211_mmie {
@@ -1358,6 +1346,7 @@ struct ieee80211_tdls_data {
#define WLAN_AUTH_FILS_SK 4
#define WLAN_AUTH_FILS_SK_PFS 5
#define WLAN_AUTH_FILS_PK 6
+#define WLAN_AUTH_IEEE8021X 8
#define WLAN_AUTH_EPPKE 9
#define WLAN_AUTH_LEAP 128
@@ -1500,6 +1489,8 @@ enum ieee80211_statuscode {
WLAN_STATUS_REJECT_DSE_BAND = 96,
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+ /* 802.11ah */
+ WLAN_STATUS_REJECTED_NDP_BLOCK_ACK_SUGGESTED = 109,
/* 802.11ai */
WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 112,
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 113,
@@ -1507,6 +1498,7 @@ enum ieee80211_statuscode {
WLAN_STATUS_SAE_PK = 127,
WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134,
+ WLAN_STATUS_8021X_AUTH_SUCCESS = 153,
};
@@ -1929,6 +1921,11 @@ enum ieee80211_radio_measurement_actioncode {
#define PMK_MAX_LEN 64
#define SAE_PASSWORD_MAX_LEN 128
+#define MICHAEL_MIC_LEN 8
+
+void michael_mic(const u8 *key, struct ieee80211_hdr *hdr,
+ const u8 *data, size_t data_len, u8 *mic);
+
/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_20_40_BSS_COEX = 0,
@@ -2248,6 +2245,7 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_TYPE_WFA_NAN 0x13
#define WLAN_OUI_TYPE_WFA_DPP 0x1A
#define WLAN_OUI_MICROSOFT 0x0050f2
#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
@@ -2389,7 +2387,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
if (!ieee80211_is_action(fc))
return false;
- if (skb->len < offsetofend(typeof(*mgmt), u.action.u.ftm.action_code))
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE(action_code))
return true;
/* action frame - additionally check for non-bufferable FTM */
@@ -2398,8 +2396,8 @@ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
mgmt->u.action.category != WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
return true;
- if (mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_REQUEST ||
- mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_RESPONSE)
+ if (mgmt->u.action.action_code == WLAN_PUB_ACTION_FTM_REQUEST ||
+ mgmt->u.action.action_code == WLAN_PUB_ACTION_FTM_RESPONSE)
return false;
return true;
@@ -2449,7 +2447,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
{
- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE(category))
return false;
return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
}
@@ -2465,7 +2463,7 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
{
struct ieee80211_mgmt *mgmt = (void *)hdr;
- if (len < IEEE80211_MIN_ACTION_SIZE)
+ if (len < IEEE80211_MIN_ACTION_SIZE(category))
return false;
if (!ieee80211_is_action(hdr->frame_control))
return false;
@@ -2483,13 +2481,14 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
static inline bool
ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
u8 action;
if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
- skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ skb->len < IEEE80211_MIN_ACTION_SIZE(action_code))
return false;
- action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+ action = mgmt->u.action.action_code;
return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
@@ -2528,7 +2527,7 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
*/
static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
{
- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE(category))
return false;
return _ieee80211_is_group_privacy_action((void *)skb->data);
}
@@ -2624,8 +2623,7 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
if (!ieee80211_is_action(mgmt->frame_control))
return false;
- if (skb->len < IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.tpc_report))
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE(tpc_report))
return false;
/*
@@ -2644,12 +2642,11 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return false;
/* both spectrum mgmt and link measurement have same action code */
- if (mgmt->u.action.u.tpc_report.action_code !=
- WLAN_ACTION_SPCT_TPC_RPRT)
+ if (mgmt->u.action.action_code != WLAN_ACTION_SPCT_TPC_RPRT)
return false;
- if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT ||
- mgmt->u.action.u.tpc_report.tpc_elem_length !=
+ if (mgmt->u.action.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT ||
+ mgmt->u.action.tpc_report.tpc_elem_length !=
sizeof(struct ieee80211_tpc_report_ie))
return false;
@@ -2665,16 +2662,15 @@ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE(wnm_timing_msr))
return false;
if (!ieee80211_is_action(mgmt->frame_control))
return false;
if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED &&
- mgmt->u.action.u.wnm_timing_msr.action_code ==
- WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE &&
- skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr))
+ mgmt->u.action.action_code ==
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE)
return true;
return false;
@@ -2689,15 +2685,13 @@ static inline bool ieee80211_is_ftm(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
- if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE(ftm))
return false;
- if (mgmt->u.action.u.ftm.action_code ==
- WLAN_PUB_ACTION_FTM_RESPONSE &&
- skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm))
- return true;
+ if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ return false;
- return false;
+ return mgmt->u.action.action_code == WLAN_PUB_ACTION_FTM_RESPONSE;
}
struct element {
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index db45d6f1c4f4..594d6dc3f4c9 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -25,8 +25,6 @@ struct pppoe_opt {
struct net_device *dev; /* device associated with socket*/
int ifindex; /* ifindex of device associated with socket */
struct pppoe_addr pa; /* what this socket is bound to*/
- struct sockaddr_pppox relay; /* what socket data will be
- relayed to (PPPoE relaying) */
struct work_struct padt_work;/* Work item for handling PADT */
};
@@ -53,16 +51,10 @@ struct pppox_sock {
#define pppoe_dev proto.pppoe.dev
#define pppoe_ifindex proto.pppoe.ifindex
#define pppoe_pa proto.pppoe.pa
-#define pppoe_relay proto.pppoe.relay
static inline struct pppox_sock *pppox_sk(struct sock *sk)
{
- return (struct pppox_sock *)sk;
-}
-
-static inline struct sock *sk_pppox(struct pppox_sock *po)
-{
- return (struct sock *)po;
+ return container_of(sk, struct pppox_sock, sk);
}
struct module;
@@ -80,14 +72,11 @@ extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
-
/* PPPoX socket states */
enum {
PPPOX_NONE = 0, /* initial state */
PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */
PPPOX_BOUND = 2, /* bound to ppp device */
- PPPOX_RELAY = 4, /* forwarding is enabled */
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index ce97d891cf72..3d21e06fda67 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -27,10 +27,11 @@ struct team;
struct team_port {
struct net_device *dev;
- struct hlist_node hlist; /* node in enabled ports hash list */
+ struct hlist_node tx_hlist; /* node in tx-enabled ports hash list */
struct list_head list; /* node in ordinary list */
struct team *team;
- int index; /* index of enabled port. If disabled, it's set to -1 */
+ int tx_index; /* index of tx enabled port. If disabled, -1 */
+ bool rx_enabled;
bool linkup; /* either state.linkup or user.linkup */
@@ -75,14 +76,24 @@ static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
return rcu_dereference(dev->rx_handler_data);
}
+static inline bool team_port_rx_enabled(struct team_port *port)
+{
+ return READ_ONCE(port->rx_enabled);
+}
+
+static inline bool team_port_tx_enabled(struct team_port *port)
+{
+ return READ_ONCE(port->tx_index) != -1;
+}
+
static inline bool team_port_enabled(struct team_port *port)
{
- return port->index != -1;
+ return team_port_rx_enabled(port) && team_port_tx_enabled(port);
}
static inline bool team_port_txable(struct team_port *port)
{
- return port->linkup && team_port_enabled(port);
+ return port->linkup && team_port_tx_enabled(port);
}
static inline bool team_port_dev_txable(const struct net_device *port_dev)
@@ -121,8 +132,7 @@ struct team_mode_ops {
int (*port_enter)(struct team *team, struct team_port *port);
void (*port_leave)(struct team *team, struct team_port *port);
void (*port_change_dev_addr)(struct team *team, struct team_port *port);
- void (*port_enabled)(struct team *team, struct team_port *port);
- void (*port_disabled)(struct team *team, struct team_port *port);
+ void (*port_tx_disabled)(struct team *team, struct team_port *port);
};
extern int team_modeop_port_enter(struct team *team, struct team_port *port);
@@ -186,16 +196,16 @@ struct team_mode {
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
struct team {
- struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
const struct header_ops *header_ops_cache;
/*
- * List of enabled ports and their count
+ * List of tx-enabled ports and counts of rx and tx-enabled ports.
*/
- int en_port_count;
- struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];
+ int tx_en_port_count;
+ int rx_en_port_count;
+ struct hlist_head tx_en_port_hlist[TEAM_PORT_HASHENTRIES];
struct list_head port_list; /* list of all ports */
@@ -232,48 +242,50 @@ static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
skb->dev = port->dev;
- if (unlikely(netpoll_tx_running(team->dev))) {
+ if (unlikely(netpoll_tx_running(netdev_from_priv(team)))) {
team_netpoll_send_skb(port, skb);
return 0;
}
return dev_queue_xmit(skb);
}
-static inline struct hlist_head *team_port_index_hash(struct team *team,
- int port_index)
+static inline struct hlist_head *team_tx_port_index_hash(struct team *team,
+ int tx_port_index)
{
- return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+ unsigned int list_entry = tx_port_index & (TEAM_PORT_HASHENTRIES - 1);
+
+ return &team->tx_en_port_hlist[list_entry];
}
-static inline struct team_port *team_get_port_by_index(struct team *team,
- int port_index)
+static inline struct team_port *team_get_port_by_tx_index(struct team *team,
+ int tx_port_index)
{
+ struct hlist_head *head = team_tx_port_index_hash(team, tx_port_index);
struct team_port *port;
- struct hlist_head *head = team_port_index_hash(team, port_index);
- hlist_for_each_entry(port, head, hlist)
- if (port->index == port_index)
+ hlist_for_each_entry(port, head, tx_hlist)
+ if (port->tx_index == tx_port_index)
return port;
return NULL;
}
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = READ_ONCE(team->en_port_count);
+ int tx_en_port_count = READ_ONCE(team->tx_en_port_count);
- if (unlikely(!en_port_count))
+ if (unlikely(!tx_en_port_count))
return 0;
- return num % en_port_count;
+ return num % tx_en_port_count;
}
-static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
- int port_index)
+static inline struct team_port *team_get_port_by_tx_index_rcu(struct team *team,
+ int tx_port_index)
{
+ struct hlist_head *head = team_tx_port_index_hash(team, tx_port_index);
struct team_port *port;
- struct hlist_head *head = team_port_index_hash(team, port_index);
- hlist_for_each_entry_rcu(port, head, hlist)
- if (port->index == port_index)
+ hlist_for_each_entry_rcu(port, head, tx_hlist)
+ if (READ_ONCE(port->tx_index) == tx_port_index)
return port;
return NULL;
}
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index a9ecff191bd9..2c91b7659ce9 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -931,6 +931,18 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
#define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \
__IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN)
+/**
+ * IIO_DECLARE_QUATERNION() - Declare a quaternion element
+ * @type: element type of the individual vectors
+ * @name: identifier name
+ *
+ * A quaternion is a vector composed of 4 elements (W, X, Y, Z). Use this macro
+ * to declare a quaternion element in a struct to ensure proper alignment in
+ * an IIO buffer.
+ */
+#define IIO_DECLARE_QUATERNION(type, name) \
+ type name[4] __aligned(sizeof(type) * 4)
+
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
/* The information at the returned address is guaranteed to be cacheline aligned */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index abf8923f8fc5..8e08baf16c2f 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/kexec.h>
+#include <linux/secure_boot.h>
#include <crypto/hash_info.h>
struct linux_binprm;
@@ -73,14 +74,8 @@ int ima_validate_range(phys_addr_t phys, size_t size);
#endif
#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
-extern bool arch_ima_get_secureboot(void);
extern const char * const *arch_get_ima_policy(void);
#else
-static inline bool arch_ima_get_secureboot(void)
-{
- return false;
-}
-
static inline const char * const *arch_get_ima_policy(void)
{
return NULL;
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index dc272b514a01..0e4340ecd857 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -57,7 +57,7 @@
* builtin, this macro simplify dealing with indirect calls with only ipv4/ipv6
* alternatives
*/
-#if IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
#define INDIRECT_CALL_INET(f, f2, f1, ...) \
INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#elif IS_ENABLED(CONFIG_INET)
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index fa1f328d6712..328004f605c3 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -77,7 +77,6 @@ enum rapl_primitives {
PSYS_TIME_WINDOW1,
PSYS_TIME_WINDOW2,
/* below are not raw primitive data */
- AVERAGE_POWER,
NR_RAPL_PRIMITIVES,
};
@@ -128,6 +127,46 @@ struct reg_action {
int err;
};
+struct rapl_defaults {
+ u8 floor_freq_reg_addr;
+ int (*check_unit)(struct rapl_domain *rd);
+ void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
+ u64 (*compute_time_window)(struct rapl_domain *rd, u64 val, bool to_raw);
+ unsigned int dram_domain_energy_unit;
+ unsigned int psys_domain_energy_unit;
+ bool spr_psys_bits;
+ bool msr_pl4_support;
+ bool msr_pmu_support;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) { \
+ .name = #p, \
+ .mask = m, \
+ .shift = s, \
+ .id = i, \
+ .unit = u, \
+ .flag = f \
+ }
+
+enum unit_type {
+ ARBITRARY_UNIT, /* no translation */
+ POWER_UNIT,
+ ENERGY_UNIT,
+ TIME_UNIT,
+};
+
+/* Per-domain data. Used to describe individual knobs so that the access
+ * function can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+ const char *name;
+ u64 mask;
+ int shift;
+ enum rapl_domain_reg_id id;
+ enum unit_type unit;
+ u32 flag;
+};
+
/**
* struct rapl_if_priv: private data for different RAPL interfaces
* @control_type: Each RAPL interface must have its own powercap
@@ -142,8 +181,8 @@ struct reg_action {
* registers.
* @write_raw: Callback for writing RAPL interface specific
* registers.
- * @defaults: internal pointer to interface default settings
- * @rpi: internal pointer to interface primitive info
+ * @defaults: pointer to default settings
+ * @rpi: pointer to interface primitive info
*/
struct rapl_if_priv {
enum rapl_if_type type;
@@ -154,8 +193,8 @@ struct rapl_if_priv {
int limits[RAPL_DOMAIN_MAX];
int (*read_raw)(int id, struct reg_action *ra, bool pmu_ctx);
int (*write_raw)(int id, struct reg_action *ra);
- void *defaults;
- void *rpi;
+ const struct rapl_defaults *defaults;
+ struct rapl_primitive_info *rpi;
};
#ifdef CONFIG_PERF_EVENTS
@@ -211,6 +250,9 @@ void rapl_remove_package_cpuslocked(struct rapl_package *rp);
struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
void rapl_remove_package(struct rapl_package *rp);
+int rapl_default_check_unit(struct rapl_domain *rd);
+void rapl_default_set_floor_freq(struct rapl_domain *rd, bool mode);
+u64 rapl_default_compute_time_window(struct rapl_domain *rd, u64 value, bool to_raw);
#ifdef CONFIG_PERF_EVENTS
int rapl_package_add_pmu(struct rapl_package *rp);
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 214fdbd49052..244392026c6d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -8,6 +8,9 @@
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>
+struct iou_loop_params;
+struct io_uring_bpf_ops;
+
enum {
/*
* A hint to not wake right away but delay until there are enough of
@@ -41,6 +44,8 @@ enum io_uring_cmd_flags {
IO_URING_F_COMPAT = (1 << 12),
};
+struct iou_loop_params;
+
struct io_wq_work_node {
struct io_wq_work_node *next;
};
@@ -268,24 +273,30 @@ struct io_alloc_cache {
unsigned int init_clear;
};
+enum {
+ IO_RING_F_DRAIN_NEXT = BIT(0),
+ IO_RING_F_OP_RESTRICTED = BIT(1),
+ IO_RING_F_REG_RESTRICTED = BIT(2),
+ IO_RING_F_OFF_TIMEOUT_USED = BIT(3),
+ IO_RING_F_DRAIN_ACTIVE = BIT(4),
+ IO_RING_F_HAS_EVFD = BIT(5),
+ /* all CQEs should be posted only by the submitter task */
+ IO_RING_F_TASK_COMPLETE = BIT(6),
+ IO_RING_F_LOCKLESS_CQ = BIT(7),
+ IO_RING_F_SYSCALL_IOPOLL = BIT(8),
+ IO_RING_F_POLL_ACTIVATED = BIT(9),
+ IO_RING_F_DRAIN_DISABLED = BIT(10),
+ IO_RING_F_COMPAT = BIT(11),
+ IO_RING_F_IOWQ_LIMITS_SET = BIT(12),
+};
+
struct io_ring_ctx {
/* const or read-mostly hot data */
struct {
+ /* ring setup flags */
unsigned int flags;
- unsigned int drain_next: 1;
- unsigned int op_restricted: 1;
- unsigned int reg_restricted: 1;
- unsigned int off_timeout_used: 1;
- unsigned int drain_active: 1;
- unsigned int has_evfd: 1;
- /* all CQEs should be posted only by the submitter task */
- unsigned int task_complete: 1;
- unsigned int lockless_cq: 1;
- unsigned int syscall_iopoll: 1;
- unsigned int poll_activated: 1;
- unsigned int drain_disabled: 1;
- unsigned int compat: 1;
- unsigned int iowq_limits_set : 1;
+ /* internal IO_RING_F_* state flags, mostly read-only */
+ unsigned int int_flags;
struct task_struct *submitter_task;
struct io_rings *rings;
@@ -355,6 +366,9 @@ struct io_ring_ctx {
struct io_alloc_cache rw_cache;
struct io_alloc_cache cmd_cache;
+ int (*loop_step)(struct io_ring_ctx *ctx,
+ struct iou_loop_params *);
+
/*
* Any cancelable uring_cmd is added to this list in
* ->uring_cmd() by io_uring_cmd_insert_cancelable()
@@ -477,6 +491,8 @@ struct io_ring_ctx {
DECLARE_HASHTABLE(napi_ht, 4);
#endif
+ struct io_uring_bpf_ops *bpf_ops;
+
/*
* Protection for resize vs mmap races - both the mmap and resize
* side will need to grab this lock, to prevent either side from
@@ -545,6 +561,7 @@ enum {
REQ_F_HAS_METADATA_BIT,
REQ_F_IMPORT_BUFFER_BIT,
REQ_F_SQE_COPIED_BIT,
+ REQ_F_IOPOLL_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -638,6 +655,8 @@ enum {
REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
/* ->sqe_copy() has been called, if necessary */
REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
+ /* request must be iopolled to completion (set in ->issue()) */
+ REQ_F_IOPOLL = IO_REQ_FLAG(REQ_F_IOPOLL_BIT),
};
struct io_tw_req {
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 99b7209dabd7..2c5685adf3a9 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
-#include <linux/pagevec.h>
+#include <linux/folio_batch.h>
struct address_space;
struct fiemap_extent_info;
@@ -65,6 +65,8 @@ struct vm_fault;
*
* IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
* bio, i.e. set REQ_ATOMIC.
+ *
+ * IOMAP_F_INTEGRITY indicates that the filesystem handles integrity metadata.
*/
#define IOMAP_F_NEW (1U << 0)
#define IOMAP_F_DIRTY (1U << 1)
@@ -79,6 +81,11 @@ struct vm_fault;
#define IOMAP_F_BOUNDARY (1U << 6)
#define IOMAP_F_ANON_WRITE (1U << 7)
#define IOMAP_F_ATOMIC_BIO (1U << 8)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+#define IOMAP_F_INTEGRITY (1U << 9)
+#else
+#define IOMAP_F_INTEGRITY 0
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
* Flag reserved for file system specific usage
@@ -493,6 +500,7 @@ struct iomap_read_folio_ctx {
struct folio *cur_folio;
struct readahead_control *rac;
void *read_ctx;
+ loff_t read_ctx_file_offset;
};
struct iomap_read_ops {
@@ -512,7 +520,14 @@ struct iomap_read_ops {
*
* This is optional.
*/
- void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+ void (*submit_read)(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx);
+
+ /*
+ * Optional, allows the filesystem to specify its own bio_set, so new
+ * bios can be allocated from the provided bio_set.
+ */
+ struct bio_set *bio_set;
};
/*
@@ -598,6 +613,9 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
extern struct bio_set iomap_ioend_bioset;
#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t plen);
+
extern const struct iomap_read_ops iomap_bio_read_ops;
static inline void iomap_bio_read_folio(struct folio *folio,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 54b8b48c762e..e587d4ac4d33 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -223,6 +223,7 @@ enum iommu_domain_cookie_type {
struct iommu_domain {
unsigned type;
enum iommu_domain_cookie_type cookie_type;
+ bool is_iommupt;
const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops;
const struct iommu_ops *owner; /* Whose domain_alloc we came from */
@@ -271,6 +272,8 @@ enum iommu_cap {
*/
IOMMU_CAP_DEFERRED_FLUSH,
IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
+ /* ATS is supported and may be enabled for this device */
+ IOMMU_CAP_PCI_ATS_SUPPORTED,
};
/* These are the possible reserved region types */
@@ -980,7 +983,8 @@ static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
- if (domain->ops->iotlb_sync)
+ if (domain->ops->iotlb_sync &&
+ likely(iotlb_gather->start < iotlb_gather->end))
domain->ops->iotlb_sync(domain, iotlb_gather);
iommu_iotlb_gather_init(iotlb_gather);
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index bdd2e0652bc3..53edd69acb9b 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -159,7 +159,7 @@
*
* This macro does not rely on timekeeping. Hence it is safe to call even when
* timekeeping is suspended, at the expense of an underestimation of wall clock
- * time, which is rather minimal with a non-zero delay_us.
+ * time, which is rather minimal with a non-zero @delay_us.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
@@ -167,9 +167,9 @@
* Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val.
*/
-#define read_poll_timeout_atomic(op, val, cond, sleep_us, timeout_us, \
- sleep_before_read, args...) \
- poll_timeout_us_atomic((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, delay_us, timeout_us, delay_before_read)
/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 5533a5debf3f..3c73c9c0d4f7 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -202,6 +202,7 @@ enum {
* typedef resource_alignf - Resource alignment callback
* @data: Private data used by the callback
* @res: Resource candidate range (an empty resource space)
+ * @empty_res: Empty resource range without alignment applied
* @size: The minimum size of the empty space
* @align: Alignment from the constraints
*
@@ -212,6 +213,7 @@ enum {
*/
typedef resource_size_t (*resource_alignf)(void *data,
const struct resource *res,
+ const struct resource *empty_res,
resource_size_t size,
resource_size_t align);
@@ -304,14 +306,28 @@ static inline unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
-/* True iff r1 completely contains r2 */
-static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
+
+/*
+ * For checking if @r1 completely contains @r2 for resources that have real
+ * addresses but are not yet crafted into the resource tree. Normally
+ * resource_contains() should be used instead of this function as it checks
+ * also IORESOURCE_UNSET flag.
+ */
+static inline bool __resource_contains_unbound(const struct resource *r1,
+ const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
+
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+/* True iff r1 completely contains r2 */
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
+{
if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET)
return false;
- return r1->start <= r2->start && r1->end >= r2->end;
+
+ return __resource_contains_unbound(r1, r2);
}
/* True if any part of r1 overlaps r2 */
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
index d26d1b1bcbfb..167fba7dbf04 100644
--- a/include/linux/irq-entry-common.h
+++ b/include/linux/irq-entry-common.h
@@ -3,6 +3,7 @@
#define __LINUX_IRQENTRYCOMMON_H
#include <linux/context_tracking.h>
+#include <linux/hrtimer_rearm.h>
#include <linux/kmsan.h>
#include <linux/rseq_entry.h>
#include <linux/static_call_types.h>
@@ -33,6 +34,14 @@
_TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
ARCH_EXIT_TO_USER_MODE_WORK)
+#ifdef CONFIG_HRTIMER_REARM_DEFERRED
+# define EXIT_TO_USER_MODE_WORK_SYSCALL (EXIT_TO_USER_MODE_WORK)
+# define EXIT_TO_USER_MODE_WORK_IRQ (EXIT_TO_USER_MODE_WORK | _TIF_HRTIMER_REARM)
+#else
+# define EXIT_TO_USER_MODE_WORK_SYSCALL (EXIT_TO_USER_MODE_WORK)
+# define EXIT_TO_USER_MODE_WORK_IRQ (EXIT_TO_USER_MODE_WORK)
+#endif
+
/**
* arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
* @regs: Pointer to currents pt_regs
@@ -101,37 +110,6 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
}
/**
- * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Defaults to local_irq_enable(). Can be supplied by architecture specific
- * code.
- */
-static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
-
-#ifndef local_irq_enable_exit_to_user
-static __always_inline void local_irq_enable_exit_to_user(unsigned long ti_work)
-{
- local_irq_enable();
-}
-#endif
-
-/**
- * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
- *
- * Defaults to local_irq_disable(). Can be supplied by architecture specific
- * code.
- */
-static inline void local_irq_disable_exit_to_user(void);
-
-#ifndef local_irq_disable_exit_to_user
-static __always_inline void local_irq_disable_exit_to_user(void)
-{
- local_irq_disable();
-}
-#endif
-
-/**
* arch_exit_to_user_mode_work - Architecture specific TIF work for exit
* to user mode.
* @regs: Pointer to currents pt_regs
@@ -203,6 +181,7 @@ unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work
/**
* __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
* @regs: Pointer to pt_regs on entry stack
+ * @work_mask: Which TIF bits need to be evaluated
*
* 1) check that interrupts are disabled
* 2) call tick_nohz_user_enter_prepare()
@@ -212,7 +191,8 @@ unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work
*
* Don't invoke directly, use the syscall/irqentry_ prefixed variants below
*/
-static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs,
+ const unsigned long work_mask)
{
unsigned long ti_work;
@@ -222,8 +202,10 @@ static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
tick_nohz_user_enter_prepare();
ti_work = read_thread_flags();
- if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
- ti_work = exit_to_user_mode_loop(regs, ti_work);
+ if (unlikely(ti_work & work_mask)) {
+ if (!hrtimer_rearm_deferred_user_irq(&ti_work, work_mask))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+ }
arch_exit_to_user_mode_prepare(regs, ti_work);
}
@@ -239,7 +221,7 @@ static __always_inline void __exit_to_user_mode_validate(void)
/* Temporary workaround to keep ARM64 alive */
static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
{
- __exit_to_user_mode_prepare(regs);
+ __exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK);
rseq_exit_to_user_mode_legacy();
__exit_to_user_mode_validate();
}
@@ -253,7 +235,7 @@ static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *reg
*/
static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
- __exit_to_user_mode_prepare(regs);
+ __exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK_SYSCALL);
rseq_syscall_exit_to_user_mode();
__exit_to_user_mode_validate();
}
@@ -267,7 +249,7 @@ static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *re
*/
static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
{
- __exit_to_user_mode_prepare(regs);
+ __exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK_IRQ);
rseq_irqentry_exit_to_user_mode();
__exit_to_user_mode_validate();
}
@@ -335,6 +317,8 @@ static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
*/
static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
+ lockdep_assert_irqs_disabled();
+
instrumentation_begin();
irqentry_exit_to_user_mode_prepare(regs);
instrumentation_end();
@@ -366,6 +350,207 @@ typedef struct irqentry_state {
#endif
/**
+ * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void raw_irqentry_exit_cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * irqentry_enter_from_kernel_mode - Establish state before invoking the irq handler
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from kernel mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing) and
+ * is provided for architectures which require a strict split between entry from
+ * kernel and user mode and therefore cannot use irqentry_enter() which handles
+ * both entry modes.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit_to_kernel_mode().
+ */
+static __always_inline irqentry_state_t irqentry_enter_from_kernel_mode(struct pt_regs *regs)
+{
+ irqentry_state_t ret = {
+ .exit_rcu = false,
+ };
+
+ /*
+ * If this entry hit the idle task invoke ct_irq_enter() whether
+ * RCU is watching or not.
+ *
+ * Interrupts can nest when the first interrupt invokes softirq
+ * processing on return which enables interrupts.
+ *
+ * Scheduler ticks in the idle task can mark quiescent state and
+ * terminate a grace period, if and only if the timer interrupt is
+ * not nested into another interrupt.
+ *
+ * Checking for rcu_is_watching() here would prevent the nesting
+ * interrupt to invoke ct_irq_enter(). If that nested interrupt is
+ * the tick then rcu_flavor_sched_clock_irq() would wrongfully
+ * assume that it is the first interrupt and eventually claim
+ * quiescent state and end grace periods prematurely.
+ *
+ * Unconditionally invoke ct_irq_enter() so RCU state stays
+ * consistent.
+ *
+ * TINY_RCU does not support EQS, so let the compiler eliminate
+ * this part when enabled.
+ */
+ if (!IS_ENABLED(CONFIG_TINY_RCU) &&
+ (is_idle_task(current) || arch_in_rcu_eqs())) {
+ /*
+ * If RCU is not watching then the same careful
+ * sequence vs. lockdep and tracing is required
+ * as in irqentry_enter_from_user_mode().
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ ct_irq_enter();
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+
+ ret.exit_rcu = true;
+ return ret;
+ }
+
+ /*
+ * If RCU is watching then RCU only wants to check whether it needs
+ * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
+ * already contains a warning when RCU is not watching, so no point
+ * in having another one here.
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ rcu_irq_enter_check_tick();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+
+ return ret;
+}
+
+/**
+ * irqentry_exit_to_kernel_mode_preempt - Run preempt checks on return to kernel mode
+ * @regs: Pointer to current's pt_regs
+ * @state: Return value from matching call to irqentry_enter_from_kernel_mode()
+ *
+ * This is to be invoked before irqentry_exit_to_kernel_mode_after_preempt() to
+ * allow kernel preemption on return from interrupt.
+ *
+ * Must be invoked with interrupts disabled and CPU state which allows kernel
+ * preemption.
+ *
+ * After returning from this function, the caller can modify CPU state before
+ * invoking irqentry_exit_to_kernel_mode_after_preempt(), which is required to
+ * re-establish the tracing, lockdep and RCU state for returning to the
+ * interrupted context.
+ */
+static inline void irqentry_exit_to_kernel_mode_preempt(struct pt_regs *regs,
+ irqentry_state_t state)
+{
+ if (regs_irqs_disabled(regs) || state.exit_rcu)
+ return;
+
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ irqentry_exit_cond_resched();
+}
+
+/**
+ * irqentry_exit_to_kernel_mode_after_preempt - Establish trace, lockdep and RCU state
+ * @regs: Pointer to current's pt_regs
+ * @state: Return value from matching call to irqentry_enter_from_kernel_mode()
+ *
+ * This is to be invoked after irqentry_exit_to_kernel_mode_preempt() and before
+ * actually returning to the interrupted context.
+ *
+ * There are no requirements for the CPU state other than being able to complete
+ * the tracing, lockdep and RCU state transitions. After this function returns
+ * the caller must return directly to the interrupted context.
+ */
+static __always_inline void
+irqentry_exit_to_kernel_mode_after_preempt(struct pt_regs *regs, irqentry_state_t state)
+{
+ if (!regs_irqs_disabled(regs)) {
+ /*
+ * If RCU was not watching on entry this needs to be done
+ * carefully and needs the same ordering of lockdep/tracing
+ * and RCU as the return to user mode path.
+ */
+ if (state.exit_rcu) {
+ instrumentation_begin();
+ hrtimer_rearm_deferred();
+ /* Tell the tracer that IRET will enable interrupts */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+ ct_irq_exit();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ return;
+ }
+
+ instrumentation_begin();
+ hrtimer_rearm_deferred();
+ /* Covers both tracing and lockdep */
+ trace_hardirqs_on();
+ instrumentation_end();
+ } else {
+ /*
+ * IRQ flags state is correct already. Just tell RCU if it
+ * was not watching on entry.
+ */
+ if (state.exit_rcu)
+ ct_irq_exit();
+ }
+}
+
+/**
+ * irqentry_exit_to_kernel_mode - Run preempt checks and establish state after
+ * invoking the interrupt handler
+ * @regs: Pointer to current's pt_regs
+ * @state: Return value from matching call to irqentry_enter_from_kernel_mode()
+ *
+ * This is the counterpart of irqentry_enter_from_kernel_mode() and combines
+ * the calls to irqentry_exit_to_kernel_mode_preempt() and
+ * irqentry_exit_to_kernel_mode_after_preempt().
+ *
+ * The requirement for the CPU state is that it can schedule. After the function
+ * returns the tracing, lockdep and RCU state transitions are completed and the
+ * caller must return directly to the interrupted context.
+ */
+static __always_inline void irqentry_exit_to_kernel_mode(struct pt_regs *regs,
+ irqentry_state_t state)
+{
+ lockdep_assert_irqs_disabled();
+
+ instrumentation_begin();
+ irqentry_exit_to_kernel_mode_preempt(regs, state);
+ instrumentation_end();
+
+ irqentry_exit_to_kernel_mode_after_preempt(regs, state);
+}
+
+/**
* irqentry_enter - Handle state tracking on ordinary interrupt entries
* @regs: Pointer to pt_regs of interrupted context
*
@@ -394,33 +579,11 @@ typedef struct irqentry_state {
* establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
* would not be possible.
*
- * Returns: An opaque object that must be passed to idtentry_exit()
+ * Returns: An opaque object that must be passed to irqentry_exit()
*/
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
/**
- * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
- *
- * Conditional reschedule with additional sanity checks.
- */
-void raw_irqentry_exit_cond_resched(void);
-
-#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
-#define irqentry_exit_cond_resched_dynamic_disabled NULL
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
-#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-void dynamic_irqentry_exit_cond_resched(void);
-#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
-#endif
-#else /* CONFIG_PREEMPT_DYNAMIC */
-#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
-#endif /* CONFIG_PREEMPT_DYNAMIC */
-
-/**
* irqentry_exit - Handle return from exception that used irqentry_enter()
* @regs: Pointer to pt_regs (exception entry regs)
* @state: Return value from matching call to irqentry_enter()
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 951acbdb9f84..efa514ee562f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -35,6 +35,10 @@ enum irqchip_irq_state;
*
* Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
*
+ * Note that the first 6 definitions are shadowed by C preprocessor definitions
+ * in include/dt-bindings/interrupt-controller/irq.h. This is not an issue, as
+ * the actual values must be the same, due to being part of the stable DT ABI.
+ *
* IRQ_TYPE_NONE - default, unspecified type
* IRQ_TYPE_EDGE_RISING - rising edge triggered
* IRQ_TYPE_EDGE_FALLING - falling edge triggered
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
index b78488df6c98..40d2fce68294 100644
--- a/include/linux/irqchip/arm-gic-v5.h
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -25,6 +25,28 @@
#define GICV5_HWIRQ_TYPE_SPI UL(0x3)
/*
+ * Architected PPIs
+ */
+#define GICV5_ARCH_PPI_S_DB_PPI 0x0
+#define GICV5_ARCH_PPI_RL_DB_PPI 0x1
+#define GICV5_ARCH_PPI_NS_DB_PPI 0x2
+#define GICV5_ARCH_PPI_SW_PPI 0x3
+#define GICV5_ARCH_PPI_HACDBSIRQ 0xf
+#define GICV5_ARCH_PPI_CNTHVS 0x13
+#define GICV5_ARCH_PPI_CNTHPS 0x14
+#define GICV5_ARCH_PPI_PMBIRQ 0x15
+#define GICV5_ARCH_PPI_COMMIRQ 0x16
+#define GICV5_ARCH_PPI_PMUIRQ 0x17
+#define GICV5_ARCH_PPI_CTIIRQ 0x18
+#define GICV5_ARCH_PPI_GICMNT 0x19
+#define GICV5_ARCH_PPI_CNTHP 0x1a
+#define GICV5_ARCH_PPI_CNTV 0x1b
+#define GICV5_ARCH_PPI_CNTHV 0x1c
+#define GICV5_ARCH_PPI_CNTPS 0x1d
+#define GICV5_ARCH_PPI_CNTP 0x1e
+#define GICV5_ARCH_PPI_TRBIRQ 0x1f
+
+/*
* Tables attributes
*/
#define GICV5_NO_READ_ALLOC 0b0
@@ -365,6 +387,11 @@ int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
int gicv5_irs_iste_alloc(u32 lpi);
void gicv5_irs_syncr(void);
+/* Embedded in kvm.arch */
+struct gicv5_vpe {
+ bool resident;
+};
+
struct gicv5_its_devtab_cfg {
union {
struct {
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index a53a00d36228..7e785aa6d35d 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -429,22 +429,46 @@ struct jbd2_inode {
unsigned long i_flags;
/**
- * @i_dirty_start:
+ * @i_dirty_start_page:
+ *
+ * Dirty range start in PAGE_SIZE units.
+ *
+ * The dirty range is empty if @i_dirty_start_page is greater than or
+ * equal to @i_dirty_end_page.
*
- * Offset in bytes where the dirty range for this inode starts.
* [j_list_lock]
*/
- loff_t i_dirty_start;
+ pgoff_t i_dirty_start_page;
/**
- * @i_dirty_end:
+ * @i_dirty_end_page:
+ *
+ * Dirty range end in PAGE_SIZE units (exclusive).
*
- * Inclusive offset in bytes where the dirty range for this inode
- * ends. [j_list_lock]
+ * [j_list_lock]
*/
- loff_t i_dirty_end;
+ pgoff_t i_dirty_end_page;
};
+/*
+ * Lockless readers treat start_page >= end_page as an empty range.
+ * Writers publish a new non-empty range by storing i_dirty_end_page before
+ * i_dirty_start_page.
+ */
+static inline bool jbd2_jinode_get_dirty_range(const struct jbd2_inode *jinode,
+ loff_t *start, loff_t *end)
+{
+ pgoff_t start_page = READ_ONCE(jinode->i_dirty_start_page);
+ pgoff_t end_page = READ_ONCE(jinode->i_dirty_end_page);
+
+ if (start_page >= end_page)
+ return false;
+
+ *start = (loff_t)start_page << PAGE_SHIFT;
+ *end = ((loff_t)end_page << PAGE_SHIFT) - 1;
+ return true;
+}
+
struct jbd2_revoke_table_s;
/**
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d1c3d4941854..bbd57061802c 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -67,10 +67,6 @@ extern void register_refined_jiffies(long clock_tick_rate);
/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-#ifndef __jiffy_arch_data
-#define __jiffy_arch_data
-#endif
-
/*
* The 64-bit value is not atomic on 32-bit systems - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
@@ -83,7 +79,7 @@ extern void register_refined_jiffies(long clock_tick_rate);
* See arch/ARCH/kernel/vmlinux.lds.S
*/
extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
+extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index fdb79dd1ebd8..b9c7b0ebf7b9 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -87,13 +87,6 @@ struct static_key {
atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
- * Note:
- * To make anonymous unions work with old compilers, the static
- * initialization of them requires brackets. This creates a dependency
- * on the order of the struct with the initializers. If any fields
- * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
- * to be modified.
- *
* bit 0 => 1 if key is initially true
* 0 if initially false
* bit 1 => 1 if points to struct static_key_mod
@@ -238,19 +231,12 @@ extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
-/*
- * We should be using ATOMIC_INIT() for initializing .enabled, but
- * the inclusion of atomic.h is problematic for inclusion of jump_label.h
- * in 'low-level' headers. Thus, we are initializing .enabled with a
- * raw value, but have added a BUILD_BUG_ON() to catch any issues in
- * jump_label_init() see: kernel/jump_label.c.
- */
#define STATIC_KEY_INIT_TRUE \
- { .enabled = { 1 }, \
- { .type = JUMP_TYPE_TRUE } }
+ { .enabled = ATOMIC_INIT(1), \
+ .type = JUMP_TYPE_TRUE }
#define STATIC_KEY_INIT_FALSE \
- { .enabled = { 0 }, \
- { .type = JUMP_TYPE_FALSE } }
+ { .enabled = ATOMIC_INIT(0), \
+ .type = JUMP_TYPE_FALSE }
#else /* !CONFIG_JUMP_LABEL */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 338a1921a50a..bf233bde68c7 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -352,8 +352,8 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
* kasan_mempool_unpoison_object().
*
* This function operates on all slab allocations including large kmalloc
- * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
- * size > KMALLOC_MAX_SIZE).
+ * allocations (i.e. the ones backed directly by the buddy allocator rather
+ * than kmalloc slab caches).
*
* Return: true if the allocation can be safely reused; false otherwise.
*/
@@ -381,8 +381,8 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
* original tags based on the pointer value.
*
* This function operates on all slab allocations including large kmalloc
- * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
- * size > KMALLOC_MAX_SIZE).
+ * allocations (i.e. the ones backed directly by the buddy allocator rather
+ * than kmalloc slab caches).
*/
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
size_t size)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index b5a5f32fdfd1..e21b2f7f4159 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -23,6 +23,7 @@
struct file;
struct dentry;
struct iattr;
+struct ns_common;
struct seq_file;
struct vm_area_struct;
struct vm_operations_struct;
@@ -99,8 +100,6 @@ enum kernfs_node_type {
#define KERNFS_TYPE_MASK 0x000f
#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
-#define KERNFS_MAX_USER_XATTRS 128
-#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10)
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
@@ -209,7 +208,7 @@ struct kernfs_node {
struct rb_node rb;
- const void *ns; /* namespace tag */
+ const struct ns_common *ns; /* namespace tag */
unsigned int hash; /* ns + name hash */
unsigned short flags;
umode_t mode;
@@ -331,7 +330,7 @@ struct kernfs_ops {
*/
struct kernfs_fs_context {
struct kernfs_root *root; /* Root of the hierarchy being mounted */
- void *ns_tag; /* Namespace tag of the mount (or NULL) */
+ struct ns_common *ns_tag; /* Namespace tag of the mount (or NULL) */
unsigned long magic; /* File system specific magic number */
/* The following are set/used by kernfs_mount() */
@@ -406,9 +405,11 @@ void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
- const char *name, const void *ns);
+ const char *name,
+ const struct ns_common *ns);
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
- const char *path, const void *ns);
+ const char *path,
+ const struct ns_common *ns);
void kernfs_get(struct kernfs_node *kn);
void kernfs_put(struct kernfs_node *kn);
@@ -426,7 +427,8 @@ unsigned int kernfs_root_flags(struct kernfs_node *kn);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
- void *priv, const void *ns);
+ void *priv,
+ const struct ns_common *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
@@ -434,7 +436,8 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
kuid_t uid, kgid_t gid,
loff_t size,
const struct kernfs_ops *ops,
- void *priv, const void *ns,
+ void *priv,
+ const struct ns_common *ns,
struct lock_class_key *key);
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
@@ -446,9 +449,9 @@ void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
bool kernfs_remove_self(struct kernfs_node *kn);
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
- const void *ns);
+ const struct ns_common *ns);
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
- const char *new_name, const void *new_ns);
+ const char *new_name, const struct ns_common *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
struct poll_table_struct *pt);
@@ -459,7 +462,7 @@ int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags);
-const void *kernfs_super_ns(struct super_block *sb);
+const struct ns_common *kernfs_super_ns(struct super_block *sb);
int kernfs_get_tree(struct fs_context *fc);
void kernfs_free_fs_context(struct fs_context *fc);
void kernfs_kill_sb(struct super_block *sb);
@@ -494,11 +497,11 @@ static inline struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
static inline struct kernfs_node *
kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
- const void *ns)
+ const struct ns_common *ns)
{ return NULL; }
static inline struct kernfs_node *
kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path,
- const void *ns)
+ const struct ns_common *ns)
{ return NULL; }
static inline void kernfs_get(struct kernfs_node *kn) { }
@@ -526,14 +529,15 @@ static inline unsigned int kernfs_root_flags(struct kernfs_node *kn)
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
umode_t mode, kuid_t uid, kgid_t gid,
- void *priv, const void *ns)
+ void *priv, const struct ns_common *ns)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
umode_t mode, kuid_t uid, kgid_t gid,
loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns, struct lock_class_key *key)
+ void *priv, const struct ns_common *ns,
+ struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
@@ -549,12 +553,14 @@ static inline bool kernfs_remove_self(struct kernfs_node *kn)
{ return false; }
static inline int kernfs_remove_by_name_ns(struct kernfs_node *kn,
- const char *name, const void *ns)
+ const char *name,
+ const struct ns_common *ns)
{ return -ENOSYS; }
static inline int kernfs_rename_ns(struct kernfs_node *kn,
struct kernfs_node *new_parent,
- const char *new_name, const void *new_ns)
+ const char *new_name,
+ const struct ns_common *new_ns)
{ return -ENOSYS; }
static inline int kernfs_setattr(struct kernfs_node *kn,
@@ -575,7 +581,7 @@ static inline int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags)
{ return -ENOSYS; }
-static inline const void *kernfs_super_ns(struct super_block *sb)
+static inline const struct ns_common *kernfs_super_ns(struct super_block *sb)
{ return NULL; }
static inline int kernfs_get_tree(struct fs_context *fc)
diff --git a/include/linux/kho/abi/kexec_handover.h b/include/linux/kho/abi/kexec_handover.h
index 2201a0d2c159..6b7d8ef550f9 100644
--- a/include/linux/kho/abi/kexec_handover.h
+++ b/include/linux/kho/abi/kexec_handover.h
@@ -10,8 +10,13 @@
#ifndef _LINUX_KHO_ABI_KEXEC_HANDOVER_H
#define _LINUX_KHO_ABI_KEXEC_HANDOVER_H
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/math.h>
#include <linux/types.h>
+#include <asm/page.h>
+
/**
* DOC: Kexec Handover ABI
*
@@ -29,32 +34,32 @@
* compatibility is only guaranteed for kernels supporting the same ABI version.
*
* FDT Structure Overview:
- * The FDT serves as a central registry for physical
- * addresses of preserved data structures and sub-FDTs. The first kernel
- * populates this FDT with references to memory regions and other FDTs that
- * need to persist across the kexec transition. The subsequent kernel then
- * parses this FDT to locate and restore the preserved data.::
+ * The FDT serves as a central registry for physical addresses of preserved
+ * data structures. The first kernel populates this FDT with references to
+ * memory regions and other metadata that need to persist across the kexec
+ * transition. The subsequent kernel then parses this FDT to locate and
+ * restore the preserved data.::
*
* / {
- * compatible = "kho-v1";
+ * compatible = "kho-v2";
*
* preserved-memory-map = <0x...>;
*
* <subnode-name-1> {
- * fdt = <0x...>;
+ * preserved-data = <0x...>;
* };
*
* <subnode-name-2> {
- * fdt = <0x...>;
+ * preserved-data = <0x...>;
* };
* ... ...
* <subnode-name-N> {
- * fdt = <0x...>;
+ * preserved-data = <0x...>;
* };
* };
*
* Root KHO Node (/):
- * - compatible: "kho-v1"
+ * - compatible: "kho-v2"
*
* Indentifies the overall KHO ABI version.
*
@@ -69,20 +74,20 @@
* is provided by the subsystem that uses KHO for preserving its
* data.
*
- * - fdt: u64
+ * - preserved-data: u64
*
- * Physical address pointing to a subnode FDT blob that is also
+ * Physical address pointing to a subnode data blob that is also
* being preserved.
*/
/* The compatible string for the KHO FDT root node. */
-#define KHO_FDT_COMPATIBLE "kho-v1"
+#define KHO_FDT_COMPATIBLE "kho-v2"
/* The FDT property for the preserved memory map. */
#define KHO_FDT_MEMORY_MAP_PROP_NAME "preserved-memory-map"
-/* The FDT property for sub-FDTs. */
-#define KHO_FDT_SUB_TREE_PROP_NAME "fdt"
+/* The FDT property for preserved data blobs. */
+#define KHO_FDT_SUB_TREE_PROP_NAME "preserved-data"
/**
* DOC: Kexec Handover ABI for vmalloc Preservation
@@ -160,4 +165,113 @@ struct kho_vmalloc {
unsigned short order;
};
+/**
+ * DOC: KHO persistent memory tracker
+ *
+ * KHO tracks preserved memory using a radix tree data structure. Each node of
+ * the tree is exactly a single page. The leaf nodes are bitmaps where each set
+ * bit is a preserved page of any order. The intermediate nodes are tables of
+ * physical addresses that point to a lower level node.
+ *
+ * The tree hierarchy is shown below::
+ *
+ * root
+ * +-------------------+
+ * | Level 5 | (struct kho_radix_node)
+ * +-------------------+
+ * |
+ * v
+ * +-------------------+
+ * | Level 4 | (struct kho_radix_node)
+ * +-------------------+
+ * |
+ * | ... (intermediate levels)
+ * |
+ * v
+ * +-------------------+
+ * | Level 0 | (struct kho_radix_leaf)
+ * +-------------------+
+ *
+ * The tree is traversed using a key that encodes the page's physical address
+ * (pa) and its order into a single unsigned long value. The encoded key value
+ * is composed of two parts: the 'order bit' in the upper part and the
+ * 'shifted physical address' in the lower part.::
+ *
+ * +------------+-----------------------------+--------------------------+
+ * | Page Order | Order Bit | Shifted Physical Address |
+ * +------------+-----------------------------+--------------------------+
+ * | 0 | ...000100 ... (at bit 52) | pa >> (PAGE_SHIFT + 0) |
+ * | 1 | ...000010 ... (at bit 51) | pa >> (PAGE_SHIFT + 1) |
+ * | 2 | ...000001 ... (at bit 50) | pa >> (PAGE_SHIFT + 2) |
+ * | ... | ... | ... |
+ * +------------+-----------------------------+--------------------------+
+ *
+ * Shifted Physical Address:
+ * The 'shifted physical address' is the physical address normalized for its
+ * order. It effectively represents the PFN shifted right by the order.
+ *
+ * Order Bit:
+ * The 'order bit' encodes the page order by setting a single bit at a
+ * specific position. The position of this bit itself represents the order.
+ *
+ * For instance, on a 64-bit system with 4KB pages (PAGE_SHIFT = 12), the
+ * maximum range for the shifted physical address (for order 0) is 52 bits
+ * (64 - 12). This address occupies bits [0-51]. For order 0, the order bit is
+ * set at position 52.
+ *
+ * The following diagram illustrates how the encoded key value is split into
+ * indices for the tree levels, with PAGE_SIZE of 4KB::
+ *
+ * 63:60 59:51 50:42 41:33 32:24 23:15 14:0
+ * +---------+--------+--------+--------+--------+--------+-----------------+
+ * | 0 | Lv 5 | Lv 4 | Lv 3 | Lv 2 | Lv 1 | Lv 0 (bitmap) |
+ * +---------+--------+--------+--------+--------+--------+-----------------+
+ *
+ * The radix tree stores pages of all orders in a single 6-level hierarchy. It
+ * efficiently shares higher tree levels, especially due to common zero top
+ * address bits, allowing a single, efficient algorithm to manage all
+ * pages. This bitmap approach also offers memory efficiency; for example, a
+ * 512KB bitmap can cover a 16GB memory range for 0-order pages with PAGE_SIZE =
+ * 4KB.
+ *
+ * The data structures defined here are part of the KHO ABI. Any modification
+ * to these structures that breaks backward compatibility must be accompanied by
+ * an update to the "compatible" string. This ensures that a newer kernel can
+ * correctly interpret the data passed by an older kernel.
+ */
+
+/*
+ * Defines constants for the KHO radix tree structure, used to track preserved
+ * memory. These constants govern the indexing, sizing, and depth of the tree.
+ */
+enum kho_radix_consts {
+ /*
+ * The bit position of the order bit (and also the length of the
+ * shifted physical address) for an order-0 page.
+ */
+ KHO_ORDER_0_LOG2 = 64 - PAGE_SHIFT,
+
+ /* Size of the table in kho_radix_node, in log2 */
+ KHO_TABLE_SIZE_LOG2 = const_ilog2(PAGE_SIZE / sizeof(phys_addr_t)),
+
+ /* Number of bits in the kho_radix_leaf bitmap, in log2 */
+ KHO_BITMAP_SIZE_LOG2 = PAGE_SHIFT + const_ilog2(BITS_PER_BYTE),
+
+ /*
+ * The total tree depth is the number of intermediate levels
+ * and 1 bitmap level.
+ */
+ KHO_TREE_MAX_DEPTH =
+ DIV_ROUND_UP(KHO_ORDER_0_LOG2 - KHO_BITMAP_SIZE_LOG2,
+ KHO_TABLE_SIZE_LOG2) + 1,
+};
+
+struct kho_radix_node {
+ u64 table[1 << KHO_TABLE_SIZE_LOG2];
+};
+
+struct kho_radix_leaf {
+ DECLARE_BITMAP(bitmap, 1 << KHO_BITMAP_SIZE_LOG2);
+};
+
#endif /* _LINUX_KHO_ABI_KEXEC_HANDOVER_H */
diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h
index 68cb6303b846..08b10fea2afc 100644
--- a/include/linux/kho/abi/memfd.h
+++ b/include/linux/kho/abi/memfd.h
@@ -56,10 +56,24 @@ struct memfd_luo_folio_ser {
u64 index;
} __packed;
+/*
+ * The set of seals this version supports preserving. If support for any new
+ * seals is needed, add it here and bump version.
+ */
+#define MEMFD_LUO_ALL_SEALS (F_SEAL_SEAL | \
+ F_SEAL_SHRINK | \
+ F_SEAL_GROW | \
+ F_SEAL_WRITE | \
+ F_SEAL_FUTURE_WRITE | \
+ F_SEAL_EXEC)
+
/**
* struct memfd_luo_ser - Main serialization structure for a memfd.
* @pos: The file's current position (f_pos).
* @size: The total size of the file in bytes (i_size).
+ * @seals: The seals present on the memfd. The seals are uABI so it is safe
+ * to directly use them in the ABI.
+ * @flags: Flags for the file. Unused flag bits must be set to 0.
* @nr_folios: Number of folios in the folios array.
* @folios: KHO vmalloc descriptor pointing to the array of
* struct memfd_luo_folio_ser.
@@ -67,11 +81,13 @@ struct memfd_luo_folio_ser {
struct memfd_luo_ser {
u64 pos;
u64 size;
+ u32 seals;
+ u32 flags;
u64 nr_folios;
struct kho_vmalloc folios;
} __packed;
/* The compatibility string for memfd file handler */
-#define MEMFD_LUO_FH_COMPATIBLE "memfd-v1"
+#define MEMFD_LUO_FH_COMPATIBLE "memfd-v2"
#endif /* _LINUX_KHO_ABI_MEMFD_H */
diff --git a/include/linux/kho_radix_tree.h b/include/linux/kho_radix_tree.h
new file mode 100644
index 000000000000..84e918b96e53
--- /dev/null
+++ b/include/linux/kho_radix_tree.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KHO_RADIX_TREE_H
+#define _LINUX_KHO_RADIX_TREE_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex_types.h>
+#include <linux/types.h>
+
+/**
+ * DOC: Kexec Handover Radix Tree
+ *
+ * This is a radix tree implementation for tracking physical memory pages
+ * across kexec transitions. It was developed for the KHO mechanism but is
+ * designed for broader use by any subsystem that needs to preserve pages.
+ *
+ * The radix tree is a multi-level tree where leaf nodes are bitmaps
+ * representing individual pages. To allow pages of different sizes (orders)
+ * to be stored efficiently in a single tree, it uses a unique key encoding
+ * scheme. Each key is an unsigned long that combines a page's physical
+ * address and its order.
+ *
+ * Client code is responsible for allocating the root node of the tree,
+ * initializing the mutex lock, and managing its lifecycle. It must use the
+ * tree data structures defined in the KHO ABI,
+ * `include/linux/kho/abi/kexec_handover.h`.
+ */
+
+struct kho_radix_node;
+
+struct kho_radix_tree {
+ struct kho_radix_node *root;
+ struct mutex lock; /* protects the tree's structure and root pointer */
+};
+
+typedef int (*kho_radix_tree_walk_callback_t)(phys_addr_t phys,
+ unsigned int order);
+
+#ifdef CONFIG_KEXEC_HANDOVER
+
+int kho_radix_add_page(struct kho_radix_tree *tree, unsigned long pfn,
+ unsigned int order);
+
+void kho_radix_del_page(struct kho_radix_tree *tree, unsigned long pfn,
+ unsigned int order);
+
+int kho_radix_walk_tree(struct kho_radix_tree *tree,
+ kho_radix_tree_walk_callback_t cb);
+
+#else /* #ifdef CONFIG_KEXEC_HANDOVER */
+
+static inline int kho_radix_add_page(struct kho_radix_tree *tree, unsigned long pfn,
+ unsigned int order)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_radix_del_page(struct kho_radix_tree *tree,
+ unsigned long pfn, unsigned int order) { }
+
+static inline int kho_radix_walk_tree(struct kho_radix_tree *tree,
+ kho_radix_tree_walk_callback_t cb)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* #ifdef CONFIG_KEXEC_HANDOVER */
+
+#endif /* _LINUX_KHO_RADIX_TREE_H */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c8219505a79f..bcb5d4e32001 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -109,7 +109,7 @@ struct kobject *kobject_get(struct kobject *kobj);
struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj);
void kobject_put(struct kobject *kobj);
-const void *kobject_namespace(const struct kobject *kobj);
+const struct ns_common *kobject_namespace(const struct kobject *kobj);
void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
@@ -118,7 +118,7 @@ struct kobj_type {
const struct sysfs_ops *sysfs_ops;
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
- const void *(*namespace)(const struct kobject *kobj);
+ const struct ns_common *(*namespace)(const struct kobject *kobj);
void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 150fe2ae1b6b..4f0990e09b93 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -16,6 +16,7 @@
#ifndef _LINUX_KOBJECT_NS_H
#define _LINUX_KOBJECT_NS_H
+struct ns_common;
struct sock;
struct kobject;
@@ -39,10 +40,10 @@ enum kobj_ns_type {
struct kobj_ns_type_operations {
enum kobj_ns_type type;
bool (*current_may_mount)(void);
- void *(*grab_current_ns)(void);
- const void *(*netlink_ns)(struct sock *sk);
- const void *(*initial_ns)(void);
- void (*drop_ns)(void *);
+ struct ns_common *(*grab_current_ns)(void);
+ const struct ns_common *(*netlink_ns)(struct sock *sk);
+ const struct ns_common *(*initial_ns)(void);
+ void (*drop_ns)(struct ns_common *);
};
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
@@ -51,7 +52,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa
const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
-void *kobj_ns_grab_current(enum kobj_ns_type type);
-void kobj_ns_drop(enum kobj_ns_type type, void *ns);
+struct ns_common *kobj_ns_grab_current(enum kobj_ns_type type);
+void kobj_ns_drop(enum kobj_ns_type type, struct ns_common *ns);
#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c982694c987b..d39d0d5483a2 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -17,8 +17,8 @@
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, vm_flags_t *vm_flags);
-vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
- vm_flags_t vm_flags);
+vma_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
+ vma_flags_t vma_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
@@ -103,10 +103,10 @@ bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
-static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
- const struct file *file, vm_flags_t vm_flags)
+static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
+ const struct file *file, vma_flags_t vma_flags)
{
- return vm_flags;
+ return vma_flags;
}
static inline int ksm_disable(struct mm_struct *mm)
diff --git a/include/linux/ksysfs.h b/include/linux/ksysfs.h
new file mode 100644
index 000000000000..c7dc6e18f28e
--- /dev/null
+++ b/include/linux/ksysfs.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _KSYSFS_H_
+#define _KSYSFS_H_
+
+void ksysfs_init(void);
+
+#endif /* _KSYSFS_H_ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6b76e7a6f4c2..4c14aee1fb06 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -318,7 +318,8 @@ static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
struct kvm_mmio_fragment {
gpa_t gpa;
void *data;
- unsigned len;
+ u64 val;
+ unsigned int len;
};
struct kvm_vcpu {
@@ -1029,6 +1030,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
+static inline bool kvm_is_vcpu_creation_in_progress(struct kvm *kvm)
+{
+ lockdep_assert_held(&kvm->lock);
+
+ return kvm->created_vcpus != atomic_read(&kvm->online_vcpus);
+}
+
void kvm_destroy_vcpus(struct kvm *kvm);
int kvm_trylock_all_vcpus(struct kvm *kvm);
@@ -1628,6 +1636,13 @@ static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
/*
+ * kvm_arch_shutdown() is invoked immediately prior to forcefully disabling
+ * hardware virtualization on all CPUs via IPI function calls (in preparation
+ * for shutdown or reboot), e.g. to allow arch code to prepare for disabling
+ * virtualization while KVM may be actively running vCPUs.
+ */
+void kvm_arch_shutdown(void);
+/*
* kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
* kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
* kvm_usage_count, i.e. at the beginning of the generic hardware enabling
@@ -2300,7 +2315,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool enable_virt_at_load;
-extern bool kvm_rebooting;
#endif
extern unsigned int halt_poll_ns;
@@ -2366,6 +2380,7 @@ void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v5_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -2594,12 +2609,4 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range);
#endif
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-int kvm_enable_virtualization(void);
-void kvm_disable_virtualization(void);
-#else
-static inline int kvm_enable_virtualization(void) { return 0; }
-static inline void kvm_disable_virtualization(void) { }
-#endif
-
#endif
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index 05673d3529e7..992cd8bd8ed0 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -607,7 +607,20 @@ static inline bool pmd_is_migration_entry(pmd_t pmd)
}
/**
- * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
+ * softleaf_is_valid_pmd_entry() - Is the specified softleaf entry obtained from
+ * a PMD one that we support at PMD level?
+ * @entry: Entry to check.
+ * Returns: true if the softleaf entry is valid at PMD, otherwise false.
+ */
+static inline bool softleaf_is_valid_pmd_entry(softleaf_t entry)
+{
+ /* Only device private, migration entries valid for PMD. */
+ return softleaf_is_device_private(entry) ||
+ softleaf_is_migration(entry);
+}
+
+/**
+ * pmd_is_valid_softleaf() - Is this PMD entry a valid softleaf entry?
* @pmd: PMD entry.
*
* PMD leaf entries are valid only if they are device private or migration
@@ -620,9 +633,27 @@ static inline bool pmd_is_valid_softleaf(pmd_t pmd)
{
const softleaf_t entry = softleaf_from_pmd(pmd);
- /* Only device private, migration entries valid for PMD. */
- return softleaf_is_device_private(entry) ||
- softleaf_is_migration(entry);
+ return softleaf_is_valid_pmd_entry(entry);
+}
+
+/**
+ * pmd_to_softleaf_folio() - Convert the PMD entry to a folio.
+ * @pmd: PMD entry.
+ *
+ * The PMD entry is expected to be a valid PMD softleaf entry.
+ *
+ * Returns: the folio the softleaf entry references if this is a valid softleaf
+ * entry, otherwise NULL.
+ */
+static inline struct folio *pmd_to_softleaf_folio(pmd_t pmd)
+{
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ if (!softleaf_is_valid_pmd_entry(entry)) {
+ VM_WARN_ON_ONCE(true);
+ return NULL;
+ }
+ return softleaf_to_folio(entry);
}
#endif /* CONFIG_MMU */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 00346ce3af5e..5c085ef4eda7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1205,7 +1205,6 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern u64 ata_qc_get_active(struct ata_port *ap);
-extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct gendisk *unused,
sector_t capacity, int geom[]);
@@ -1226,7 +1225,8 @@ extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
-extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+int ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ struct list_head *eh_q);
/*
* SATA specific code - drivers/ata/libata-sata.c
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index b72b8cdba765..feb60ba4e30e 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -30,8 +30,8 @@
* @default_rate: Default sampling rate. 0 means reset default
* @setup_resources: Interrupt line setup call back function
* @release_resources: Interrupt line release call back function
- * @st_min_limits[3]: Selftest acceptance minimum values
- * @st_max_limits[3]: Selftest acceptance maximum values
+ * @st_min_limits: Selftest acceptance minimum values (x, y, z)
+ * @st_max_limits: Selftest acceptance maximum values (x, y, z)
* @irq2: Irq line 2 number
*
* Platform data is used to setup the sensor chip. Meaning of the different
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 382c56a97bba..584db296e43b 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -94,7 +94,7 @@ struct common_audit_data {
#endif
char *kmod_name;
struct lsm_ioctlop_audit *op;
- struct file *file;
+ const struct file *file;
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
int reason;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 8c42b4bde09c..2b8dfb35caed 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -191,6 +191,9 @@ LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
LSM_HOOK(int, 0, file_alloc_security, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+LSM_HOOK(int, 0, backing_file_alloc, struct file *backing_file,
+ const struct file *user_file)
+LSM_HOOK(void, LSM_RET_VOID, backing_file_free, struct file *backing_file)
LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
unsigned long arg)
LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
@@ -198,6 +201,8 @@ LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
+LSM_HOOK(int, 0, mmap_backing_file, struct vm_area_struct *vma,
+ struct file *backing_file, struct file *user_file)
LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot)
LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
@@ -317,6 +322,11 @@ LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
LSM_HOOK(int, 0, watch_key, struct key *key)
#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+#if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_SECURITY_PATH)
+LSM_HOOK(int, 0, unix_find, const struct path *path, struct sock *other,
+ int flags)
+#endif /* CONFIG_SECURITY_NETWORK && CONFIG_SECURITY_PATH */
+
#ifdef CONFIG_SECURITY_NETWORK
LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
struct sock *newsk)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index d48bf0ad26f4..b4f8cad53ddb 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -104,6 +104,7 @@ struct security_hook_list {
struct lsm_blob_sizes {
unsigned int lbs_cred;
unsigned int lbs_file;
+ unsigned int lbs_backing_file;
unsigned int lbs_ib;
unsigned int lbs_inode;
unsigned int lbs_sock;
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 7b8aad47121e..0c464eade1d6 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -139,6 +139,7 @@ enum maple_type {
maple_leaf_64,
maple_range_64,
maple_arange_64,
+ maple_copy,
};
enum store_type {
@@ -154,6 +155,46 @@ enum store_type {
wr_slot_store,
};
+struct maple_copy {
+ /*
+ * min, max, and pivots are values
+ * start, end, split are indexes into arrays
+ * data is a size
+ */
+
+ struct {
+ struct maple_node *node;
+ unsigned long max;
+ enum maple_type mt;
+ } dst[3];
+ struct {
+ struct maple_node *node;
+ unsigned long max;
+ unsigned char start;
+ unsigned char end;
+ enum maple_type mt;
+ } src[4];
+ /* Simulated node */
+ void __rcu *slot[3];
+ unsigned long gap[3];
+ unsigned long min;
+ union {
+ unsigned long pivot[3];
+ struct {
+ void *_pad[2];
+ unsigned long max;
+ };
+ };
+ unsigned char end;
+
+ /* Avoid passing these around */
+ unsigned char s_count;
+ unsigned char d_count;
+ unsigned char split;
+ unsigned char data;
+ unsigned char height;
+};
+
/**
* DOC: Maple tree flags
*
@@ -299,6 +340,7 @@ struct maple_node {
};
struct maple_range_64 mr64;
struct maple_arange_64 ma64;
+ struct maple_copy cp;
};
};
diff --git a/include/linux/math.h b/include/linux/math.h
index 6dc1d1d32fbc..1e8fb3efbc8c 100644
--- a/include/linux/math.h
+++ b/include/linux/math.h
@@ -89,23 +89,7 @@
} \
)
-/*
- * Divide positive or negative dividend by positive or negative divisor
- * and round to closest integer. Result is undefined for negative
- * divisors if the dividend variable type is unsigned and for negative
- * dividends if the divisor variable type is unsigned.
- */
-#define DIV_ROUND_CLOSEST(x, divisor)( \
-{ \
- typeof(x) __x = x; \
- typeof(divisor) __d = divisor; \
- (((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || \
- (((__x) > 0) == ((__d) > 0))) ? \
- (((__x) + ((__d) / 2)) / (__d)) : \
- (((__x) - ((__d) / 2)) / (__d)); \
-} \
-)
+#define DIV_ROUND_CLOSEST __KERNEL_DIV_ROUND_CLOSEST
/*
* Same as above but for u64 dividends. divisor must be a 32-bit
* number.
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
deleted file mode 100644
index cea443a672cb..000000000000
--- a/include/linux/mdio-gpio.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_MDIO_GPIO_H
-#define __LINUX_MDIO_GPIO_H
-
-#define MDIO_GPIO_MDC 0
-#define MDIO_GPIO_MDIO 1
-#define MDIO_GPIO_MDO 2
-
-#endif
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 5d1203b9af20..f4f9d9609448 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -688,8 +688,6 @@ static inline int mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
val);
}
-int mdiobus_register_device(struct mdio_device *mdiodev);
-int mdiobus_unregister_device(struct mdio_device *mdiodev);
bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 6ec5e9ac0699..9eac4f268359 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -155,6 +155,7 @@ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
+int memblock_reserved_mark_kern(phys_addr_t base, phys_addr_t size);
int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size);
int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 70b685a85bf4..5173a9f16721 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,10 +35,10 @@ enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
MEMCG_PERCPU_B,
- MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_ZSWAP_B,
MEMCG_ZSWAPPED,
+ MEMCG_ZSWAP_INCOMP,
MEMCG_NR_STAT,
};
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index c328a7b356d0..b4fda09dab9f 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -18,6 +18,8 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
*/
int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
struct file *memfd_alloc_file(const char *name, unsigned int flags);
+int memfd_get_seals(struct file *file);
+int memfd_add_seals(struct file *file, unsigned int seals);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -37,6 +39,16 @@ static inline struct file *memfd_alloc_file(const char *name, unsigned int flags
{
return ERR_PTR(-EINVAL);
}
+
+static inline int memfd_get_seals(struct file *file)
+{
+ return -EINVAL;
+}
+
+static inline int memfd_add_seals(struct file *file, unsigned int seals)
+{
+ return -EINVAL;
+}
#endif
#endif /* __LINUX_MEMFD_H */
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 96987d9d95a8..7999c58629ee 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -52,7 +52,7 @@ int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
struct memory_dev_type *mt_find_alloc_memory_type(int adist,
struct list_head *memory_types);
void mt_put_memory_types(struct list_head *memory_types);
-#ifdef CONFIG_MIGRATION
+#ifdef CONFIG_NUMA_MIGRATION
int next_demotion_node(int node, const nodemask_t *allowed_mask);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
bool node_is_toptier(int node);
diff --git a/include/linux/memory.h b/include/linux/memory.h
index faeaa921e55b..5bb5599c6b2b 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -19,6 +19,7 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
+#include <linux/memory_hotplug.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
@@ -77,7 +78,7 @@ enum memory_block_state {
struct memory_block {
unsigned long start_section_nr;
enum memory_block_state state; /* serialized by the dev->lock */
- int online_type; /* for passing data to online routine */
+ enum mmop online_type; /* for passing data to online routine */
int nid; /* NID for this memory block */
/*
* The single zone of this memory block if all PFNs of this memory block
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index f2f16cdd73ee..815e908c4135 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,11 +16,8 @@ struct resource;
struct vmem_altmap;
struct dev_pagemap;
-#ifdef CONFIG_MEMORY_HOTPLUG
-struct page *pfn_to_online_page(unsigned long pfn);
-
/* Types for control the zone type of onlined and offlined memory */
-enum {
+enum mmop {
/* Offline the memory. */
MMOP_OFFLINE = 0,
/* Online the memory. Zone depends, see default_zone_for_pfn(). */
@@ -31,6 +28,9 @@ enum {
MMOP_ONLINE_MOVABLE,
};
+#ifdef CONFIG_MEMORY_HOTPLUG
+struct page *pfn_to_online_page(unsigned long pfn);
+
/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;
@@ -286,8 +286,8 @@ static inline void __remove_memory(u64 start, u64 size) {}
#ifdef CONFIG_MEMORY_HOTPLUG
/* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int mhp_get_default_online_type(void);
-extern void mhp_set_default_online_type(int online_type);
+extern enum mmop mhp_get_default_online_type(void);
+extern void mhp_set_default_online_type(enum mmop online_type);
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
@@ -308,10 +308,8 @@ extern int sparse_add_section(int nid, unsigned long pfn,
struct dev_pagemap *pgmap);
extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
struct vmem_altmap *altmap);
-extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
- unsigned long pnum);
-extern struct zone *zone_for_pfn_range(int online_type, int nid,
- struct memory_group *group, unsigned long start_pfn,
+extern struct zone *zone_for_pfn_range(enum mmop online_type,
+ int nid, struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index f72e6d4b14a7..d465dcd8c90a 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -117,11 +117,6 @@ struct arizona_pdata {
/** Check for line output with HPDET method */
bool hpdet_acc_id_line;
-#ifdef CONFIG_GPIOLIB_LEGACY
- /** GPIO used for mic isolation with HPDET */
- int hpdet_id_gpio;
-#endif
-
/** Channel to use for headphone detection */
unsigned int hpdet_channel;
@@ -131,11 +126,6 @@ struct arizona_pdata {
/** Extra debounce timeout used during initial mic detection (ms) */
unsigned int micd_detect_debounce;
-#ifdef CONFIG_GPIOLIB_LEGACY
- /** GPIO for mic detection polarity */
- int micd_pol_gpio;
-#endif
-
/** Mic detect ramp rate */
unsigned int micd_bias_start_time;
diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h
index c39a49269cb7..68831f113589 100644
--- a/include/linux/mfd/cs42l43-regs.h
+++ b/include/linux/mfd/cs42l43-regs.h
@@ -1181,4 +1181,74 @@
 /* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */
 #define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F
+/* CS42L43B VARIANT REGISTERS */
+#define CS42L43B_DEVID_VAL 0x0042A43B
+
+#define CS42L43B_DECIM_VOL_CTRL_CH1_CH2 0x00008280
+#define CS42L43B_DECIM_VOL_CTRL_CH3_CH4 0x00008284
+
+#define CS42L43B_DECIM_VOL_CTRL_CH5_CH6 0x00008290
+#define CS42L43B_DECIM_VOL_CTRL_UPDATE 0x0000829C
+
+#define CS42L43B_DECIM_HPF_WNF_CTRL5 0x000082A0
+#define CS42L43B_DECIM_HPF_WNF_CTRL6 0x000082A4
+
+#define CS42L43B_SWIRE_DP3_CH3_INPUT 0x0000C320
+#define CS42L43B_SWIRE_DP3_CH4_INPUT 0x0000C330
+#define CS42L43B_SWIRE_DP4_CH3_INPUT 0x0000C340
+#define CS42L43B_SWIRE_DP4_CH4_INPUT 0x0000C350
+
+#define CS42L43B_ISRC1DEC3_INPUT1 0x0000C780
+#define CS42L43B_ISRC1DEC4_INPUT1 0x0000C790
+#define CS42L43B_ISRC2DEC3_INPUT1 0x0000C7A0
+#define CS42L43B_ISRC2DEC4_INPUT1 0x0000C7B0
+
+#define CS42L43B_FW_MISSION_CTRL_NEED_CONFIGS 0x00117E00
+#define CS42L43B_FW_MISSION_CTRL_HAVE_CONFIGS 0x00117E04
+#define CS42L43B_FW_MISSION_CTRL_PATCH_START_ADDR_REG 0x00117E08
+#define CS42L43B_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x00117E0C
+#define CS42L43B_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x00117E10
+
+#define CS42L43B_MCU_SW_REV 0x00117314
+#define CS42L43B_PATCH_START_ADDR 0x00117318
+#define CS42L43B_CONFIG_SELECTION 0x0011731C
+#define CS42L43B_NEED_CONFIGS 0x00117320
+#define CS42L43B_BOOT_STATUS 0x00117330
+
+#define CS42L43B_MCU_RAM_MAX 0x00117FFF
+
+/* CS42L43B_DECIM_VOL_CTRL_CH5_CH6 */
+#define CS42L43B_DECIM6_MUTE_MASK 0x80000000
+#define CS42L43B_DECIM6_MUTE_SHIFT 31
+#define CS42L43B_DECIM6_VOL_MASK 0x3FC00000
+#define CS42L43B_DECIM6_VOL_SHIFT 22
+#define CS42L43B_DECIM6_PATH1_VOL_FALL_RATE_MASK 0x00380000
+#define CS42L43B_DECIM6_PATH1_VOL_FALL_RATE_SHIFT 19
+#define CS42L43B_DECIM6_PATH1_VOL_RISE_RATE_MASK 0x00070000
+#define CS42L43B_DECIM6_PATH1_VOL_RISE_RATE_SHIFT 16
+#define CS42L43B_DECIM5_MUTE_MASK 0x00008000
+#define CS42L43B_DECIM5_MUTE_SHIFT 15
+#define CS42L43B_DECIM5_VOL_MASK 0x00003FC0
+#define CS42L43B_DECIM5_VOL_SHIFT 6
+#define CS42L43B_DECIM5_PATH1_VOL_FALL_RATE_MASK 0x00000038
+#define CS42L43B_DECIM5_PATH1_VOL_FALL_RATE_SHIFT 3
+#define CS42L43B_DECIM5_PATH1_VOL_RISE_RATE_MASK 0x00000007
+#define CS42L43B_DECIM5_PATH1_VOL_RISE_RATE_SHIFT 0
+
+/* CS42L43B_DECIM_VOL_CTRL_UPDATE */
+#define CS42L43B_DECIM6_PATH1_VOL_TRIG_MASK 0x00000800
+#define CS42L43B_DECIM6_PATH1_VOL_TRIG_SHIFT 11
+#define CS42L43B_DECIM5_PATH1_VOL_TRIG_MASK 0x00000100
+#define CS42L43B_DECIM5_PATH1_VOL_TRIG_SHIFT 8
+#define CS42L43B_DECIM4_VOL_UPDATE_MASK 0x00000020
+#define CS42L43B_DECIM4_VOL_UPDATE_SHIFT 5
+
+/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */
+#define CS42L43B_ISRC_DEC4_EN_MASK 0x00000008
+#define CS42L43B_ISRC_DEC4_EN_SHIFT 3
+#define CS42L43B_ISRC_DEC4_EN_WIDTH 1
+#define CS42L43B_ISRC_DEC3_EN_MASK 0x00000004
+#define CS42L43B_ISRC_DEC3_EN_SHIFT 2
+#define CS42L43B_ISRC_DEC3_EN_WIDTH 1
+
#endif /* CS42L43_CORE_REGS_H */
diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h
index 2239d8585e78..ff0f7e365a19 100644
--- a/include/linux/mfd/cs42l43.h
+++ b/include/linux/mfd/cs42l43.h
@@ -98,6 +98,7 @@ struct cs42l43 {
bool sdw_pll_active;
bool attached;
bool hw_lock;
+ long variant_id;
};
#endif /* CS42L43_CORE_EXT_H */
diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h
index 643c096b93ac..2dbd80abfd1d 100644
--- a/include/linux/mfd/kempld.h
+++ b/include/linux/mfd/kempld.h
@@ -37,6 +37,7 @@
#define KEMPLD_SPEC_GET_MINOR(x) (x & 0x0f)
#define KEMPLD_SPEC_GET_MAJOR(x) ((x >> 4) & 0x0f)
#define KEMPLD_IRQ_GPIO 0x35
+#define KEMPLD_IRQ_GPIO_MASK 0x0f
#define KEMPLD_IRQ_I2C 0x36
#define KEMPLD_CFG 0x37
#define KEMPLD_CFG_GPIO_I2C_MUX (1 << 0)
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ca691641788b..9c6f9817383f 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -33,6 +33,7 @@
#define PHY_ID_LAN8804 0x00221670
#define PHY_ID_LAN8841 0x00221650
#define PHY_ID_LAN8842 0x002216C0
+#define PHY_ID_LAN9645X 0x002216D0
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
index 517288da19fd..7da956c666a0 100644
--- a/include/linux/microchipphy.h
+++ b/include/linux/microchipphy.h
@@ -61,6 +61,11 @@
/* Registers specific to the LAN7800/LAN7850 embedded phy */
#define LAN78XX_PHY_LED_MODE_SELECT (0x1D)
+/* PHY Control 3 register (page 1) */
+#define LAN78XX_PHY_CTRL3 (0x14)
+#define LAN78XX_PHY_CTRL3_AUTO_DOWNSHIFT BIT(4)
+#define LAN78XX_PHY_CTRL3_DOWNSHIFT_CTRL_MASK GENMASK(3, 2)
+
/* DSP registers */
#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A)
#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b37fe39cef27..07a25f264292 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -293,6 +293,7 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
+#define MLX5_UMR_ALIGN (2048)
#define MLX5_UMR_FLEX_ALIGNMENT 0x40
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
@@ -1259,6 +1260,7 @@ enum mlx5_cap_type {
MLX5_CAP_PORT_SELECTION = 0x25,
MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
MLX5_CAP_ADV_RDMA = 0x28,
+ MLX5_CAP_TLP_EMULATION = 0x2a,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1481,6 +1483,14 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(virtio_emulation_cap, \
(mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
+#define MLX5_CAP_DEV_TLP_EMULATION(mdev, cap)\
+ MLX5_GET(tlp_dev_emu_capabilities, \
+ (mdev)->caps.hca[MLX5_CAP_TLP_EMULATION]->cur, cap)
+
+#define MLX5_CAP64_DEV_TLP_EMULATION(mdev, cap)\
+ MLX5_GET64(tlp_dev_emu_capabilities, \
+ (mdev)->caps.hca[MLX5_CAP_TLP_EMULATION]->cur, cap)
+
#define MLX5_CAP_IPSEC(mdev, cap)\
MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 04dcd09f7517..e1ded9cf0f70 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -550,7 +550,7 @@ struct mlx5_debugfs_entries {
};
enum mlx5_func_type {
- MLX5_PF,
+ MLX5_SELF,
MLX5_VF,
MLX5_SF,
MLX5_HOST_PF,
@@ -755,7 +755,6 @@ struct mlx5_core_dev {
} caps;
struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
- phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
phys_addr_t bar_addr;
enum mlx5_device_state state;
@@ -798,6 +797,7 @@ struct mlx5_core_dev {
enum mlx5_wc_state wc_state;
/* sync write combining state */
struct mutex wc_state_lock;
+ struct devlink *shd;
};
struct mlx5_db {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 9cadb1d5e6df..d8f3b7ef319e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -55,6 +55,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
MLX5_FLOW_DESTINATION_TYPE_RANGE,
MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
+ MLX5_FLOW_DESTINATION_TYPE_VHCA_RX,
};
enum {
@@ -190,6 +191,9 @@ struct mlx5_flow_destination {
struct mlx5_flow_table *ft;
struct mlx5_fc *counter;
struct {
+ u16 id;
+ } vhca;
+ struct {
u16 num;
u16 vhca_id;
struct mlx5_pkt_reformat *pkt_reformat;
@@ -248,9 +252,9 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr, u16 vport);
-struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
- struct mlx5_flow_namespace *ns,
- int prio, u32 level);
+struct mlx5_flow_table *
+mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
diff --git a/include/linux/mlx5/lag.h b/include/linux/mlx5/lag.h
new file mode 100644
index 000000000000..ab9f754664e5
--- /dev/null
+++ b/include/linux/mlx5/lag.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LAG_API_H__
+#define __MLX5_LAG_API_H__
+
+#include <linux/types.h>
+
+struct mlx5_core_dev;
+struct mlx5_flow_table;
+struct mlx5_flow_table_attr;
+
+int mlx5_lag_demux_init(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table_attr *ft_attr);
+void mlx5_lag_demux_cleanup(struct mlx5_core_dev *dev);
+int mlx5_lag_demux_rule_add(struct mlx5_core_dev *dev, u16 vport_num,
+ int vport_index);
+void mlx5_lag_demux_rule_del(struct mlx5_core_dev *dev, int vport_index);
+int mlx5_lag_get_dev_seq(struct mlx5_core_dev *dev);
+
+#endif /* __MLX5_LAG_API_H__ */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 775cb0c56865..49f3ad4b1a7c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -469,7 +469,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reformat_and_fwd_to_table[0x1];
- u8 reserved_at_1a[0x2];
+ u8 forward_vhca_rx[0x1];
+ u8 reserved_at_1b[0x1];
u8 ipsec_encrypt[0x1];
u8 ipsec_decrypt[0x1];
u8 sw_owner_v2[0x1];
@@ -1389,6 +1390,26 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
u8 reserved_at_1c0[0x640];
};
+struct mlx5_ifc_tlp_dev_emu_capabilities_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x13];
+ u8 log_tlp_rsp_gw_page_stride[0x5];
+ u8 reserved_at_38[0x8];
+
+ u8 reserved_at_40[0xc0];
+
+ u8 reserved_at_100[0xc];
+ u8 tlp_rsp_gw_num_pages[0x4];
+ u8 reserved_at_110[0x10];
+
+ u8 reserved_at_120[0xa0];
+
+ u8 tlp_rsp_gw_pages_bar_offset[0x40];
+
+ u8 reserved_at_200[0x600];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -1633,6 +1654,11 @@ enum {
MLX5_STEERING_FORMAT_CONNECTX_8 = 3,
};
+enum {
+ MLX5_ID_MODE_FUNCTION_INDEX = 0,
+ MLX5_ID_MODE_FUNCTION_VHCA_ID = 1,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x6];
u8 page_request_disable[0x1];
@@ -1895,7 +1921,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0x7];
+ u8 icm_mng_function_id_mode[0x1];
+ u8 reserved_at_2a1[0x6];
u8 mkey_pcie_tph[0x1];
u8 reserved_at_2a8[0x1];
u8 tis_tir_td_order[0x1];
@@ -1947,7 +1974,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_360[0x3];
u8 log_max_rq[0x5];
- u8 reserved_at_368[0x3];
+ u8 ft_alias_sw_vhca_id[0x1];
+ u8 reserved_at_369[0x2];
u8 log_max_sq[0x5];
u8 reserved_at_370[0x3];
u8 log_max_tir[0x5];
@@ -1961,7 +1989,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_rqt[0x5];
u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_398[0x1];
+ u8 tlp_device_emulation_manager[0x1];
u8 vnic_env_cnt_bar_uar_access[0x1];
u8 vnic_env_cnt_odp_page_fault[0x1];
u8 log_max_tis_per_sq[0x5];
@@ -1992,12 +2020,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
u8 reserved_at_3e8[0x1];
- u8 silent_mode[0x1];
+ u8 silent_mode_set[0x1];
u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
- u8 reserved_at_3f8[0x3];
+ u8 reserved_at_3f8[0x1];
+ u8 silent_mode_query[0x1];
+ u8 reserved_at_3fa[0x1];
u8 log_max_current_uc_list[0x5];
u8 general_obj_types[0x40];
@@ -2173,7 +2203,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 sf_eq_usage[0x1];
u8 reserved_at_d3[0x5];
u8 multiplane[0x1];
- u8 reserved_at_d9[0x7];
+ u8 migration_state[0x1];
+ u8 reserved_at_da[0x6];
u8 cross_vhca_object_to_object_supported[0x20];
@@ -2259,6 +2290,7 @@ enum mlx5_ifc_flow_destination_type {
MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VHCA_RX = 0x4,
MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
@@ -3830,6 +3862,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_tlp_dev_emu_capabilities_bits tlp_dev_emu_capabilities;
struct mlx5_ifc_macsec_cap_bits macsec_cap;
struct mlx5_ifc_crypto_cap_bits crypto_cap;
struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
@@ -6244,7 +6277,9 @@ struct mlx5_ifc_query_l2_table_entry_out_bits {
u8 reserved_at_40[0xa0];
- u8 reserved_at_e0[0x13];
+ u8 reserved_at_e0[0x11];
+ u8 silent_mode[0x1];
+ u8 reserved_at_f2[0x1];
u8 vlan_valid[0x1];
u8 vlan[0xc];
@@ -6260,7 +6295,10 @@ struct mlx5_ifc_query_l2_table_entry_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x60];
+ u8 reserved_at_40[0x40];
+
+ u8 silent_mode_query[0x1];
+ u8 reserved_at_81[0x1f];
u8 reserved_at_a0[0x8];
u8 table_index[0x18];
@@ -6927,7 +6965,9 @@ struct mlx5_ifc_create_match_definer_out_bits {
struct mlx5_ifc_alias_context_bits {
u8 vhca_id_to_be_accessed[0x10];
- u8 reserved_at_10[0xd];
+ u8 reserved_at_10[0xb];
+ u8 vhca_id_type[0x1];
+ u8 reserved_at_1c[0x1];
u8 status[0x3];
u8 object_id_to_be_accessed[0x20];
u8 reserved_at_40[0x40];
@@ -10824,7 +10864,9 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
u8 fec_200G_per_lane_in_pplm[0x1];
u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
- u8 reserved_at_49[0xa];
+ u8 reserved_at_49[0x2];
+ u8 shp_pbmc_pbsr_support[0x1];
+ u8 reserved_at_4c[0x7];
u8 buffer_ownership[0x1];
u8 resereved_at_54[0x14];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -12069,8 +12111,9 @@ struct mlx5_ifc_pbmc_reg_bits {
u8 port_buffer_size[0x10];
struct mlx5_ifc_bufferx_reg_bits buffer[10];
+ struct mlx5_ifc_bufferx_reg_bits shared_headroom_pool;
- u8 reserved_at_2e0[0x80];
+ u8 reserved_at_320[0x40];
};
struct mlx5_ifc_sbpr_reg_bits {
@@ -13280,13 +13323,24 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits {
u8 reserved_at_60[0x20];
};
+enum {
+ MLX5_QUERY_VHCA_MIG_STATE_UNINITIALIZED = 0x0,
+ MLX5_QUERY_VHCA_MIG_STATE_OPER_MIGRATION_IDLE = 0x1,
+ MLX5_QUERY_VHCA_MIG_STATE_OPER_MIGRATION_READY = 0x2,
+ MLX5_QUERY_VHCA_MIG_STATE_OPER_MIGRATION_DIRTY = 0x3,
+ MLX5_QUERY_VHCA_MIG_STATE_OPER_MIGRATION_INIT = 0x4,
+};
+
struct mlx5_ifc_query_vhca_migration_state_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+
+ u8 migration_state[0x4];
+ u8 reserved_at_64[0x1c];
u8 required_umem_size[0x20];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index abb4963c1f06..8260e28205e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -27,7 +27,6 @@
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
-#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
@@ -208,8 +207,6 @@ static inline void __mm_zero_struct_page(struct page *page)
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-extern int sysctl_max_map_count;
-
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
@@ -349,9 +346,9 @@ enum {
* if KVM does not lock down the memory type.
*/
DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32)
DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
-#else
+#elif defined(CONFIG_64BIT)
DECLARE_VMA_BIT(DROPPABLE, 40),
#endif
DECLARE_VMA_BIT(UFFD_MINOR, 41),
@@ -466,8 +463,10 @@ enum {
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
defined(CONFIG_RISCV_USER_CFI)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
+#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -506,32 +505,41 @@ enum {
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
#define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE)
+#define VMA_DROPPABLE mk_vma_flags(VMA_DROPPABLE_BIT)
#else
#define VM_DROPPABLE VM_NONE
+#define VMA_DROPPABLE EMPTY_VMA_FLAGS
#endif
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
-#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
+ VMA_EXEC_BIT : VMA_READ_BIT)
/* Common data flag combinations */
-#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
- VM_MAYWRITE | VM_MAYEXEC)
-#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
+#define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
+ VMA_MAYEXEC_BIT)
+#define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
+#define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
+ VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
+ VMA_MAYEXEC_BIT)
+
+#ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */
+#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC
#endif
-#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
-#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+#ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */
+#define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
#endif
-#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+#define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
+ VMA_STACK_BIT, VMA_ACCOUNT_BIT)
+
+/* Temporary until VMA flags conversion complete. */
+#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
#define VM_SEALED_SYSMAP VM_SEALED
@@ -539,15 +547,17 @@ enum {
#define VM_SEALED_SYSMAP VM_NONE
#endif
-#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
-
/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
/*
* Special vmas that are non-mergable, non-mlock()able.
*/
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+
+#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
+ VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
+#define VM_SPECIAL vma_flags_to_legacy(VMA_SPECIAL_FLAGS)
/*
* Physically remapped pages are special. Tell the
@@ -574,6 +584,8 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
+#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)
+
/* These flags can be updated atomically via VMA/mmap read lock. */
#define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
@@ -588,27 +600,32 @@ enum {
* possesses it but the other does not, the merged VMA should nonetheless have
* applied to it:
*
- * VM_SOFTDIRTY - if a VMA is marked soft-dirty, that is has not had its
- * references cleared via /proc/$pid/clear_refs, any merged VMA
- * should be considered soft-dirty also as it operates at a VMA
- * granularity.
+ * VMA_SOFTDIRTY_BIT - if a VMA is marked soft-dirty, that is has not had its
+ * references cleared via /proc/$pid/clear_refs, any
+ * merged VMA should be considered soft-dirty also as it
+ * operates at a VMA granularity.
*
- * VM_MAYBE_GUARD - If a VMA may have guard regions in place it implies that
- * mapped page tables may contain metadata not described by the
- * VMA and thus any merged VMA may also contain this metadata,
- * and thus we must make this flag sticky.
+ * VMA_MAYBE_GUARD_BIT - If a VMA may have guard regions in place it implies
+ * that mapped page tables may contain metadata not
+ * described by the VMA and thus any merged VMA may also
+ * contain this metadata, and thus we must make this flag
+ * sticky.
*/
-#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
+#else
+#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
+#endif
/*
* VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
* of these flags and the other not does not preclude a merge.
*
- * VM_STICKY - When merging VMAs, VMA flags must match, unless they are
- * 'sticky'. If any sticky flags exist in either VMA, we simply
- * set all of them on the merged VMA.
+ * VMA_STICKY_FLAGS - When merging VMAs, VMA flags must match, unless they
+ * are 'sticky'. If any sticky flags exist in either VMA,
+ * we simply set all of them on the merged VMA.
*/
-#define VM_IGNORE_MERGE VM_STICKY
+#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS
/*
* Flags which should result in page tables being copied on fork. These are
@@ -747,15 +764,37 @@ struct vm_fault {
* to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
- void (*open)(struct vm_area_struct * area);
+ /**
+ * @open: Called when a VMA is remapped, split or forked. Not called
+ * upon first mapping a VMA.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ void (*open)(struct vm_area_struct *vma);
/**
* @close: Called when the VMA is being removed from the MM.
* Context: User context. May sleep. Caller holds mmap_lock.
*/
- void (*close)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct *vma);
+ /**
+ * @mapped: Called when the VMA is first mapped in the MM. Not called if
+ * the new VMA is merged with an adjacent VMA.
+ *
+ * The @vm_private_data field is an output field allowing the user to
+ * modify vma->vm_private_data as necessary.
+ *
+ * ONLY valid if set from f_op->mmap_prepare. Will result in an error if
+ * set from f_op->mmap.
+ *
+ * Returns %0 on success, or an error otherwise. On error, the VMA will
+ * be unmapped.
+ *
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ int (*mapped)(unsigned long start, unsigned long end, pgoff_t pgoff,
+ const struct file *file, void **vm_private_data);
/* Called any time before splitting to check if it's allowed */
- int (*may_split)(struct vm_area_struct *area, unsigned long addr);
- int (*mremap)(struct vm_area_struct *area);
+ int (*may_split)(struct vm_area_struct *vma, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *vma);
/*
* Called by mprotect() to make driver-specific permission
* checks before mprotect() is finalised. The VMA must not
@@ -767,7 +806,7 @@ struct vm_operations_struct {
vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
- unsigned long (*pagesize)(struct vm_area_struct * area);
+ unsigned long (*pagesize)(struct vm_area_struct *vma);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
@@ -937,22 +976,20 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_init(vma, flags);
}
-static inline void vm_flags_reset_once(struct vm_area_struct *vma,
- vm_flags_t flags)
+static inline void vma_flags_reset_once(struct vm_area_struct *vma,
+ vma_flags_t *flags)
{
- vma_assert_write_locked(vma);
- /*
- * If VMA flags exist beyond the first system word, also clear these. It
- * is assumed the write once behaviour is required only for the first
- * system word.
- */
+ const unsigned long word = flags->__vma_flags[0];
+
+ /* It is assumed only the first system word must be written once. */
+ vma_flags_overwrite_word_once(&vma->flags, word);
+ /* The remainder can be copied normally. */
if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
- unsigned long *bitmap = vma->flags.__vma_flags;
+ unsigned long *dst = &vma->flags.__vma_flags[1];
+ const unsigned long *src = &flags->__vma_flags[1];
- bitmap_zero(&bitmap[1], NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+ bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
}
-
- vma_flags_overwrite_word_once(&vma->flags, flags);
}
static inline void vm_flags_set(struct vm_area_struct *vma,
@@ -991,7 +1028,8 @@ static inline void vm_flags_mod(struct vm_area_struct *vma,
__vm_flags_mod(vma, set, clear);
}
-static inline bool __vma_atomic_valid_flag(struct vm_area_struct *vma, vma_flag_t bit)
+static __always_inline bool __vma_atomic_valid_flag(struct vm_area_struct *vma,
+ vma_flag_t bit)
{
const vm_flags_t mask = BIT((__force int)bit);
@@ -1006,7 +1044,8 @@ static inline bool __vma_atomic_valid_flag(struct vm_area_struct *vma, vma_flag_
* Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
* valid flags are allowed to do this.
*/
-static inline void vma_set_atomic_flag(struct vm_area_struct *vma, vma_flag_t bit)
+static __always_inline void vma_set_atomic_flag(struct vm_area_struct *vma,
+ vma_flag_t bit)
{
unsigned long *bitmap = vma->flags.__vma_flags;
@@ -1022,7 +1061,8 @@ static inline void vma_set_atomic_flag(struct vm_area_struct *vma, vma_flag_t bi
* This is necessarily racey, so callers must ensure that serialisation is
* achieved through some other means, or that races are permissible.
*/
-static inline bool vma_test_atomic_flag(struct vm_area_struct *vma, vma_flag_t bit)
+static __always_inline bool vma_test_atomic_flag(struct vm_area_struct *vma,
+ vma_flag_t bit)
{
if (__vma_atomic_valid_flag(vma, bit))
return test_bit((__force int)bit, &vma->vm_flags);
@@ -1031,21 +1071,21 @@ static inline bool vma_test_atomic_flag(struct vm_area_struct *vma, vma_flag_t b
}
/* Set an individual VMA flag in flags, non-atomically. */
-static inline void vma_flag_set(vma_flags_t *flags, vma_flag_t bit)
+static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
+ vma_flag_t bit)
{
unsigned long *bitmap = flags->__vma_flags;
__set_bit((__force int)bit, bitmap);
}
-static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits)
+static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
+ size_t count, const vma_flag_t *bits)
{
- vma_flags_t flags;
int i;
- vma_flags_clear_all(&flags);
for (i = 0; i < count; i++)
- vma_flag_set(&flags, bits[i]);
+ vma_flags_set_flag(&flags, bits[i]);
return flags;
}
@@ -1054,16 +1094,73 @@ static inline vma_flags_t __mk_vma_flags(size_t count, const vma_flag_t *bits)
* vma_flags_t bitmap value. E.g.:
*
* vma_flags_t flags = mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,
- * VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT);
+ * VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT);
*
* The compiler cleverly optimises away all of the work and this ends up being
* equivalent to aggregating the values manually.
*/
-#define mk_vma_flags(...) __mk_vma_flags(COUNT_ARGS(__VA_ARGS__), \
- (const vma_flag_t []){__VA_ARGS__})
+#define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS, \
+ COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
+
+/*
+ * Helper macro which acts like mk_vma_flags, only appending to a copy of the
+ * specified flags rather than establishing new flags. E.g.:
+ *
+ * vma_flags_t flags = append_vma_flags(VMA_STACK_DEFAULT_FLAGS, VMA_STACK_BIT,
+ * VMA_ACCOUNT_BIT);
+ */
+#define append_vma_flags(flags, ...) __mk_vma_flags(flags, \
+ COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})
+
+/* Calculates the number of set bits in the specified VMA flags. */
+static __always_inline int vma_flags_count(const vma_flags_t *flags)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
+}
+
+/*
+ * Test whether a specific VMA flag is set, e.g.:
+ *
+ * if (vma_flags_test(flags, VMA_READ_BIT)) { ... }
+ */
+static __always_inline bool vma_flags_test(const vma_flags_t *flags,
+ vma_flag_t bit)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return test_bit((__force int)bit, bitmap);
+}
+
+/*
+ * Obtain a set of VMA flags which contain the overlapping flags contained
+ * within flags and to_and.
+ */
+static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
+ vma_flags_t to_and)
+{
+ vma_flags_t dst;
+ unsigned long *bitmap_dst = dst.__vma_flags;
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_to_and = to_and.__vma_flags;
+
+ bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
+ return dst;
+}
+
+/*
+ * Obtain a set of VMA flags which contains the specified overlapping flags,
+ * e.g.:
+ *
+ * vma_flags_t read_flags = vma_flags_and(&flags, VMA_READ_BIT,
+ * VMA_MAY_READ_BIT);
+ */
+#define vma_flags_and(flags, ...) \
+ vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))
/* Test each of to_test flags in flags, non-atomically. */
-static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
+static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
vma_flags_t to_test)
{
const unsigned long *bitmap = flags->__vma_flags;
@@ -1075,10 +1172,10 @@ static __always_inline bool vma_flags_test_mask(const vma_flags_t *flags,
/*
* Test whether any specified VMA flag is set, e.g.:
*
- * if (vma_flags_test(flags, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
+ * if (vma_flags_test_any(flags, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
*/
-#define vma_flags_test(flags, ...) \
- vma_flags_test_mask(flags, mk_vma_flags(__VA_ARGS__))
+#define vma_flags_test_any(flags, ...) \
+ vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))
/* Test that ALL of the to_test flags are set, non-atomically. */
static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
@@ -1098,8 +1195,29 @@ static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
#define vma_flags_test_all(flags, ...) \
vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))
+/*
+ * Helper to test that a flag mask of type vma_flags_t has a SINGLE flag set
+ * (returning false if flagmask has no flags set).
+ *
+ * This is defined to make the semantics clearer when testing an optionally
+ * defined VMA flags mask, e.g.:
+ *
+ * if (vma_flags_test_single_mask(&flags, VMA_DROPPABLE)) { ... }
+ *
+ * When VMA_DROPPABLE is defined if available, or set to EMPTY_VMA_FLAGS
+ * otherwise.
+ */
+static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
+ vma_flags_t flagmask)
+{
+ VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);
+
+ return vma_flags_test_any_mask(flags, flagmask);
+}
+
/* Set each of the to_set flags in flags, non-atomically. */
-static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
+static __always_inline void vma_flags_set_mask(vma_flags_t *flags,
+ vma_flags_t to_set)
{
unsigned long *bitmap = flags->__vma_flags;
const unsigned long *bitmap_to_set = to_set.__vma_flags;
@@ -1116,7 +1234,8 @@ static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t t
vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))
/* Clear all of the to-clear flags in flags, non-atomically. */
-static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
+static __always_inline void vma_flags_clear_mask(vma_flags_t *flags,
+ vma_flags_t to_clear)
{
unsigned long *bitmap = flags->__vma_flags;
const unsigned long *bitmap_to_clear = to_clear.__vma_flags;
@@ -1133,13 +1252,85 @@ static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t
vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
/*
+ * Obtain a VMA flags value containing those flags that are present in flags or
+ * flags_other but not in both.
+ */
+static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
+ const vma_flags_t *flags_other)
+{
+ vma_flags_t dst;
+ const unsigned long *bitmap_other = flags_other->__vma_flags;
+ const unsigned long *bitmap = flags->__vma_flags;
+ unsigned long *bitmap_dst = dst.__vma_flags;
+
+ bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+ return dst;
+}
+
+/* Determine if flags and flags_other have precisely the same flags set. */
+static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
+ const vma_flags_t *flags_other)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_other = flags_other->__vma_flags;
+
+ return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+}
+
+/* Determine if flags and flags_other have precisely the same flags set. */
+static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
+ vma_flags_t flags_other)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+ const unsigned long *bitmap_other = flags_other.__vma_flags;
+
+ return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
+}
+
+/*
+ * Helper macro to determine if only the specific flags are set, e.g.:
+ *
+ * if (vma_flags_same(&flags, VMA_WRITE_BIT) { ... }
+ */
+#define vma_flags_same(flags, ...) \
+ vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))
+
+/*
+ * Test whether a specific flag in the VMA is set, e.g.:
+ *
+ * if (vma_test(vma, VMA_READ_BIT)) { ... }
+ */
+static __always_inline bool vma_test(const struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ return vma_flags_test(&vma->flags, bit);
+}
+
+/* Helper to test any VMA flags in a VMA . */
+static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
+ vma_flags_t flags)
+{
+ return vma_flags_test_any_mask(&vma->flags, flags);
+}
+
+/*
+ * Helper macro for testing whether any VMA flags are set in a VMA,
+ * e.g.:
+ *
+ * if (vma_test_any(vma, VMA_IO_BIT, VMA_PFNMAP_BIT,
+ * VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)) { ... }
+ */
+#define vma_test_any(vma, ...) \
+ vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))
+
+/*
* Helper to test that ALL specified flags are set in a VMA.
*
* Note: appropriate locks must be held, this function does not acquire them for
* you.
*/
-static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
- vma_flags_t flags)
+static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
+ vma_flags_t flags)
{
return vma_flags_test_all_mask(&vma->flags, flags);
}
@@ -1147,10 +1338,28 @@ static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
/*
* Helper macro for checking that ALL specified flags are set in a VMA, e.g.:
*
- * if (vma_test_all_flags(vma, VMA_READ_BIT, VMA_MAYREAD_BIT) { ... }
+ * if (vma_test_all(vma, VMA_READ_BIT, VMA_MAYREAD_BIT) { ... }
+ */
+#define vma_test_all(vma, ...) \
+ vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))
+
+/*
+ * Helper to test that a flag mask of type vma_flags_t has a SINGLE flag set
+ * (returning false if flagmask has no flags set).
+ *
+ * This is useful when a flag needs to be either defined or not depending upon
+ * kernel configuration, e.g.:
+ *
+ * if (vma_test_single_mask(vma, VMA_DROPPABLE)) { ... }
+ *
+ * When VMA_DROPPABLE is defined if available, or set to EMPTY_VMA_FLAGS
+ * otherwise.
*/
-#define vma_test_all_flags(vma, ...) \
- vma_test_all_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
+static __always_inline bool
+vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
+{
+ return vma_flags_test_single_mask(&vma->flags, flagmask);
+}
/*
* Helper to set all VMA flags in a VMA.
@@ -1158,8 +1367,8 @@ static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
* Note: appropriate locks must be held, this function does not acquire them for
* you.
*/
-static inline void vma_set_flags_mask(struct vm_area_struct *vma,
- vma_flags_t flags)
+static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
+ vma_flags_t flags)
{
vma_flags_set_mask(&vma->flags, flags);
}
@@ -1176,26 +1385,69 @@ static inline void vma_set_flags_mask(struct vm_area_struct *vma,
#define vma_set_flags(vma, ...) \
vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
-/* Helper to test all VMA flags in a VMA descriptor. */
-static inline bool vma_desc_test_flags_mask(const struct vm_area_desc *desc,
- vma_flags_t flags)
+/* Helper to clear all VMA flags in a VMA. */
+static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
+ vma_flags_t flags)
{
- return vma_flags_test_mask(&desc->vma_flags, flags);
+ vma_flags_clear_mask(&vma->flags, flags);
}
/*
- * Helper macro for testing VMA flags for an input pointer to a struct
- * vm_area_desc object describing a proposed VMA, e.g.:
+ * Helper macro for clearing VMA flags, e.g.:
*
- * if (vma_desc_test_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT,
+ * vma_clear_flags(vma, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT,
+ * VMA_DONTDUMP_BIT);
+ */
+#define vma_clear_flags(vma, ...) \
+ vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))
+
+/*
+ * Test whether a specific VMA flag is set in a VMA descriptor, e.g.:
+ *
+ * if (vma_desc_test(desc, VMA_READ_BIT)) { ... }
+ */
+static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
+ vma_flag_t bit)
+{
+ return vma_flags_test(&desc->vma_flags, bit);
+}
+
+/* Helper to test any VMA flags in a VMA descriptor. */
+static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
+ vma_flags_t flags)
+{
+ return vma_flags_test_any_mask(&desc->vma_flags, flags);
+}
+
+/*
+ * Helper macro for testing whether any VMA flags are set in a VMA descriptor,
+ * e.g.:
+ *
+ * if (vma_desc_test_any(desc, VMA_IO_BIT, VMA_PFNMAP_BIT,
* VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)) { ... }
*/
-#define vma_desc_test_flags(desc, ...) \
- vma_desc_test_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
+#define vma_desc_test_any(desc, ...) \
+ vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))
+
+/* Helper to test all VMA flags in a VMA descriptor. */
+static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
+ vma_flags_t flags)
+{
+ return vma_flags_test_all_mask(&desc->vma_flags, flags);
+}
+
+/*
+ * Helper macro for testing whether ALL VMA flags are set in a VMA descriptor,
+ * e.g.:
+ *
+ * if (vma_desc_test_all(desc, VMA_READ_BIT, VMA_MAYREAD_BIT)) { ... }
+ */
+#define vma_desc_test_all(desc, ...) \
+ vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))
/* Helper to set all VMA flags in a VMA descriptor. */
-static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
+ vma_flags_t flags)
{
vma_flags_set_mask(&desc->vma_flags, flags);
}
@@ -1211,8 +1463,8 @@ static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
/* Helper to clear all VMA flags in a VMA descriptor. */
-static inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
- vma_flags_t flags)
+static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
+ vma_flags_t flags)
{
vma_flags_clear_mask(&desc->vma_flags, flags);
}
@@ -1292,12 +1544,6 @@ static inline bool vma_is_accessible(const struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
-static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
-{
- return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
- (VM_SHARED | VM_MAYWRITE);
-}
-
static inline bool is_shared_maywrite(const vma_flags_t *flags)
{
return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
@@ -1308,6 +1554,28 @@ static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
return is_shared_maywrite(&vma->flags);
}
+/**
+ * vma_kernel_pagesize - Default page size granularity for this VMA.
+ * @vma: The user mapping.
+ *
+ * The kernel page size specifies in which granularity VMA modifications
+ * can be performed. Folios in this VMA will be aligned to, and at least
+ * the size of the number of bytes returned by this function.
+ *
+ * The default kernel page size is not affected by Transparent Huge Pages
+ * being in effect.
+ *
+ * Return: The default page size granularity for this VMA.
+ */
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+ if (unlikely(vma->vm_ops && vma->vm_ops->pagesize))
+ return vma->vm_ops->pagesize(vma);
+ return PAGE_SIZE;
+}
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
+
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
@@ -1507,7 +1775,7 @@ static inline int folio_put_testzero(struct folio *folio)
*/
static inline bool get_page_unless_zero(struct page *page)
{
- return page_ref_add_unless(page, 1, 0);
+ return page_ref_add_unless_zero(page, 1);
}
static inline struct folio *folio_get_nontail_page(struct page *page)
@@ -1957,7 +2225,7 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
static inline bool is_nommu_shared_vma_flags(const vma_flags_t *flags)
{
- return vma_flags_test(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
+ return vma_flags_test_any(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
}
#endif
@@ -2479,36 +2747,6 @@ static inline unsigned long folio_nr_pages(const struct folio *folio)
return folio_large_nr_pages(folio);
}
-#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
-/*
- * We don't expect any folios that exceed buddy sizes (and consequently
- * memory sections).
- */
-#define MAX_FOLIO_ORDER MAX_PAGE_ORDER
-#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
-/*
- * Only pages within a single memory section are guaranteed to be
- * contiguous. By limiting folios to a single memory section, all folio
- * pages are guaranteed to be contiguous.
- */
-#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
-#elif defined(CONFIG_HUGETLB_PAGE)
-/*
- * There is no real limit on the folio size. We limit them to the maximum we
- * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
- * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
- */
-#define MAX_FOLIO_ORDER get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
-#else
-/*
- * Without hugetlb, gigantic folios that are bigger than a single PUD are
- * currently impossible.
- */
-#define MAX_FOLIO_ORDER PUD_ORDER
-#endif
-
-#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER)
-
/*
* compound_nr() returns the number of pages in this potentially compound
* page. compound_nr() can be called on a tail page, and is defined to
@@ -2667,7 +2905,7 @@ static inline bool folio_maybe_mapped_shared(struct folio *folio)
* The caller must add any reference (e.g., from folio_try_get()) it might be
* holding itself to the result.
*
- * Returns the expected folio refcount.
+ * Returns: the expected folio refcount.
*/
static inline int folio_expected_ref_count(const struct folio *folio)
{
@@ -2798,8 +3036,9 @@ extern void pagefault_out_of_memory(void);
*/
struct zap_details {
struct folio *single_folio; /* Locked folio to be unmapped */
- bool even_cows; /* Zap COWed private pages too? */
+ bool skip_cows; /* Do not zap COWed private pages */
bool reclaim_pt; /* Need reclaim page tables? */
+ bool reaping; /* Reaping, do not block. */
zap_flags_t zap_flags; /* Extra flags for zapping */
};
@@ -2832,14 +3071,17 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t pud);
-void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
- unsigned long size, struct zap_details *details);
-static inline void zap_vma_pages(struct vm_area_struct *vma)
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+/**
+ * zap_vma - zap all page table entries in a vma
+ * @vma: The vma to zap.
+ */
+static inline void zap_vma(struct vm_area_struct *vma)
{
- zap_page_range_single(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, NULL);
+ zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
struct mmu_notifier_range;
@@ -3847,7 +4089,6 @@ extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
-extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
@@ -3898,11 +4139,13 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
/* This takes the mm semaphore itself */
-extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
-extern int vm_munmap(unsigned long, size_t);
-extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
- unsigned long, unsigned long,
- unsigned long, unsigned long);
+int __must_check vm_brk_flags(unsigned long addr, unsigned long request, bool is_exec);
+int vm_munmap(unsigned long start, size_t len);
+unsigned long __must_check vm_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset);
+unsigned long __must_check vm_mmap_shadow_stack(unsigned long addr,
+ unsigned long len, unsigned long flags);
struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
@@ -3999,6 +4242,11 @@ static inline unsigned long vma_pages(const struct vm_area_struct *vma)
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
+static inline unsigned long vma_last_pgoff(struct vm_area_struct *vma)
+{
+ return vma->vm_pgoff + vma_pages(vma) - 1;
+}
+
static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
{
return desc->end - desc->start;
@@ -4073,15 +4321,75 @@ static inline void mmap_action_ioremap(struct vm_area_desc *desc,
* @start_pfn: The first PFN in the range to remap.
*/
static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
- unsigned long start_pfn)
+ unsigned long start_pfn)
{
mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
}
-void mmap_action_prepare(struct mmap_action *action,
- struct vm_area_desc *desc);
-int mmap_action_complete(struct mmap_action *action,
- struct vm_area_struct *vma);
+/**
+ * mmap_action_simple_ioremap - helper for mmap_prepare hook to specify that the
+ * physical range in [start_phys_addr, start_phys_addr + size) should be I/O
+ * remapped.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start_phys_addr: Start of the physical memory to be mapped.
+ * @size: Size of the area to map.
+ *
+ * NOTE: Some drivers might want to tweak desc->page_prot for purposes of
+ * write-combine or similar.
+ */
+static inline void mmap_action_simple_ioremap(struct vm_area_desc *desc,
+ phys_addr_t start_phys_addr,
+ unsigned long size)
+{
+ struct mmap_action *action = &desc->action;
+
+ action->simple_ioremap.start_phys_addr = start_phys_addr;
+ action->simple_ioremap.size = size;
+ action->type = MMAP_SIMPLE_IO_REMAP;
+}
+
+/**
+ * mmap_action_map_kernel_pages - helper for mmap_prepare hook to specify that
+ * @num kernel pages contained in the @pages array should be mapped to userland
+ * starting at virtual address @start.
+ * @desc: The VMA descriptor for the VMA requiring kernel pages to be mapped.
+ * @start: The virtual address from which to map them.
+ * @pages: An array of struct page pointers describing the memory to map.
+ * @nr_pages: The number of entries in the @pages array.
+ */
+static inline void mmap_action_map_kernel_pages(struct vm_area_desc *desc,
+ unsigned long start, struct page **pages,
+ unsigned long nr_pages)
+{
+ struct mmap_action *action = &desc->action;
+
+ action->type = MMAP_MAP_KERNEL_PAGES;
+ action->map_kernel.start = start;
+ action->map_kernel.pages = pages;
+ action->map_kernel.nr_pages = nr_pages;
+ action->map_kernel.pgoff = desc->pgoff;
+}
+
+/**
+ * mmap_action_map_kernel_pages_full - helper for mmap_prepare hook to specify that
+ * kernel pages contained in the @pages array should be mapped to userland
+ * from @desc->start to @desc->end.
+ * @desc: The VMA descriptor for the VMA requiring kernel pages to be mapped.
+ * @pages: An array of struct page pointers describing the memory to map.
+ *
+ * The caller must ensure that @pages contains sufficient entries to cover the
+ * entire range described by @desc.
+ */
+static inline void mmap_action_map_kernel_pages_full(struct vm_area_desc *desc,
+ struct page **pages)
+{
+ mmap_action_map_kernel_pages(desc, desc->start, pages,
+ vma_desc_pages(desc));
+}
+
+int mmap_action_prepare(struct vm_area_desc *desc);
+int mmap_action_complete(struct vm_area_struct *vma,
+ struct mmap_action *action);
/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
@@ -4095,20 +4403,81 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
+/**
+ * range_is_subset - Is the specified inner range a subset of the outer range?
+ * @outer_start: The start of the outer range.
+ * @outer_end: The exclusive end of the outer range.
+ * @inner_start: The start of the inner range.
+ * @inner_end: The exclusive end of the inner range.
+ *
+ * Returns: %true if [inner_start, inner_end) is a subset of [outer_start,
+ * outer_end), otherwise %false.
+ */
+static inline bool range_is_subset(unsigned long outer_start,
+ unsigned long outer_end,
+ unsigned long inner_start,
+ unsigned long inner_end)
+{
+ return outer_start <= inner_start && inner_end <= outer_end;
+}
+
+/**
+ * range_in_vma - is the specified [@start, @end) range a subset of the VMA?
+ * @vma: The VMA against which we want to check [@start, @end).
+ * @start: The start of the range we wish to check.
+ * @end: The exclusive end of the range we wish to check.
+ *
+ * Returns: %true if [@start, @end) is a subset of [@vma->vm_start,
+ * @vma->vm_end), %false otherwise.
+ */
static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- return (vma && vma->vm_start <= start && end <= vma->vm_end);
+ if (!vma)
+ return false;
+
+ return range_is_subset(vma->vm_start, vma->vm_end, start, end);
+}
+
+/**
+ * range_in_vma_desc - is the specified [@start, @end) range a subset of the VMA
+ * described by @desc, a VMA descriptor?
+ * @desc: The VMA descriptor against which we want to check [@start, @end).
+ * @start: The start of the range we wish to check.
+ * @end: The exclusive end of the range we wish to check.
+ *
+ * Returns: %true if [@start, @end) is a subset of [@desc->start, @desc->end),
+ * %false otherwise.
+ */
+static inline bool range_in_vma_desc(const struct vm_area_desc *desc,
+ unsigned long start, unsigned long end)
+{
+ if (!desc)
+ return false;
+
+ return range_is_subset(desc->start, desc->end, start, end);
}
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
+
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+ const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
+
+ return vm_get_page_prot(vm_flags);
+}
+
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(0);
}
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+ return __pgprot(0);
+}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -4130,6 +4499,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
+int map_kernel_pages_prepare(struct vm_area_desc *desc);
+int map_kernel_pages_complete(struct vm_area_struct *vma,
+ struct mmap_action *action);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
@@ -4508,10 +4880,9 @@ int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
-int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
+int vmemmap_populate_hvo(unsigned long start, unsigned long end,
+ unsigned int order, struct zone *zone,
unsigned long headsize);
-int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
- unsigned long headsize);
void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
unsigned long headsize);
void vmemmap_populate_print_last(void);
@@ -4697,22 +5068,6 @@ long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src,
bool allow_pagefault);
-/**
- * vma_is_special_huge - Are transhuge page-table entries considered special?
- * @vma: Pointer to the struct vm_area_struct to consider
- *
- * Whether transhuge page-table entries are considered "special" following
- * the definition in vm_normal_page().
- *
- * Return: true if transhuge page-table entries should be considered special,
- * false otherwise.
- */
-static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
-{
- return vma_is_dax(vma) || (vma->vm_file &&
- (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
-}
-
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if MAX_NUMNODES > 1
@@ -4817,10 +5172,9 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
* DMA mapping IDs for page_pool
*
* When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
- * stashes it in the upper bits of page->pp_magic. We always want to be able to
- * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
- * pages can have arbitrary kernel pointers stored in the same field as pp_magic
- * (since it overlaps with page->lru.next), so we must ensure that we cannot
+ * stashes it in the upper bits of page->pp_magic. Non-PP pages can have
+ * arbitrary kernel pointers stored in the same field as pp_magic (since
+ * it overlaps with page->lru.next), so we must ensure that we cannot
* mistake a valid kernel pointer with any of the values we write into this
* field.
*
@@ -4855,26 +5209,6 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
PP_DMA_INDEX_SHIFT)
-/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
- * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
- * the head page of compound page and bit 1 for pfmemalloc page, as well as the
- * bits used for the DMA index. page_is_pfmemalloc() is checked in
- * __page_pool_put_page() to avoid recycling the pfmemalloc page.
- */
-#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
-
-#ifdef CONFIG_PAGE_POOL
-static inline bool page_pool_page_is_pp(const struct page *page)
-{
- return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
-}
-#else
-static inline bool page_pool_page_is_pp(const struct page *page)
-{
- return false;
-}
-#endif
-
#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
#define PAGE_SNAPSHOT_PG_IDLE (1 << 2)
@@ -4894,4 +5228,8 @@ static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
void snapshot_page(struct page_snapshot *ps, const struct page *page);
+void map_anon_folio_pte_nopf(struct folio *folio, pte_t *pte,
+ struct vm_area_struct *vma, unsigned long addr,
+ bool uffd_wp);
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index fa2d6ba811b5..7fc2ced00f8f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -30,11 +30,6 @@ static inline int folio_is_file_lru(const struct folio *folio)
return !folio_test_swapbacked(folio);
}
-static inline int page_is_file_lru(struct page *page)
-{
- return folio_is_file_lru(page_folio(page));
-}
-
static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
long nr_pages)
@@ -102,6 +97,12 @@ static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
#ifdef CONFIG_LRU_GEN
+static inline bool lru_gen_switching(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_switch);
+
+ return static_branch_unlikely(&lru_switch);
+}
#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
@@ -316,6 +317,11 @@ static inline bool lru_gen_enabled(void)
return false;
}
+static inline bool lru_gen_switching(void)
+{
+ return false;
+}
+
static inline bool lru_gen_in_fault(void)
{
return false;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3cc8ae722886..a308e2c23b82 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -126,14 +126,14 @@ struct page {
atomic_long_t pp_ref_count;
};
struct { /* Tail pages of compound page */
- unsigned long compound_head; /* Bit zero is set */
+ unsigned long compound_info; /* Bit zero is set */
};
struct { /* ZONE_DEVICE pages */
/*
- * The first word is used for compound_head or folio
+ * The first word is used for compound_info or folio
* pgmap
*/
- void *_unused_pgmap_compound_head;
+ void *_unused_pgmap_compound_info;
void *zone_device_data;
/*
* ZONE_DEVICE private pages are counted as being
@@ -409,7 +409,7 @@ struct folio {
/* private: avoid cluttering the output */
/* For the Unevictable "LRU list" slot */
struct {
- /* Avoid compound_head */
+ /* Avoid compound_info */
void *__filler;
/* public: */
unsigned int mlock_count;
@@ -510,7 +510,7 @@ struct folio {
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
-FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(compound_info, lru);
FOLIO_MATCH(__folio_index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
@@ -529,7 +529,7 @@ FOLIO_MATCH(_last_cpupid, _last_cpupid);
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
-FOLIO_MATCH(compound_head, _head_1);
+FOLIO_MATCH(compound_info, _head_1);
FOLIO_MATCH(_mapcount, _mapcount_1);
FOLIO_MATCH(_refcount, _refcount_1);
#undef FOLIO_MATCH
@@ -537,13 +537,13 @@ FOLIO_MATCH(_refcount, _refcount_1);
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
-FOLIO_MATCH(compound_head, _head_2);
+FOLIO_MATCH(compound_info, _head_2);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 3 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_3);
-FOLIO_MATCH(compound_head, _head_3);
+FOLIO_MATCH(compound_info, _head_3);
#undef FOLIO_MATCH
/**
@@ -609,8 +609,8 @@ struct ptdesc {
#define TABLE_MATCH(pg, pt) \
static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
TABLE_MATCH(flags, pt_flags);
-TABLE_MATCH(compound_head, pt_list);
-TABLE_MATCH(compound_head, _pt_pad_1);
+TABLE_MATCH(compound_info, pt_list);
+TABLE_MATCH(compound_info, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
TABLE_MATCH(__folio_index, pt_index);
TABLE_MATCH(rcu_head, pt_rcu_head);
@@ -814,6 +814,8 @@ enum mmap_action_type {
MMAP_NOTHING, /* Mapping is complete, no further action. */
MMAP_REMAP_PFN, /* Remap PFN range. */
MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */
+ MMAP_SIMPLE_IO_REMAP, /* I/O remap with guardrails. */
+ MMAP_MAP_KERNEL_PAGES, /* Map kernel page range from array. */
};
/*
@@ -822,13 +824,22 @@ enum mmap_action_type {
*/
struct mmap_action {
union {
- /* Remap range. */
struct {
unsigned long start;
unsigned long start_pfn;
unsigned long size;
pgprot_t pgprot;
} remap;
+ struct {
+ phys_addr_t start_phys_addr;
+ unsigned long size;
+ } simple_ioremap;
+ struct {
+ unsigned long start;
+ struct page **pages;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
+ } map_kernel;
};
enum mmap_action_type type;
@@ -870,6 +881,14 @@ typedef struct {
#define EMPTY_VMA_FLAGS ((vma_flags_t){ })
+/* Are no flags set in the specified VMA flags? */
+static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
+{
+ const unsigned long *bitmap = flags->__vma_flags;
+
+ return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
+}
+
/*
* Describes a VMA that is about to be mmap()'ed. Drivers may choose to
* manipulate mutable fields which will cause those fields to be updated in the
@@ -879,8 +898,8 @@ typedef struct {
*/
struct vm_area_desc {
/* Immutable state. */
- const struct mm_struct *const mm;
- struct file *const file; /* May vary from vm_file in stacked callers. */
+ struct mm_struct *mm;
+ struct file *file; /* May vary from vm_file in stacked callers. */
unsigned long start;
unsigned long end;
@@ -1056,18 +1075,31 @@ struct vm_area_struct {
} __randomize_layout;
/* Clears all bits in the VMA flags bitmap, non-atomically. */
-static inline void vma_flags_clear_all(vma_flags_t *flags)
+static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
bitmap_zero(flags->__vma_flags, NUM_VMA_FLAG_BITS);
}
/*
+ * Helper function which converts a vma_flags_t value to a legacy vm_flags_t
+ * value. This is only valid if the input flags value can be expressed in a
+ * system word.
+ *
+ * Will be removed once the conversion to VMA flags is complete.
+ */
+static __always_inline vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
+{
+ return (vm_flags_t)flags.__vma_flags[0];
+}
+
+/*
* Copy value to the first system word of VMA flags, non-atomically.
*
* IMPORTANT: This does not overwrite bytes past the first system word. The
* caller must account for this.
*/
-static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_overwrite_word(vma_flags_t *flags,
+ unsigned long value)
{
unsigned long *bitmap = flags->__vma_flags;
@@ -1075,12 +1107,27 @@ static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long va
}
/*
+ * Helper function which converts a legacy vm_flags_t value to a vma_flags_t
+ * value.
+ *
+ * Will be removed once the conversion to VMA flags is complete.
+ */
+static __always_inline vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
+{
+ vma_flags_t ret = EMPTY_VMA_FLAGS;
+
+ vma_flags_overwrite_word(&ret, flags);
+ return ret;
+}
+
+/*
* Copy value to the first system word of VMA flags ONCE, non-atomically.
*
* IMPORTANT: This does not overwrite bytes past the first system word. The
* caller must account for this.
*/
-static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_overwrite_word_once(vma_flags_t *flags,
+ unsigned long value)
{
unsigned long *bitmap = flags->__vma_flags;
@@ -1088,7 +1135,8 @@ static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned lo
}
/* Update the first system word of VMA flags setting bits, non-atomically. */
-static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_set_word(vma_flags_t *flags,
+ unsigned long value)
{
unsigned long *bitmap = flags->__vma_flags;
@@ -1096,7 +1144,8 @@ static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
}
/* Update the first system word of VMA flags clearing bits, non-atomically. */
-static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
+static __always_inline void vma_flags_clear_word(vma_flags_t *flags,
+ unsigned long value)
{
unsigned long *bitmap = flags->__vma_flags;
@@ -1241,7 +1290,11 @@ struct mm_struct {
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- vm_flags_t def_flags;
+ union {
+ /* Temporary while VMA flags are being converted. */
+ vm_flags_t def_flags;
+ vma_flags_t def_vma_flags;
+ };
/**
* @write_protect_seq: Locked when any thread is write
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 0ba8a7e8b90a..389521594c69 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -170,53 +170,4 @@ static inline bool arch_memory_deny_write_exec_supported(void)
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif
-
-/*
- * Denies creating a writable executable mapping or gaining executable permissions.
- *
- * This denies the following:
- *
- * a) mmap(PROT_WRITE | PROT_EXEC)
- *
- * b) mmap(PROT_WRITE)
- * mprotect(PROT_EXEC)
- *
- * c) mmap(PROT_WRITE)
- * mprotect(PROT_READ)
- * mprotect(PROT_EXEC)
- *
- * But allows the following:
- *
- * d) mmap(PROT_READ | PROT_EXEC)
- * mmap(PROT_READ | PROT_EXEC | PROT_BTI)
- *
- * This is only applicable if the user has set the Memory-Deny-Write-Execute
- * (MDWE) protection mask for the current process.
- *
- * @old specifies the VMA flags the VMA originally possessed, and @new the ones
- * we propose to set.
- *
- * Return: false if proposed change is OK, true if not ok and should be denied.
- */
-static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
-{
- /* If MDWE is disabled, we have nothing to deny. */
- if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
- return false;
-
- /* If the new VMA is not executable, we have nothing to deny. */
- if (!(new & VM_EXEC))
- return false;
-
- /* Under MDWE we do not accept newly writably executable VMAs... */
- if (new & VM_WRITE)
- return true;
-
- /* ...nor previously non-executable VMAs becoming executable. */
- if (!(old & VM_EXEC))
- return true;
-
- return false;
-}
-
#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 93eca48bc443..04b8f61ece5d 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -546,7 +546,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
__mmap_lock_trace_acquire_returned(mm, true, true);
}
-static inline int mmap_write_lock_killable(struct mm_struct *mm)
+static inline int __must_check mmap_write_lock_killable(struct mm_struct *mm)
{
int ret;
@@ -593,7 +593,7 @@ static inline void mmap_read_lock(struct mm_struct *mm)
__mmap_lock_trace_acquire_returned(mm, false, true);
}
-static inline int mmap_read_lock_killable(struct mm_struct *mm)
+static inline int __must_check mmap_read_lock_killable(struct mm_struct *mm)
{
int ret;
@@ -603,7 +603,7 @@ static inline int mmap_read_lock_killable(struct mm_struct *mm)
return ret;
}
-static inline bool mmap_read_trylock(struct mm_struct *mm)
+static inline bool __must_check mmap_read_trylock(struct mm_struct *mm)
{
bool ret;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index e9e964c20e53..9dc4750296af 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -329,6 +329,8 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
#define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
+#define MMC_QUIRK_BROKEN_MDT (1<<19) /* Wrong manufacturing year */
+#define MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME (1<<20) /* Secure erase/trim time is fixed regardless of size */
bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 673cbdf43453..0685dd717e85 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -111,11 +111,15 @@
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_DEVICE_ID_MEDIATEK_MT7902 0x790a
#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
+#define SDIO_VENDOR_ID_NXP 0x0471
+#define SDIO_DEVICE_ID_NXP_IW61X 0x0205
+
#define SDIO_VENDOR_ID_REALTEK 0x024c
#define SDIO_DEVICE_ID_REALTEK_RTW8723BS 0xb723
#define SDIO_DEVICE_ID_REALTEK_RTW8821BS 0xb821
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 8450e18a87c2..69c304b467df 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -97,20 +97,20 @@ struct mmu_notifier_ops {
* Start-end is necessary in case the secondary MMU is mapping the page
* at a smaller granularity than the primary MMU.
*/
- int (*clear_flush_young)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ bool (*clear_flush_young)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* clear_young is a lightweight version of clear_flush_young. Like the
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
*/
- int (*clear_young)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ bool (*clear_young)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* test_young is called to check the young/accessed bitflag in
@@ -118,9 +118,9 @@ struct mmu_notifier_ops {
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
*/
- int (*test_young)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long address);
+ bool (*test_young)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long address);
/*
* invalidate_range_start() and invalidate_range_end() must be
@@ -234,15 +234,57 @@ struct mmu_notifier {
};
/**
+ * struct mmu_interval_notifier_finish - mmu_interval_notifier two-pass abstraction
+ * @link: Lockless list link for the notifiers pending pass list
+ * @notifier: The mmu_interval_notifier for which the finish pass is called.
+ *
+ * Allocate, typically using GFP_NOWAIT in the interval notifier's start pass.
+ * Note that with a large number of notifiers implementing two passes,
+ * allocation with GFP_NOWAIT will become increasingly likely to fail, so consider
+ * implementing a small pool instead of using kmalloc() allocations.
+ *
+ * If the implementation needs to pass data between the start and the finish passes,
+ * the recommended way is to embed struct mmu_interval_notifier_finish into a larger
+ * structure that also contains the data needed to be shared. Keep in mind that
+ * a notifier callback can be invoked in parallel, and each invocation needs its
+ * own struct mmu_interval_notifier_finish.
+ *
+ * If allocation fails, then the &mmu_interval_notifier_ops->invalidate_start op
+ * needs to implement the full notifier functionality. Please refer to its
+ * documentation.
+ */
+struct mmu_interval_notifier_finish {
+ struct llist_node link;
+ struct mmu_interval_notifier *notifier;
+};
+
+/**
* struct mmu_interval_notifier_ops - callback for range notification
* @invalidate: Upon return the caller must stop using any SPTEs within this
* range. This function can sleep. Return false only if sleeping
* was required but mmu_notifier_range_blockable(range) is false.
+ * @invalidate_start: Similar to @invalidate, but intended for two-pass notifier
+ * callbacks where the call to @invalidate_start is the first
+ * pass and any struct mmu_interval_notifier_finish pointer
+ * returned in the @finish parameter describes the finish pass.
+ * If *@finish is %NULL on return, then no final pass will be
+ * called, and @invalidate_start needs to implement the full
+ * notifier, behaving like @invalidate. The value of *@finish
+ * is guaranteed to be %NULL at function entry.
+ * @invalidate_finish: Called as the second pass for any notifier that returned
+ * a non-NULL *@finish from @invalidate_start. The @finish
+ * pointer passed here is the same one returned by
+ * @invalidate_start.
*/
struct mmu_interval_notifier_ops {
bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
+ bool (*invalidate_start)(struct mmu_interval_notifier *interval_sub,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq,
+ struct mmu_interval_notifier_finish **finish);
+ void (*invalidate_finish)(struct mmu_interval_notifier_finish *finish);
};
struct mmu_interval_notifier {
@@ -376,14 +418,12 @@ mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
-extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
-extern int __mmu_notifier_clear_young(struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
-extern int __mmu_notifier_test_young(struct mm_struct *mm,
- unsigned long address);
+bool __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+bool __mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+bool __mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
@@ -403,30 +443,28 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
__mmu_notifier_release(mm);
}
-static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static inline bool mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_clear_flush_young(mm, start, end);
- return 0;
+ return false;
}
-static inline int mmu_notifier_clear_young(struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static inline bool mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_clear_young(mm, start, end);
- return 0;
+ return false;
}
-static inline int mmu_notifier_test_young(struct mm_struct *mm,
- unsigned long address)
+static inline bool mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_test_young(mm, address);
- return 0;
+ return false;
}
static inline void
@@ -516,55 +554,6 @@ static inline void mmu_notifier_range_init_owner(
range->owner = owner;
}
-#define clear_flush_young_ptes_notify(__vma, __address, __ptep, __nr) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- unsigned int ___nr = __nr; \
- __young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
- __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
- ___address, \
- ___address + \
- ___nr * PAGE_SIZE); \
- __young; \
-})
-
-#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
- __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
- ___address, \
- ___address + \
- PMD_SIZE); \
- __young; \
-})
-
-#define ptep_clear_young_notify(__vma, __address, __ptep) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
- __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
- ___address + PAGE_SIZE); \
- __young; \
-})
-
-#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
-({ \
- int __young; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
- __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
- __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
- ___address + PMD_SIZE); \
- __young; \
-})
-
#else /* CONFIG_MMU_NOTIFIER */
struct mmu_notifier_range {
@@ -601,24 +590,22 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
-static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static inline bool mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
- return 0;
+ return false;
}
-static inline int mmu_notifier_clear_young(struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static inline bool mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
- return 0;
+ return false;
}
-static inline int mmu_notifier_test_young(struct mm_struct *mm,
- unsigned long address)
+static inline bool mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address)
{
- return 0;
+ return false;
}
static inline void
@@ -652,11 +639,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
#define mmu_notifier_range_update_to_read_only(r) false
-#define clear_flush_young_ptes_notify clear_flush_young_ptes
-#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
-#define ptep_clear_young_notify ptep_test_and_clear_young
-#define pmdp_clear_young_notify pmdp_test_and_clear_young
-
static inline void mmu_notifier_synchronize(void)
{
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3e51190a55e4..3bcdda226a91 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -23,6 +23,7 @@
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <linux/zswap.h>
+#include <linux/sizes.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
@@ -61,6 +62,59 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
+#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
+/*
+ * We don't expect any folios that exceed buddy sizes (and consequently
+ * memory sections).
+ */
+#define MAX_FOLIO_ORDER MAX_PAGE_ORDER
+#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/*
+ * Only pages within a single memory section are guaranteed to be
+ * contiguous. By limiting folios to a single memory section, all folio
+ * pages are guaranteed to be contiguous.
+ */
+#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
+#elif defined(CONFIG_HUGETLB_PAGE)
+/*
+ * There is no real limit on the folio size. We limit them to the maximum we
+ * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
+ * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
+ */
+#ifdef CONFIG_64BIT
+#define MAX_FOLIO_ORDER (ilog2(SZ_16G) - PAGE_SHIFT)
+#else
+#define MAX_FOLIO_ORDER (ilog2(SZ_1G) - PAGE_SHIFT)
+#endif
+#else
+/*
+ * Without hugetlb, gigantic folios that are bigger than a single PUD are
+ * currently impossible.
+ */
+#define MAX_FOLIO_ORDER (PUD_SHIFT - PAGE_SHIFT)
+#endif
+
+#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER)
+
+/*
+ * HugeTLB Vmemmap Optimization (HVO) requires struct pages of the head page to
+ * be naturally aligned with regard to the folio size.
+ *
+ * HVO is only active if the size of struct page is a power of 2.
+ */
+#define MAX_FOLIO_VMEMMAP_ALIGN \
+ (IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && \
+ is_power_of_2(sizeof(struct page)) ? \
+ MAX_FOLIO_NR_PAGES * sizeof(struct page) : 0)
+
+/*
+ * vmemmap optimization (like HVO) is only possible for page orders that fill
+ * two or more pages with struct pages.
+ */
+#define VMEMMAP_TAIL_MIN_ORDER (ilog2(2 * PAGE_SIZE / sizeof(struct page)))
+#define __NR_VMEMMAP_TAILS (MAX_FOLIO_ORDER - VMEMMAP_TAIL_MIN_ORDER + 1)
+#define NR_VMEMMAP_TAILS (__NR_VMEMMAP_TAILS > 0 ? __NR_VMEMMAP_TAILS : 0)
+
enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
@@ -220,6 +274,7 @@ enum node_stat_item {
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
+ NR_VMALLOC,
NR_KERNEL_STACK_KB, /* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
NR_KERNEL_SCS_KB, /* measured in KiB */
@@ -255,11 +310,26 @@ enum node_stat_item {
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
PGDEMOTE_PROACTIVE,
+ PGSTEAL_KSWAPD,
+ PGSTEAL_DIRECT,
+ PGSTEAL_KHUGEPAGED,
+ PGSTEAL_PROACTIVE,
+ PGSTEAL_ANON,
+ PGSTEAL_FILE,
+ PGSCAN_KSWAPD,
+ PGSCAN_DIRECT,
+ PGSCAN_KHUGEPAGED,
+ PGSCAN_PROACTIVE,
+ PGSCAN_ANON,
+ PGSCAN_FILE,
+ PGREFILL,
#ifdef CONFIG_HUGETLB_PAGE
NR_HUGETLB,
#endif
NR_BALLOON_PAGES,
NR_KERNEL_FILE_PAGES,
+ NR_GPU_ACTIVE, /* Pages assigned to GPU objects */
+ NR_GPU_RECLAIM, /* Pages in shrinkable GPU pools */
NR_VM_NODE_STAT_ITEMS
};
@@ -616,7 +686,7 @@ struct lru_gen_memcg {
void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
-bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw, unsigned int nr);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -635,7 +705,8 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
-static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw,
+ unsigned int nr)
{
return false;
}
@@ -1057,6 +1128,9 @@ struct zone {
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+ struct page *vmemmap_tails[NR_VMEMMAP_TAILS];
+#endif
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
@@ -1910,15 +1984,13 @@ struct mem_section_usage {
unsigned long pageblock_flags[0];
};
-void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
-
struct page;
struct page_ext;
struct mem_section {
/*
* This is, logically, a pointer to an array of struct
* pages. However, it is stored with some other magic.
- * (see sparse.c::sparse_init_one_section())
+ * (see sparse_init_one_section())
*
* Additionally during early boot we encode node id of
* the location of the section here to guide allocation.
@@ -2300,11 +2372,9 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#endif
#else
-#define sparse_index_init(_sec, _nid) do {} while (0)
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
-#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
/*
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 5b1725fe9707..23ff24080dfd 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -691,6 +691,7 @@ struct x86_cpu_id {
__u16 feature; /* bit index */
/* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
__u16 flags;
+ __u8 platform_mask;
__u8 type;
kernel_ulong_t driver_data;
};
@@ -702,6 +703,7 @@ struct x86_cpu_id {
#define X86_STEPPING_ANY 0
#define X86_STEP_MIN 0
#define X86_STEP_MAX 0xf
+#define X86_PLATFORM_ANY 0x0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
#define X86_CPU_TYPE_ANY 0
diff --git a/include/linux/module.h b/include/linux/module.h
index 14f391b186c6..7566815fabbe 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -413,11 +413,13 @@ struct module {
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
+ const char *imported_namespaces;
struct kobject *holders_dir;
/* Exported symbols */
const struct kernel_symbol *syms;
const u32 *crcs;
+ const u8 *flagstab;
unsigned int num_syms;
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
@@ -433,9 +435,6 @@ struct module {
unsigned int num_kp;
/* GPL-only exported symbols. */
- unsigned int num_gpl_syms;
- const struct kernel_symbol *gpl_syms;
- const u32 *gpl_crcs;
bool using_gplonly_symbols;
#ifdef CONFIG_MODULE_SIG
diff --git a/include/linux/module_signature.h b/include/linux/module_signature.h
index 7eb4b00381ac..db335d46787f 100644
--- a/include/linux/module_signature.h
+++ b/include/linux/module_signature.h
@@ -10,35 +10,7 @@
#define _LINUX_MODULE_SIGNATURE_H
#include <linux/types.h>
-
-/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
-#define MODULE_SIG_STRING "~Module signature appended~\n"
-
-enum pkey_id_type {
- PKEY_ID_PGP, /* OpenPGP generated key ID */
- PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
- PKEY_ID_PKCS7, /* Signature in PKCS#7 message */
-};
-
-/*
- * Module signature information block.
- *
- * The constituents of the signature section are, in order:
- *
- * - Signer's name
- * - Key identifier
- * - Signature data
- * - Information block
- */
-struct module_signature {
- u8 algo; /* Public-key crypto algorithm [0] */
- u8 hash; /* Digest algorithm [0] */
- u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
- u8 signer_len; /* Length of signer's name [0] */
- u8 key_id_len; /* Length of key identifier [0] */
- u8 __pad[3];
- __be32 sig_len; /* Length of signature data */
-};
+#include <uapi/linux/module_signature.h>
int mod_check_sig(const struct module_signature *ms, size_t file_len,
const char *name);
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
index 77c9895b9ddb..574609aced99 100644
--- a/include/linux/module_symbol.h
+++ b/include/linux/module_symbol.h
@@ -2,6 +2,11 @@
#ifndef _LINUX_MODULE_SYMBOL_H
#define _LINUX_MODULE_SYMBOL_H
+/* Kernel symbol flags bitset. */
+enum ksym_flags {
+ KSYM_FLAG_GPL_ONLY = 1 << 0,
+};
+
/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
static inline bool is_mapping_symbol(const char *str)
{
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 7d22d4c4ea2e..075f28585074 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -317,8 +317,8 @@ struct kparam_array
name, &__param_ops_##name, arg, perm, -1, 0)
#ifdef CONFIG_SYSFS
-extern void kernel_param_lock(struct module *mod);
-extern void kernel_param_unlock(struct module *mod);
+void kernel_param_lock(struct module *mod);
+void kernel_param_unlock(struct module *mod);
#else
static inline void kernel_param_lock(struct module *mod)
{
@@ -398,7 +398,7 @@ static inline void kernel_param_unlock(struct module *mod)
* Returns: true if the two parameter names are equal.
* Dashes (-) are considered equal to underscores (_).
*/
-extern bool parameq(const char *name1, const char *name2);
+bool parameq(const char *name1, const char *name2);
/**
* parameqn - checks if two parameter names match
@@ -412,28 +412,23 @@ extern bool parameq(const char *name1, const char *name2);
* are equal.
* Dashes (-) are considered equal to underscores (_).
*/
-extern bool parameqn(const char *name1, const char *name2, size_t n);
+bool parameqn(const char *name1, const char *name2, size_t n);
typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg);
/* Called on module insert or kernel boot */
-extern char *parse_args(const char *name,
- char *args,
- const struct kernel_param *params,
- unsigned num,
- s16 level_min,
- s16 level_max,
- void *arg, parse_unknown_fn unknown);
+char *parse_args(const char *doing,
+ char *args,
+ const struct kernel_param *params,
+ unsigned int num,
+ s16 min_level,
+ s16 max_level,
+ void *arg, parse_unknown_fn unknown);
/* Called by module remove. */
-#ifdef CONFIG_SYSFS
-extern void destroy_params(const struct kernel_param *params, unsigned num);
-#else
-static inline void destroy_params(const struct kernel_param *params,
- unsigned num)
-{
-}
-#endif /* !CONFIG_SYSFS */
+#ifdef CONFIG_MODULES
+void module_destroy_params(const struct kernel_param *params, unsigned int num);
+#endif
/* All the helper functions */
/* The macros to do compile-time type checking stolen from Jakub
@@ -442,78 +437,77 @@ static inline void destroy_params(const struct kernel_param *params,
static inline type __always_unused *__check_##name(void) { return(p); }
extern const struct kernel_param_ops param_ops_byte;
-extern int param_set_byte(const char *val, const struct kernel_param *kp);
-extern int param_get_byte(char *buffer, const struct kernel_param *kp);
+int param_set_byte(const char *val, const struct kernel_param *kp);
+int param_get_byte(char *buffer, const struct kernel_param *kp);
#define param_check_byte(name, p) __param_check(name, p, unsigned char)
extern const struct kernel_param_ops param_ops_short;
-extern int param_set_short(const char *val, const struct kernel_param *kp);
-extern int param_get_short(char *buffer, const struct kernel_param *kp);
+int param_set_short(const char *val, const struct kernel_param *kp);
+int param_get_short(char *buffer, const struct kernel_param *kp);
#define param_check_short(name, p) __param_check(name, p, short)
extern const struct kernel_param_ops param_ops_ushort;
-extern int param_set_ushort(const char *val, const struct kernel_param *kp);
-extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
+int param_set_ushort(const char *val, const struct kernel_param *kp);
+int param_get_ushort(char *buffer, const struct kernel_param *kp);
#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
extern const struct kernel_param_ops param_ops_int;
-extern int param_set_int(const char *val, const struct kernel_param *kp);
-extern int param_get_int(char *buffer, const struct kernel_param *kp);
+int param_set_int(const char *val, const struct kernel_param *kp);
+int param_get_int(char *buffer, const struct kernel_param *kp);
#define param_check_int(name, p) __param_check(name, p, int)
extern const struct kernel_param_ops param_ops_uint;
-extern int param_set_uint(const char *val, const struct kernel_param *kp);
-extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint(const char *val, const struct kernel_param *kp);
+int param_get_uint(char *buffer, const struct kernel_param *kp);
int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
unsigned int min, unsigned int max);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
extern const struct kernel_param_ops param_ops_long;
-extern int param_set_long(const char *val, const struct kernel_param *kp);
-extern int param_get_long(char *buffer, const struct kernel_param *kp);
+int param_set_long(const char *val, const struct kernel_param *kp);
+int param_get_long(char *buffer, const struct kernel_param *kp);
#define param_check_long(name, p) __param_check(name, p, long)
extern const struct kernel_param_ops param_ops_ulong;
-extern int param_set_ulong(const char *val, const struct kernel_param *kp);
-extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
+int param_set_ulong(const char *val, const struct kernel_param *kp);
+int param_get_ulong(char *buffer, const struct kernel_param *kp);
#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
extern const struct kernel_param_ops param_ops_ullong;
-extern int param_set_ullong(const char *val, const struct kernel_param *kp);
-extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
+int param_set_ullong(const char *val, const struct kernel_param *kp);
+int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
extern const struct kernel_param_ops param_ops_hexint;
-extern int param_set_hexint(const char *val, const struct kernel_param *kp);
-extern int param_get_hexint(char *buffer, const struct kernel_param *kp);
+int param_set_hexint(const char *val, const struct kernel_param *kp);
+int param_get_hexint(char *buffer, const struct kernel_param *kp);
#define param_check_hexint(name, p) param_check_uint(name, p)
extern const struct kernel_param_ops param_ops_charp;
-extern int param_set_charp(const char *val, const struct kernel_param *kp);
-extern int param_get_charp(char *buffer, const struct kernel_param *kp);
-extern void param_free_charp(void *arg);
+int param_set_charp(const char *val, const struct kernel_param *kp);
+int param_get_charp(char *buffer, const struct kernel_param *kp);
+void param_free_charp(void *arg);
#define param_check_charp(name, p) __param_check(name, p, char *)
/* We used to allow int as well as bool. We're taking that away! */
extern const struct kernel_param_ops param_ops_bool;
-extern int param_set_bool(const char *val, const struct kernel_param *kp);
-extern int param_get_bool(char *buffer, const struct kernel_param *kp);
+int param_set_bool(const char *val, const struct kernel_param *kp);
+int param_get_bool(char *buffer, const struct kernel_param *kp);
#define param_check_bool(name, p) __param_check(name, p, bool)
extern const struct kernel_param_ops param_ops_bool_enable_only;
-extern int param_set_bool_enable_only(const char *val,
- const struct kernel_param *kp);
+int param_set_bool_enable_only(const char *val, const struct kernel_param *kp);
/* getter is the same as for the regular bool */
#define param_check_bool_enable_only param_check_bool
extern const struct kernel_param_ops param_ops_invbool;
-extern int param_set_invbool(const char *val, const struct kernel_param *kp);
-extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
+int param_set_invbool(const char *val, const struct kernel_param *kp);
+int param_get_invbool(char *buffer, const struct kernel_param *kp);
#define param_check_invbool(name, p) __param_check(name, p, bool)
/* An int, which can only be set like a bool (though it shows as an int). */
extern const struct kernel_param_ops param_ops_bint;
-extern int param_set_bint(const char *val, const struct kernel_param *kp);
+int param_set_bint(const char *val, const struct kernel_param *kp);
#define param_get_bint param_get_int
#define param_check_bint param_check_int
@@ -620,19 +614,19 @@ enum hwparam_type {
extern const struct kernel_param_ops param_array_ops;
extern const struct kernel_param_ops param_ops_string;
-extern int param_set_copystring(const char *val, const struct kernel_param *);
-extern int param_get_string(char *buffer, const struct kernel_param *kp);
+int param_set_copystring(const char *val, const struct kernel_param *kp);
+int param_get_string(char *buffer, const struct kernel_param *kp);
/* for exporting parameters in /sys/module/.../parameters */
struct module;
#if defined(CONFIG_SYSFS) && defined(CONFIG_MODULES)
-extern int module_param_sysfs_setup(struct module *mod,
- const struct kernel_param *kparam,
- unsigned int num_params);
+int module_param_sysfs_setup(struct module *mod,
+ const struct kernel_param *kparam,
+ unsigned int num_params);
-extern void module_param_sysfs_remove(struct module *mod);
+void module_param_sysfs_remove(struct module *mod);
#else
static inline int module_param_sysfs_setup(struct module *mod,
const struct kernel_param *kparam,
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 0075f6e5c3da..cf3374580f74 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -76,7 +76,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
struct vif_device *vif,
struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
- unsigned int *ipmr_seq)
+ atomic_t *ipmr_seq)
{
struct vif_entry_notifier_info info = {
.info = {
@@ -89,7 +89,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
};
ASSERT_RTNL();
- (*ipmr_seq)++;
+ atomic_inc(ipmr_seq);
return call_fib_notifiers(net, event_type, &info.info);
}
@@ -198,7 +198,7 @@ static inline int mr_call_mfc_notifiers(struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct mr_mfc *mfc, u32 tb_id,
- unsigned int *ipmr_seq)
+ atomic_t *ipmr_seq)
{
struct mfc_entry_notifier_info info = {
.info = {
@@ -208,8 +208,7 @@ static inline int mr_call_mfc_notifiers(struct net *net,
.tb_id = tb_id
};
- ASSERT_RTNL();
- (*ipmr_seq)++;
+ atomic_inc(ipmr_seq);
return call_fib_notifiers(net, event_type, &info.info);
}
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h
index d6f653e07426..f8d4d6ac1fc1 100644
--- a/include/linux/mtd/concat.h
+++ b/include/linux/mtd/concat.h
@@ -9,6 +9,18 @@
#define MTD_CONCAT_H
+/*
+ * Our storage structure:
+ * Subdev points to an array of pointers to struct mtd_info objects
+ * which is allocated along with this structure
+ *
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info *subdev[];
+};
+
struct mtd_info *mtd_concat_create(
struct mtd_info *subdev[], /* subdevices to concatenate */
int num_devs, /* number of subdevices */
@@ -16,5 +28,54 @@ struct mtd_info *mtd_concat_create(
void mtd_concat_destroy(struct mtd_info *mtd);
-#endif
+/**
+ * mtd_virt_concat_node_create - Create a component for concatenation
+ *
+ * Returns a positive number representing the no. of devices found for
+ * concatenation, or a negative error code.
+ *
+ * List all the devices for concatenations found in DT and create a
+ * component for concatenation.
+ */
+int mtd_virt_concat_node_create(void);
+
+/**
+ * mtd_virt_concat_add - add mtd_info object to the list of subdevices for concatenation
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Returns true if the mtd_info object is added successfully, else returns false.
+ *
+ * The mtd_info object is added to the list of subdevices for concatenation.
+ * It returns true if a match is found, and false if all subdevices have
+ * already been added or if the mtd_info object does not match any of the
+ * intended MTD devices.
+ */
+bool mtd_virt_concat_add(struct mtd_info *mtd);
+/**
+ * mtd_virt_concat_create_join - Create and register the concatenated MTD device
+ *
+ * Returns 0 on success, or a negative error code.
+ *
+ * Creates and registers the concatenated MTD device
+ */
+int mtd_virt_concat_create_join(void);
+
+/**
+ * mtd_virt_concat_destroy - Remove the concat that includes a specific mtd device
+ * as one of its components.
+ * @mtd: pointer to MTD device info structure.
+ *
+ * Returns 0 on success, or a negative error code.
+ *
+ * If the mtd_info object is part of a concatenated device, all other MTD devices
+ * within that concat are registered individually. The concatenated device is then
+ * removed, along with its concatenation component.
+ *
+ */
+int mtd_virt_concat_destroy(struct mtd_info *mtd);
+
+void mtd_virt_concat_destroy_joins(void);
+void mtd_virt_concat_destroy_items(void);
+
+#endif
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6a024cf1c53a..58abd306ebe3 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -477,8 +477,9 @@ struct spinand_ecc_info {
const struct mtd_ooblayout_ops *ooblayout;
};
-#define SPINAND_HAS_QE_BIT BIT(0)
-#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+/* SPI NAND flags */
+#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
#define SPINAND_NO_RAW_ACCESS BIT(4)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ecaa0440f6ec..734048c02f4f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -79,7 +79,7 @@ do { \
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
- , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+ , .first_waiter = NULL \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -87,12 +87,12 @@ do { \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void mutex_init_lockep(struct mutex *lock, const char *name, struct lock_class_key *key);
+void mutex_init_lockdep(struct mutex *lock, const char *name, struct lock_class_key *key);
static inline void __mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key)
{
- mutex_init_lockep(lock, name, key);
+ mutex_init_lockdep(lock, name, key);
}
#else
extern void mutex_init_generic(struct mutex *lock);
@@ -146,7 +146,7 @@ static inline void __mutex_init(struct mutex *lock, const char *name,
{
mutex_rt_init_generic(lock);
}
-#endif /* !CONFIG_LOCKDEP */
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_MUTEXES
@@ -183,7 +183,7 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
-extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass) __cond_acquires(0, lock);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
@@ -253,6 +253,7 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_a
DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1_COND(mutex, _kill, mutex_lock_killable(_T->lock), _RET == 0)
DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
@@ -261,6 +262,8 @@ DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex
#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_kill, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_kill, _T)
DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index 80975935ec48..24ed599fdda8 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -44,7 +44,7 @@ context_lock_struct(mutex) {
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
- struct list_head wait_list;
+ struct mutex_waiter *first_waiter __guarded_by(&wait_lock);
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 2e25c838f831..a961861a503b 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -16,6 +16,8 @@ struct device;
struct mux_control;
struct mux_state;
+#if IS_ENABLED(CONFIG_MULTIPLEXER)
+
unsigned int mux_control_states(struct mux_control *mux);
int __must_check mux_control_select_delay(struct mux_control *mux,
unsigned int state,
@@ -54,11 +56,109 @@ int mux_control_deselect(struct mux_control *mux);
int mux_state_deselect(struct mux_state *mstate);
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
+struct mux_control *mux_control_get_optional(struct device *dev, const char *mux_name);
void mux_control_put(struct mux_control *mux);
-struct mux_control *devm_mux_control_get(struct device *dev,
- const char *mux_name);
-struct mux_state *devm_mux_state_get(struct device *dev,
- const char *mux_name);
+struct mux_control *devm_mux_control_get(struct device *dev, const char *mux_name);
+struct mux_state *devm_mux_state_get(struct device *dev, const char *mux_name);
+struct mux_state *devm_mux_state_get_optional(struct device *dev, const char *mux_name);
+struct mux_state *devm_mux_state_get_selected(struct device *dev, const char *mux_name);
+struct mux_state *devm_mux_state_get_optional_selected(struct device *dev, const char *mux_name);
+
+#else
+
+static inline unsigned int mux_control_states(struct mux_control *mux)
+{
+ return 0;
+}
+static inline int __must_check mux_control_select_delay(struct mux_control *mux,
+ unsigned int state, unsigned int delay_us)
+{
+ return -EOPNOTSUPP;
+}
+static inline int __must_check mux_state_select_delay(struct mux_state *mstate,
+ unsigned int delay_us)
+{
+ return -EOPNOTSUPP;
+}
+static inline int __must_check mux_control_try_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us)
+{
+ return -EOPNOTSUPP;
+}
+static inline int __must_check mux_state_try_select_delay(struct mux_state *mstate,
+ unsigned int delay_us)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __must_check mux_state_select(struct mux_state *mstate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __must_check mux_state_try_select(struct mux_state *mstate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mux_control_deselect(struct mux_control *mux)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mux_state_deselect(struct mux_state *mstate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct mux_control *mux_control_get_optional(struct device *dev,
+ const char *mux_name)
+{
+ return NULL;
+}
+static inline void mux_control_put(struct mux_control *mux) {}
+
+static inline struct mux_control *devm_mux_control_get(struct device *dev, const char *mux_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct mux_state *devm_mux_state_get(struct device *dev, const char *mux_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct mux_state *devm_mux_state_get_optional(struct device *dev,
+ const char *mux_name)
+{
+ return NULL;
+}
+static inline struct mux_state *devm_mux_state_get_selected(struct device *dev,
+ const char *mux_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct mux_state *devm_mux_state_get_optional_selected(struct device *dev,
+ const char *mux_name)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MULTIPLEXER */
#endif /* _LINUX_MUX_CONSUMER_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 58600cf234bc..2ad6dd9987b9 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -54,9 +54,6 @@ extern int path_pts(struct path *path);
extern int user_path_at(int, const char __user *, unsigned, struct path *);
-struct dentry *lookup_one_qstr_excl(const struct qstr *name,
- struct dentry *base,
- unsigned int flags);
extern int kern_path(const char *, unsigned, struct path *);
struct dentry *kern_path_parent(const char *name, struct path *parent);
@@ -168,9 +165,6 @@ extern int follow_down_one(struct path *);
extern int follow_down(struct path *path, unsigned int flags);
extern int follow_up(struct path *);
-extern struct dentry *lock_rename(struct dentry *, struct dentry *);
-extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
-extern void unlock_rename(struct dentry *, struct dentry *);
int start_renaming(struct renamedata *rd, int lookup_flags,
struct qstr *old_last, struct qstr *new_last);
int start_renaming_dentry(struct renamedata *rd, int lookup_flags,
diff --git a/include/linux/net.h b/include/linux/net.h
index f58b38ab37f8..f268f395ce47 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -23,9 +23,30 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <linux/uio.h>
#include <uapi/linux/net.h>
+/**
+ * struct sockopt - socket option value container
+ * @iter_in: iov_iter for reading the optval content supplied by the caller.
+ * Use copy_from_iter(); this iov_iter's direction is ITER_SOURCE.
+ * @iter_out: iov_iter for protocols to return optval data to userspace.
+ * Use _copy_to_iter(); this iov_iter's direction is ITER_DEST.
+ * @optlen: serves as both input (buffer size) and output (returned data size).
+ *
+ * Type-safe wrapper for socket option data that works with both
+ * user and kernel buffers.
+ *
+ * The optlen field allows callbacks to return a specific length value
+ * independent of the bytes written via copy_to_iter().
+ */
+typedef struct sockopt {
+ struct iov_iter iter_in;
+ struct iov_iter iter_out;
+ int optlen;
+} sockopt_t;
+
struct poll_table_struct;
struct pipe_inode_info;
struct inode;
@@ -192,6 +213,8 @@ struct proto_ops {
unsigned int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
+ int (*getsockopt_iter)(struct socket *sock, int level,
+ int optname, sockopt_t *opt);
void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
@@ -223,6 +246,7 @@ struct proto_ops {
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
int (*set_rcvlowat)(struct sock *sk, int val);
+ void (*set_rcvbuf)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
@@ -304,6 +328,8 @@ do { \
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
+#define net_get_random_sleepable_once(buf, nbytes) \
+ get_random_sleepable_once((buf), (nbytes))
/*
* E.g. XFS meta- & log-data is in slab pages, or bcache meta
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7ca01eb3f7d2..7969fcdd5ac4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1716,7 +1716,6 @@ struct net_device_ops {
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
* @IFF_TEAM: device is a team device
- * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
* @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
@@ -1752,7 +1751,6 @@ enum netdev_priv_flags {
IFF_OPENVSWITCH = 1<<20,
IFF_L3MDEV_SLAVE = 1<<21,
IFF_TEAM = 1<<22,
- IFF_RXFH_CONFIGURED = 1<<23,
IFF_PHONY_HEADROOM = 1<<24,
IFF_MACSEC = 1<<25,
IFF_NO_RX_HANDLER = 1<<26,
@@ -2563,7 +2561,14 @@ struct net_device {
* Also protects some fields in:
* struct napi_struct, struct netdev_queue, struct netdev_rx_queue
*
- * Ordering: take after rtnl_lock.
+ * Ordering:
+ *
+ * - take after rtnl_lock
+ *
+ * - for the case of netdev queue leasing, the netdev-scope lock is
+ * taken for both the virtual and the physical device; to prevent
+ * deadlocks, the virtual device's lock must always be acquired
+ * before the physical device's (see netdev_nl_queue_create_doit)
*/
struct mutex lock;
@@ -2765,6 +2770,17 @@ static inline void *netdev_priv(const struct net_device *dev)
return (void *)dev->priv;
}
+/**
+ * netdev_from_priv() - get network device from priv
+ * @priv: network device private data
+ *
+ * Returns: net_device to which @priv belongs
+ */
+static inline struct net_device *netdev_from_priv(const void *priv)
+{
+ return container_of(priv, struct net_device, priv);
+}
+
/* Set the sysfs physical device reference for the network logical device
* if set prior to registration will cause a symlink during initialization.
*/
@@ -3404,6 +3420,8 @@ static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
+bool unregister_netdevice_queued(const struct net_device *dev);
+
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
@@ -5339,9 +5357,9 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
}
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
- const void *ns);
+ const struct ns_common *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
- const void *ns);
+ const struct ns_common *ns);
extern const struct kobj_ns_type_operations net_ns_type_operations;
@@ -5569,10 +5587,7 @@ static inline bool netif_is_lag_port(const struct net_device *dev)
return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
-static inline bool netif_is_rxfh_configured(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_RXFH_CONFIGURED;
-}
+bool netif_is_rxfh_configured(const struct net_device *dev);
static inline bool netif_is_failover(const struct net_device *dev)
{
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index e9f4f845d760..b98331572ad2 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -309,7 +309,7 @@ enum {
/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
- const char *name, struct ip_set **set);
+ const struct nlattr *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h
index dfe89f38d1f7..1719987e8fd8 100644
--- a/include/linux/netfilter/nf_conntrack_amanda.h
+++ b/include/linux/netfilter/nf_conntrack_amanda.h
@@ -7,10 +7,13 @@
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_expect.h>
-extern unsigned int (__rcu *nf_nat_amanda_hook)(struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned int matchoff,
- unsigned int matchlen,
- struct nf_conntrack_expect *exp);
+typedef unsigned int
+nf_nat_amanda_hook_fn(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp);
+
+extern nf_nat_amanda_hook_fn __rcu *nf_nat_amanda_hook;
#endif /* _NF_CONNTRACK_AMANDA_H */
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
index f31292642035..7b62446ccec4 100644
--- a/include/linux/netfilter/nf_conntrack_ftp.h
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -26,11 +26,14 @@ struct nf_ct_ftp_master {
/* For NAT to hook in when we find a packet which describes what other
* connection we should expect. */
-extern unsigned int (__rcu *nf_nat_ftp_hook)(struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- enum nf_ct_ftp_type type,
- unsigned int protoff,
- unsigned int matchoff,
- unsigned int matchlen,
- struct nf_conntrack_expect *exp);
+typedef unsigned int
+nf_nat_ftp_hook_fn(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ enum nf_ct_ftp_type type,
+ unsigned int protoff,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp);
+
+extern nf_nat_ftp_hook_fn __rcu *nf_nat_ftp_hook;
#endif /* _NF_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h
index 4f3ca5621998..ce07250afb4e 100644
--- a/include/linux/netfilter/nf_conntrack_irc.h
+++ b/include/linux/netfilter/nf_conntrack_irc.h
@@ -8,11 +8,14 @@
#define IRC_PORT 6667
-extern unsigned int (__rcu *nf_nat_irc_hook)(struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned int matchoff,
- unsigned int matchlen,
- struct nf_conntrack_expect *exp);
+typedef unsigned int
+nf_nat_irc_hook_fn(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp);
+
+extern nf_nat_irc_hook_fn __rcu *nf_nat_irc_hook;
#endif /* _NF_CONNTRACK_IRC_H */
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
index 99107e4f5234..bb39f04a9977 100644
--- a/include/linux/netfilter/nf_conntrack_snmp.h
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -5,9 +5,12 @@
#include <linux/netfilter.h>
#include <linux/skbuff.h>
-extern int (__rcu *nf_nat_snmp_hook)(struct sk_buff *skb,
- unsigned int protoff,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo);
+typedef int
+nf_nat_snmp_hook_fn(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+
+extern nf_nat_snmp_hook_fn __rcu *nf_nat_snmp_hook;
#endif /* _NF_CONNTRACK_SNMP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h
index 1490b68dd7d1..90b334bbce3c 100644
--- a/include/linux/netfilter/nf_conntrack_tftp.h
+++ b/include/linux/netfilter/nf_conntrack_tftp.h
@@ -19,8 +19,11 @@ struct tftphdr {
#define TFTP_OPCODE_ACK 4
#define TFTP_OPCODE_ERROR 5
-extern unsigned int (__rcu *nf_nat_tftp_hook)(struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- struct nf_conntrack_expect *exp);
+typedef unsigned int
+nf_nat_tftp_hook_fn(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ struct nf_conntrack_expect *exp);
+
+extern nf_nat_tftp_hook_fn __rcu *nf_nat_tftp_hook;
#endif /* _NF_CONNTRACK_TFTP_H */
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 61aa48f46dd7..5ce45b6d890f 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -34,59 +34,13 @@ struct ip6_rt_info {
struct nf_queue_entry;
struct nf_bridge_frag_data;
-/*
- * Hook functions for ipv6 to allow xt_* modules to be built-in even
- * if IPv6 is a module.
- */
-struct nf_ipv6_ops {
-#if IS_MODULE(CONFIG_IPV6)
- int (*chk_addr)(struct net *net, const struct in6_addr *addr,
- const struct net_device *dev, int strict);
- int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
- int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
- const struct in6_addr *daddr, unsigned int srcprefs,
- struct in6_addr *saddr);
- int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
- bool strict);
- u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
- const struct tcphdr *th, u16 *mssp);
- int (*cookie_v6_check)(const struct ipv6hdr *iph,
- const struct tcphdr *th);
-#endif
- void (*route_input)(struct sk_buff *skb);
- int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
- int (*output)(struct net *, struct sock *, struct sk_buff *));
- int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
-#if IS_MODULE(CONFIG_IPV6)
- int (*br_fragment)(struct net *net, struct sock *sk,
- struct sk_buff *skb,
- struct nf_bridge_frag_data *data,
- int (*output)(struct net *, struct sock *sk,
- const struct nf_bridge_frag_data *data,
- struct sk_buff *));
-#endif
-};
-
#ifdef CONFIG_NETFILTER
#include <net/addrconf.h>
-extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
-static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
-{
- return rcu_dereference(nf_ipv6_ops);
-}
-
static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict)
{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (!v6_ops)
- return 1;
-
- return v6_ops->chk_addr(net, addr, dev, strict);
-#elif IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
return ipv6_chk_addr(net, addr, dev, strict);
#else
return 1;
@@ -99,15 +53,7 @@ int __nf_ip6_route(struct net *net, struct dst_entry **dst,
static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
struct flowi *fl, bool strict)
{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
-
- if (v6ops)
- return v6ops->route(net, dst, fl, strict);
-
- return -EHOSTUNREACH;
-#endif
-#if IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
return __nf_ip6_route(net, dst, fl, strict);
#else
return -EHOSTUNREACH;
@@ -129,14 +75,7 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (!v6_ops)
- return 1;
-
- return v6_ops->br_fragment(net, sk, skb, data, output);
-#elif IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
return br_ip6_fragment(net, sk, skb, data, output);
#else
return 1;
@@ -147,14 +86,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (!v6_ops)
- return -EHOSTUNREACH;
-
- return v6_ops->route_me_harder(net, sk, skb);
-#elif IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
return ip6_route_me_harder(net, sk, skb);
#else
return -EHOSTUNREACH;
@@ -165,32 +97,18 @@ static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
const struct tcphdr *th,
u16 *mssp)
{
-#if IS_ENABLED(CONFIG_SYN_COOKIES)
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (v6_ops)
- return v6_ops->cookie_init_sequence(iph, th, mssp);
-#elif IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6) && IS_ENABLED(CONFIG_SYN_COOKIES)
return __cookie_v6_init_sequence(iph, th, mssp);
#endif
-#endif
return 0;
}
static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
const struct tcphdr *th)
{
-#if IS_ENABLED(CONFIG_SYN_COOKIES)
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (v6_ops)
- return v6_ops->cookie_v6_check(iph, th);
-#elif IS_BUILTIN(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6) && IS_ENABLED(CONFIG_SYN_COOKIES)
return __cookie_v6_check(iph, th);
#endif
-#endif
return 0;
}
@@ -198,14 +116,6 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
-#else /* CONFIG_NETFILTER */
-static inline int ipv6_netfilter_init(void) { return 0; }
-static inline void ipv6_netfilter_fini(void) { return; }
-static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
#endif /* CONFIG_NETFILTER */
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 207156f2143c..bc1162895f35 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -21,6 +21,7 @@ void lockup_detector_soft_poweroff(void);
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
+extern int watchdog_hardlockup_miss_thresh;
extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
diff --git a/include/linux/ns/ns_common_types.h b/include/linux/ns/ns_common_types.h
index 0014fbc1c626..ea45c54e4435 100644
--- a/include/linux/ns/ns_common_types.h
+++ b/include/linux/ns/ns_common_types.h
@@ -7,6 +7,7 @@
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/types.h>
+#include <uapi/linux/sched.h>
struct cgroup_namespace;
struct dentry;
@@ -184,15 +185,38 @@ struct ns_common {
struct user_namespace *: (IS_ENABLED(CONFIG_USER_NS) ? &userns_operations : NULL), \
struct uts_namespace *: (IS_ENABLED(CONFIG_UTS_NS) ? &utsns_operations : NULL))
-#define ns_common_type(__ns) \
- _Generic((__ns), \
- struct cgroup_namespace *: CLONE_NEWCGROUP, \
- struct ipc_namespace *: CLONE_NEWIPC, \
- struct mnt_namespace *: CLONE_NEWNS, \
- struct net *: CLONE_NEWNET, \
- struct pid_namespace *: CLONE_NEWPID, \
- struct time_namespace *: CLONE_NEWTIME, \
- struct user_namespace *: CLONE_NEWUSER, \
- struct uts_namespace *: CLONE_NEWUTS)
+/*
+ * FOR_EACH_NS_TYPE - Canonical list of namespace types
+ *
+ * Enumerates all (struct type, CLONE_NEW* flag) pairs. This is the
+ * single source of truth used to derive ns_common_type() and
+ * CLONE_NS_ALL. When adding a new namespace type, add a single entry
+ * here; all consumers update automatically.
+ *
+ * @X: Callback macro taking (struct_name, clone_flag) as arguments.
+ */
+#define FOR_EACH_NS_TYPE(X) \
+ X(cgroup_namespace, CLONE_NEWCGROUP) \
+ X(ipc_namespace, CLONE_NEWIPC) \
+ X(mnt_namespace, CLONE_NEWNS) \
+ X(net, CLONE_NEWNET) \
+ X(pid_namespace, CLONE_NEWPID) \
+ X(time_namespace, CLONE_NEWTIME) \
+ X(user_namespace, CLONE_NEWUSER) \
+ X(uts_namespace, CLONE_NEWUTS)
+
+/* Bitmask of all known CLONE_NEW* flags. */
+#define _NS_TYPE_FLAG_OR(struct_name, flag) | (flag)
+#define CLONE_NS_ALL (0 FOR_EACH_NS_TYPE(_NS_TYPE_FLAG_OR))
+
+/*
+ * ns_common_type - Map a namespace struct pointer to its CLONE_NEW* flag
+ *
+ * Uses a leading-comma pattern so the FOR_EACH_NS_TYPE expansion
+ * produces ", struct foo *: FLAG" entries without a trailing comma.
+ */
+#define _NS_TYPE_ASSOC(struct_name, flag) , struct struct_name *: (flag)
+
+#define ns_common_type(__ns) _Generic((__ns)FOR_EACH_NS_TYPE(_NS_TYPE_ASSOC))
#endif /* _LINUX_NS_COMMON_TYPES_H */
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 8ff9d663096b..879c3e89e026 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -256,6 +256,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
* @msg_clear_mask: See ntb_msg_clear_mask().
* @msg_read: See ntb_msg_read().
* @peer_msg_write: See ntb_peer_msg_write().
+ * @get_dma_dev: See ntb_get_dma_dev().
*/
struct ntb_dev_ops {
int (*port_number)(struct ntb_dev *ntb);
@@ -329,6 +330,7 @@ struct ntb_dev_ops {
int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
+ struct device *(*get_dma_dev)(struct ntb_dev *ntb);
};
static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
@@ -391,6 +393,8 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* !ops->msg_clear_mask == !ops->msg_count && */
!ops->msg_read == !ops->msg_count &&
!ops->peer_msg_write == !ops->msg_count &&
+
+ /* ops->get_dma_dev is optional */
1;
}
@@ -1564,6 +1568,26 @@ static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
}
/**
+ * ntb_get_dma_dev() - get the device to use for DMA allocations/mappings
+ * @ntb: NTB device context.
+ *
+ * Return a struct device suitable for DMA API allocations and mappings.
+ * This is typically the parent of the NTB device, but may be overridden by a
+ * driver by implementing .get_dma_dev().
+ *
+ * Drivers that implement .get_dma_dev() must return a non-NULL pointer.
+ *
+ * Return: device pointer to use for DMA operations.
+ */
+static inline struct device *ntb_get_dma_dev(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->get_dma_dev)
+ return ntb->dev.parent;
+
+ return ntb->ops->get_dma_dev(ntb);
+}
+
+/**
* ntb_peer_resource_idx() - get a resource index for a given peer idx
* @ntb: NTB device context.
* @pidx: Peer port index.
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
index e75c29c51464..682f81046345 100644
--- a/include/linux/nvme-auth.h
+++ b/include/linux/nvme-auth.h
@@ -7,6 +7,7 @@
#define _NVME_AUTH_H
#include <crypto/kpp.h>
+#include <crypto/sha2.h>
struct nvme_dhchap_key {
size_t len;
@@ -20,32 +21,44 @@ const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
const char *nvme_auth_hmac_name(u8 hmac_id);
-const char *nvme_auth_digest_name(u8 hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id);
u8 nvme_auth_hmac_id(const char *hmac_name);
+struct nvme_auth_hmac_ctx {
+ u8 hmac_id;
+ union {
+ struct hmac_sha256_ctx sha256;
+ struct hmac_sha384_ctx sha384;
+ struct hmac_sha512_ctx sha512;
+ };
+};
+int nvme_auth_hmac_init(struct nvme_auth_hmac_ctx *hmac, u8 hmac_id,
+ const u8 *key, size_t key_len);
+void nvme_auth_hmac_update(struct nvme_auth_hmac_ctx *hmac, const u8 *data,
+ size_t data_len);
+void nvme_auth_hmac_final(struct nvme_auth_hmac_ctx *hmac, u8 *out);
u32 nvme_auth_key_struct_size(u32 key_len);
-struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
- u8 key_hash);
+struct nvme_dhchap_key *nvme_auth_extract_key(const char *secret, u8 key_hash);
void nvme_auth_free_key(struct nvme_dhchap_key *key);
struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
struct nvme_dhchap_key *nvme_auth_transform_key(
- struct nvme_dhchap_key *key, char *nqn);
-int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
-int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
- u8 *challenge, u8 *aug, size_t hlen);
+ const struct nvme_dhchap_key *key, const char *nqn);
+int nvme_auth_parse_key(const char *secret, struct nvme_dhchap_key **ret_key);
+int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len,
+ const u8 *challenge, u8 *aug, size_t hlen);
int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
u8 *host_key, size_t host_key_len);
int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
- u8 *ctrl_key, size_t ctrl_key_len,
+ const u8 *ctrl_key, size_t ctrl_key_len,
u8 *sess_key, size_t sess_key_len);
-int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
- u8 *c1, u8 *c2, size_t hash_len,
+int nvme_auth_generate_psk(u8 hmac_id, const u8 *skey, size_t skey_len,
+ const u8 *c1, const u8 *c2, size_t hash_len,
u8 **ret_psk, size_t *ret_len);
-int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
- char *subsysnqn, char *hostnqn, u8 **ret_digest);
-int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
- u8 *psk_digest, u8 **ret_psk);
+int nvme_auth_generate_digest(u8 hmac_id, const u8 *psk, size_t psk_len,
+ const char *subsysnqn, const char *hostnqn,
+ char **ret_digest);
+int nvme_auth_derive_tls_psk(int hmac_id, const u8 *psk, size_t psk_len,
+ const char *psk_digest, u8 **ret_psk);
#endif /* _NVME_AUTH_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 655d194f8e72..041f30931a90 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -513,9 +513,16 @@ struct nvme_id_ns_nvm {
__u8 pic;
__u8 rsvd9[3];
__le32 elbaf[64];
- __u8 rsvd268[3828];
+ __le32 npdgl;
+ __le32 nprg;
+ __le32 npra;
+ __le32 nors;
+ __le32 npdal;
+ __u8 rsvd288[3808];
};
+static_assert(sizeof(struct nvme_id_ns_nvm) == 4096);
+
enum {
NVME_ID_NS_NVM_STS_MASK = 0x7f,
NVME_ID_NS_NVM_GUARD_SHIFT = 7,
@@ -590,7 +597,11 @@ enum {
enum {
NVME_NS_FEAT_THIN = 1 << 0,
NVME_NS_FEAT_ATOMICS = 1 << 1,
- NVME_NS_FEAT_IO_OPT = 1 << 4,
+ NVME_NS_FEAT_OPTPERF_SHIFT = 4,
+ /* In NVMe version 2.0 and below, OPTPERF is only bit 4 of NSFEAT */
+ NVME_NS_FEAT_OPTPERF_MASK = 0x1,
+ /* Since version 2.1, OPTPERF is bits 4 and 5 of NSFEAT */
+ NVME_NS_FEAT_OPTPERF_MASK_2_1 = 0x3,
NVME_NS_ATTR_RO = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
NVME_NS_FLBAS_LBA_UMASK = 0x60,
@@ -1837,6 +1848,11 @@ enum {
NVME_AUTH_HASH_INVALID = 0xff,
};
+/* Maximum digest size for any NVME_AUTH_HASH_* value */
+enum {
+ NVME_AUTH_MAX_DIGEST_SIZE = 64,
+};
+
/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
enum {
NVME_AUTH_DHGROUP_NULL = 0x00,
@@ -2332,4 +2348,8 @@ enum nvme_pr_change_ptpl {
#define NVME_PR_IGNORE_KEY (1 << 3)
+/* Section 8.3.4.5.2 of the NVMe 2.1 specification */
+#define NVME_AUTH_DHCHAP_MAX_HASH_IDS 30
+#define NVME_AUTH_DHCHAP_MAX_DH_IDS 30
+
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index be6ec4916adf..959786f8f196 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -410,7 +410,7 @@ extern int of_alias_get_id(const struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
bool of_machine_compatible_match(const char *const *compats);
-bool of_machine_device_match(const struct of_device_id *matches);
+const struct of_device_id *of_machine_get_match(const struct of_device_id *matches);
const void *of_machine_get_match_data(const struct of_device_id *matches);
/**
@@ -426,6 +426,9 @@ static inline bool of_machine_is_compatible(const char *compat)
return of_machine_compatible_match(compats);
}
+int of_machine_read_compatible(const char **compatible, unsigned int index);
+int of_machine_read_model(const char **model);
+
extern int of_add_property(struct device_node *np, struct property *prop);
extern int of_remove_property(struct device_node *np, struct property *prop);
extern int of_update_property(struct device_node *np, struct property *newprop);
@@ -851,6 +854,17 @@ static inline int of_machine_is_compatible(const char *compat)
return 0;
}
+static inline int of_machine_read_compatible(const char **compatible,
+ unsigned int index)
+{
+ return -ENOSYS;
+}
+
+static inline int of_machine_read_model(const char **model)
+{
+ return -ENOSYS;
+}
+
static inline int of_add_property(struct device_node *np, struct property *prop)
{
return 0;
@@ -866,9 +880,9 @@ static inline bool of_machine_compatible_match(const char *const *compats)
return false;
}
-static inline bool of_machine_device_match(const struct of_device_id *matches)
+static inline const struct of_device_id *of_machine_get_match(const struct of_device_id *matches)
{
- return false;
+ return NULL;
}
static inline const void *
@@ -976,6 +990,11 @@ static inline int of_numa_init(void)
}
#endif
+static inline bool of_machine_device_match(const struct of_device_id *matches)
+{
+ return of_machine_get_match(matches) != NULL;
+}
+
static inline struct device_node *of_find_matching_node(
struct device_node *from,
const struct of_device_id *matches)
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index fd706cdf255c..16b08234d03b 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -38,6 +38,26 @@ extern int of_dma_controller_register(struct device_node *np,
void *data);
extern void of_dma_controller_free(struct device_node *np);
+static void __of_dma_controller_free(void *np)
+{
+ of_dma_controller_free(np);
+}
+
+static inline int
+devm_of_dma_controller_register(struct device *dev, struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ int ret;
+
+ ret = of_dma_controller_register(np, of_dma_xlate, data);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, __of_dma_controller_free, np);
+}
+
extern int of_dma_router_register(struct device_node *np,
void *(*of_dma_route_allocate)
(struct of_phandle_args *, struct of_dma *),
@@ -64,6 +84,15 @@ static inline void of_dma_controller_free(struct device_node *np)
{
}
+static inline int
+devm_of_dma_controller_register(struct device *dev, struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+
static inline int of_dma_router_register(struct device_node *np,
void *(*of_dma_route_allocate)
(struct of_phandle_args *, struct of_dma *),
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
deleted file mode 100644
index d0f66a5e1b2a..000000000000
--- a/include/linux/of_gpio.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * OF helpers for the GPIO API
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- */
-
-#ifndef __LINUX_OF_GPIO_H
-#define __LINUX_OF_GPIO_H
-
-#include <linux/compiler.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio.h> /* FIXME: Shouldn't be here */
-#include <linux/of.h>
-
-struct device_node;
-
-#ifdef CONFIG_OF_GPIO
-
-extern int of_get_named_gpio(const struct device_node *np,
- const char *list_name, int index);
-
-#else /* CONFIG_OF_GPIO */
-
-#include <linux/errno.h>
-
-/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio(const struct device_node *np,
- const char *propname, int index)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_OF_GPIO */
-
-#endif /* __LINUX_OF_GPIO_H */
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index f573423359f4..e8b20b29fa68 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -11,7 +11,6 @@ struct resource;
struct reserved_mem {
const char *name;
- unsigned long fdt_node;
const struct reserved_mem_ops *ops;
phys_addr_t base;
phys_addr_t size;
@@ -19,18 +18,20 @@ struct reserved_mem {
};
struct reserved_mem_ops {
+ int (*node_validate)(unsigned long fdt_node, phys_addr_t *align);
+ int (*node_fixup)(unsigned long fdt_node, phys_addr_t base,
+ phys_addr_t size);
+ int (*node_init)(unsigned long fdt_node, struct reserved_mem *rmem);
int (*device_init)(struct reserved_mem *rmem,
struct device *dev);
void (*device_release)(struct reserved_mem *rmem,
struct device *dev);
};
-typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
-
#ifdef CONFIG_OF_RESERVED_MEM
-#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
- _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
+#define RESERVEDMEM_OF_DECLARE(name, compat, ops) \
+ _OF_DECLARE(reservedmem, name, compat, ops, struct reserved_mem_ops *)
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
@@ -48,8 +49,9 @@ int of_reserved_mem_region_count(const struct device_node *np);
#else
-#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
- _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+#define RESERVEDMEM_OF_DECLARE(name, compat, ops) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, ops, \
+ struct reserved_mem_ops *)
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 765f2778e264..b6232bea6edf 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -149,23 +149,23 @@ struct padata_mt_job {
/**
* struct padata_instance - The overall control structure.
*
- * @cpu_online_node: Linkage for CPU online callback.
- * @cpu_dead_node: Linkage for CPU offline callback.
+ * @cpuhp_node: Linkage for CPU hotplug callbacks.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
* @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
+ * @validate_cpumask: Internal cpumask used to validate @cpumask during hotplug.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
*/
struct padata_instance {
- struct hlist_node cpu_online_node;
- struct hlist_node cpu_dead_node;
+ struct hlist_node cpuhp_node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
struct list_head pslist;
struct padata_cpumask cpumask;
+ cpumask_var_t validate_cpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f7a0e4af0c73..0e03d816e8b9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,97 +198,91 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
-
/*
- * Return the real head page struct iff the @page is a fake head page, otherwise
- * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
+ * For tail pages, if the size of struct page is power-of-2 ->compound_info
+ * encodes the mask that converts the address of the tail page address to
+ * the head page address.
+ *
+ * Otherwise, ->compound_info has direct pointer to head pages.
*/
-static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
+static __always_inline bool compound_info_has_mask(void)
{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return page;
-
/*
- * Only addresses aligned with PAGE_SIZE of struct page may be fake head
- * struct page. The alignment check aims to avoid access the fields (
- * e.g. compound_head) of the @page[1]. It can avoid touch a (possibly)
- * cold cacheline in some cases.
+ * Limit mask usage to HugeTLB vmemmap optimization (HVO) where it
+ * makes a difference.
+ *
+ * The approach with mask would work in the wider set of conditions,
+ * but it requires validating that struct pages are naturally aligned
+ * for all orders up to the MAX_FOLIO_ORDER, which can be tricky.
*/
- if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags.f)) {
- /*
- * We can safely access the field of the @page[1] with PG_head
- * because the @page is a compound page composed with at least
- * two contiguous pages.
- */
- unsigned long head = READ_ONCE(page[1].compound_head);
-
- if (likely(head & 1))
- return (const struct page *)(head - 1);
- }
- return page;
+ if (!IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP))
+ return false;
+
+ return is_power_of_2(sizeof(struct page));
}
-static __always_inline bool page_count_writable(const struct page *page, int u)
+static __always_inline unsigned long _compound_head(const struct page *page)
{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return true;
+ unsigned long info = READ_ONCE(page->compound_info);
+ unsigned long mask;
+
+ if (!compound_info_has_mask()) {
+ /* Bit 0 encodes PageTail() */
+ if (info & 1)
+ return info - 1;
+
+ return (unsigned long)page;
+ }
/*
- * The refcount check is ordered before the fake-head check to prevent
- * the following race:
- * CPU 1 (HVO) CPU 2 (speculative PFN walker)
- *
- * page_ref_freeze()
- * synchronize_rcu()
- * rcu_read_lock()
- * page_is_fake_head() is false
- * vmemmap_remap_pte()
- * XXX: struct page[] becomes r/o
+ * If compound_info_has_mask() is true the rest of the info encodes
+ * the mask that converts the address of the tail page to the head page.
*
- * page_ref_unfreeze()
- * page_ref_count() is not zero
+ * No need to clear bit 0 in the mask as 'page' always has it clear.
*
- * atomic_add_unless(&page->_refcount)
- * XXX: try to modify r/o struct page[]
- *
- * The refcount check also prevents modification attempts to other (r/o)
- * tail pages that are not fake heads.
+ * Let's do it in a branchless manner.
*/
- if (atomic_read_acquire(&page->_refcount) == u)
- return false;
- return page_fixed_fake_head(page) == page;
-}
-#else
-static inline const struct page *page_fixed_fake_head(const struct page *page)
-{
- return page;
-}
+ /* Non-tail: -1UL, Tail: 0 */
+ mask = (info & 1) - 1;
-static inline bool page_count_writable(const struct page *page, int u)
-{
- return true;
-}
-#endif
+ /* Non-tail: -1UL, Tail: info */
+ mask |= info;
-static __always_inline int page_is_fake_head(const struct page *page)
-{
- return page_fixed_fake_head(page) != page;
+ return (unsigned long)page & mask;
}
-static __always_inline unsigned long _compound_head(const struct page *page)
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
+static __always_inline void set_compound_head(struct page *tail,
+ const struct page *head, unsigned int order)
{
- unsigned long head = READ_ONCE(page->compound_head);
+ unsigned int shift;
+ unsigned long mask;
+
+ if (!compound_info_has_mask()) {
+ WRITE_ONCE(tail->compound_info, (unsigned long)head | 1);
+ return;
+ }
+
+ /*
+ * If the size of struct page is power-of-2, bits [shift:0] of the
+ * virtual address of compound head are zero.
+ *
+ * Calculate mask that can be applied to the virtual address of
+ * the tail page to get address of the head page.
+ */
+ shift = order + order_base_2(sizeof(struct page));
+ mask = GENMASK(BITS_PER_LONG - 1, shift);
- if (unlikely(head & 1))
- return head - 1;
- return (unsigned long)page_fixed_fake_head(page);
+ /* Bit 0 encodes PageTail() */
+ WRITE_ONCE(tail->compound_info, mask | 1);
}
-#define compound_head(page) ((typeof(page))_compound_head(page))
+static __always_inline void clear_compound_head(struct page *page)
+{
+ WRITE_ONCE(page->compound_info, 0);
+}
/**
* page_folio - Converts from page to folio.
@@ -320,13 +314,13 @@ static __always_inline unsigned long _compound_head(const struct page *page)
static __always_inline int PageTail(const struct page *page)
{
- return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
+ return READ_ONCE(page->compound_info) & 1;
}
static __always_inline int PageCompound(const struct page *page)
{
return test_bit(PG_head, &page->flags.f) ||
- READ_ONCE(page->compound_head) & 1;
+ READ_ONCE(page->compound_info) & 1;
}
#define PAGE_POISON_PATTERN -1l
@@ -348,7 +342,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
{
const struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(page->compound_info & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
return &page[n].flags.f;
}
@@ -357,7 +351,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(page->compound_info & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
return &page[n].flags.f;
}
@@ -724,6 +718,11 @@ static __always_inline bool folio_test_anon(const struct folio *folio)
return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
+static __always_inline bool folio_test_lazyfree(const struct folio *folio)
+{
+ return folio_test_anon(folio) && !folio_test_swapbacked(folio);
+}
+
static __always_inline bool PageAnonNotKsm(const struct page *page)
{
unsigned long flags = (unsigned long)page_folio(page)->mapping;
@@ -847,7 +846,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f);
}
__SETPAGEFLAG(Head, head, PF_ANY)
@@ -865,16 +864,6 @@ static inline bool folio_test_large(const struct folio *folio)
return folio_test_head(folio);
}
-static __always_inline void set_compound_head(struct page *page, struct page *head)
-{
- WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
-}
-
-static __always_inline void clear_compound_head(struct page *page)
-{
- WRITE_ONCE(page->compound_head, 0);
-}
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
@@ -934,6 +923,7 @@ enum pagetype {
PGTY_zsmalloc = 0xf6,
PGTY_unaccepted = 0xf7,
PGTY_large_kmalloc = 0xf8,
+ PGTY_netpp = 0xf9,
PGTY_mapcount_underflow = 0xff
};
@@ -1066,6 +1056,11 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
+/*
+ * Marks page_pool allocated pages.
+ */
+PAGE_TYPE_OPS(Netpp, netpp, netpp)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 544150d1d5fd..94d3f0e71c06 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -228,24 +228,18 @@ static inline int folio_ref_dec_return(struct folio *folio)
return page_ref_dec_return(&folio->page);
}
-static inline bool page_ref_add_unless(struct page *page, int nr, int u)
+static inline bool page_ref_add_unless_zero(struct page *page, int nr)
{
- bool ret = false;
-
- rcu_read_lock();
- /* avoid writing to the vmemmap area being remapped */
- if (page_count_writable(page, u))
- ret = atomic_add_unless(&page->_refcount, nr, u);
- rcu_read_unlock();
+ bool ret = atomic_add_unless(&page->_refcount, nr, 0);
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
-static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
+static inline bool folio_ref_add_unless_zero(struct folio *folio, int nr)
{
- return page_ref_add_unless(&folio->page, nr, u);
+ return page_ref_add_unless_zero(&folio->page, nr);
}
/**
@@ -261,12 +255,12 @@ static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
*/
static inline bool folio_try_get(struct folio *folio)
{
- return folio_ref_add_unless(folio, 1, 0);
+ return folio_ref_add_unless_zero(folio, 1);
}
static inline bool folio_ref_try_add(struct folio *folio, int count)
{
- return folio_ref_add_unless(folio, count, 0);
+ return folio_ref_add_unless_zero(folio, count);
}
static inline int page_ref_freeze(struct page *page, int count)
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
index fe648dfa3a7c..9d4ca5c218a0 100644
--- a/include/linux/page_reporting.h
+++ b/include/linux/page_reporting.h
@@ -7,6 +7,7 @@
/* This value should always be a power of 2, see page_reporting_cycle() */
#define PAGE_REPORTING_CAPACITY 32
+#define PAGE_REPORTING_ORDER_UNSPECIFIED -1
struct page_reporting_dev_info {
/* function that alters pages to make them "reported" */
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 88e18615dd72..b41d7265c01b 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -148,14 +148,8 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
typedef int __bitwise folio_walk_flags_t;
-/*
- * Walk migration entries as well. Careful: a large folio might get split
- * concurrently.
- */
-#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0))
-
/* Walk shared zeropages (small + huge) as well. */
-#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1))
+#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(0))
enum folio_walk_level {
FW_LEVEL_PTE,
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index c021c7af175f..1eca1264815b 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -191,13 +191,49 @@ struct pci_epc {
* @BAR_RESIZABLE: The BAR implements the PCI-SIG Resizable BAR Capability.
* NOTE: An EPC driver can currently only set a single supported
* size.
- * @BAR_RESERVED: The BAR should not be touched by an EPF driver.
+ * @BAR_RESERVED: Used for HW-backed BARs (e.g. MSI-X table, DMA regs). The BAR
+ * should not be disabled by an EPC driver. The BAR should not be
+ * reprogrammed by an EPF driver. An EPF driver is allowed to
+ * disable the BAR if absolutely necessary. (However, right now
+ * there is no EPC operation to disable a BAR that has not been
+ * programmed using pci_epc_set_bar().)
+ * @BAR_DISABLED: The BAR should be disabled by an EPC driver. The BAR will be
+ * unavailable to an EPF driver.
*/
enum pci_epc_bar_type {
BAR_PROGRAMMABLE = 0,
BAR_FIXED,
BAR_RESIZABLE,
BAR_RESERVED,
+ BAR_DISABLED,
+};
+
+/**
+ * enum pci_epc_bar_rsvd_region_type - type of a fixed subregion behind a BAR
+ * @PCI_EPC_BAR_RSVD_DMA_CTRL_MMIO: Integrated DMA controller MMIO window
+ * @PCI_EPC_BAR_RSVD_MSIX_TBL_RAM: MSI-X table structure
+ * @PCI_EPC_BAR_RSVD_MSIX_PBA_RAM: MSI-X PBA structure
+ *
+ * BARs marked BAR_RESERVED are owned by the SoC/EPC hardware and must not be
+ * reprogrammed by EPF drivers. Some of them still expose fixed subregions that
+ * EPFs may want to reference (e.g. embedded doorbell fallback).
+ */
+enum pci_epc_bar_rsvd_region_type {
+ PCI_EPC_BAR_RSVD_DMA_CTRL_MMIO = 0,
+ PCI_EPC_BAR_RSVD_MSIX_TBL_RAM,
+ PCI_EPC_BAR_RSVD_MSIX_PBA_RAM,
+};
+
+/**
+ * struct pci_epc_bar_rsvd_region - fixed subregion behind a BAR
+ * @type: reserved region type
+ * @offset: offset within the BAR aperture
+ * @size: size of the reserved region
+ */
+struct pci_epc_bar_rsvd_region {
+ enum pci_epc_bar_rsvd_region_type type;
+ resource_size_t offset;
+ resource_size_t size;
};
/**
@@ -206,18 +242,16 @@ enum pci_epc_bar_type {
* @fixed_size: the fixed size, only applicable if type is BAR_FIXED_MASK.
* @only_64bit: if true, an EPF driver is not allowed to choose if this BAR
* should be configured as 32-bit or 64-bit, the EPF driver must
- * configure this BAR as 64-bit. Additionally, the BAR succeeding
- * this BAR must be set to type BAR_RESERVED.
- *
- * only_64bit should not be set on a BAR of type BAR_RESERVED.
- * (If BARx is a 64-bit BAR that an EPF driver is not allowed to
- * touch, then both BARx and BARx+1 must be set to type
- * BAR_RESERVED.)
+ * configure this BAR as 64-bit.
+ * @nr_rsvd_regions: number of fixed subregions described for BAR_RESERVED
+ * @rsvd_regions: fixed subregions behind BAR_RESERVED
*/
struct pci_epc_bar_desc {
enum pci_epc_bar_type type;
u64 fixed_size;
bool only_64bit;
+ u8 nr_rsvd_regions;
+ const struct pci_epc_bar_rsvd_region *rsvd_regions;
};
/**
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
index ba28140ce670..be68cd17f2f8 100644
--- a/include/linux/pci-tph.h
+++ b/include/linux/pci-tph.h
@@ -25,7 +25,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev,
unsigned int index, u16 tag);
int pcie_tph_get_cpu_st(struct pci_dev *dev,
enum tph_mem_type mem_type,
- unsigned int cpu_uid, u16 *tag);
+ unsigned int cpu, u16 *tag);
void pcie_disable_tph(struct pci_dev *pdev);
int pcie_enable_tph(struct pci_dev *pdev, int mode);
u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
@@ -36,7 +36,7 @@ static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
{ return -EINVAL; }
static inline int pcie_tph_get_cpu_st(struct pci_dev *dev,
enum tph_mem_type mem_type,
- unsigned int cpu_uid, u16 *tag)
+ unsigned int cpu, u16 *tag)
{ return -EINVAL; }
static inline void pcie_disable_tph(struct pci_dev *pdev) { }
static inline int pcie_enable_tph(struct pci_dev *pdev, int mode)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1c270f1d5123..2c4454583c11 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -72,12 +72,20 @@
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
+/*
+ * PCI_SLOT_ALL_DEVICES indicates a slot that covers all devices on the bus.
+ * Used for PCIe hotplug where the physical slot is the entire secondary bus,
+ * and, if ARI Forwarding is enabled, functions may appear to be on multiple
+ * devices.
+ */
+#define PCI_SLOT_ALL_DEVICES 0xfe
+
/* pci_slot represents a physical slot */
struct pci_slot {
struct pci_bus *bus; /* Bus this slot is on */
struct list_head list; /* Node in list of slots */
struct hotplug_slot *hotplug; /* Hotplug info (move here) */
- unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
+ unsigned char number; /* Device nr, or PCI_SLOT_ALL_DEVICES */
struct kobject kobj;
};
@@ -518,7 +526,7 @@ struct pci_dev {
unsigned int ptm_root:1;
unsigned int ptm_responder:1;
unsigned int ptm_requester:1;
- unsigned int ptm_enabled:1;
+ atomic_t ptm_enable_cnt;
u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
@@ -575,12 +583,6 @@ struct pci_dev {
u8 supported_speeds; /* Supported Link Speeds Vector */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
- /*
- * Driver name to force a match. Do not set directly, because core
- * frees it. Use driver_set_override() to set or clear it.
- */
- const char *driver_override;
-
unsigned long priv_flags; /* Private flags for the PCI driver */
/* These methods index pci_reset_fn_methods[] */
@@ -1193,8 +1195,6 @@ extern const struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
extern struct list_head pci_root_buses; /* List of all known PCI buses */
-/* Some device drivers need know if PCI is initiated */
-int no_pci_devices(void);
void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
@@ -1206,9 +1206,15 @@ int __must_check pcibios_enable_device(struct pci_dev *, int mask);
char *pcibios_setup(char *str);
/* Used only when drivers/pci/setup.c is used */
-resource_size_t pcibios_align_resource(void *, const struct resource *,
- resource_size_t,
- resource_size_t);
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ const struct resource *empty_res,
+ resource_size_t size,
+ resource_size_t align);
+resource_size_t pci_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ const struct resource *empty_res,
+ resource_size_t size,
+ resource_size_t align);
/* Generic PCI functions used internally */
@@ -1975,11 +1981,11 @@ struct pci_ptm_debugfs {
};
#ifdef CONFIG_PCIE_PTM
-int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+int pci_enable_ptm(struct pci_dev *dev);
void pci_disable_ptm(struct pci_dev *dev);
bool pcie_ptm_enabled(struct pci_dev *dev);
#else
-static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+static inline int pci_enable_ptm(struct pci_dev *dev)
{ return -EINVAL; }
static inline void pci_disable_ptm(struct pci_dev *dev) { }
static inline bool pcie_ptm_enabled(struct pci_dev *dev)
@@ -2132,7 +2138,6 @@ static inline struct pci_dev *pci_get_base_class(unsigned int class,
static inline int pci_dev_present(const struct pci_device_id *ids)
{ return 0; }
-#define no_pci_devices() (1)
#define pci_dev_put(dev) do { } while (0)
static inline void pci_set_master(struct pci_dev *dev) { }
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 406abf629be2..24cb42f66e4b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2586,6 +2586,8 @@
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
+#define PCI_VENDOR_ID_GOOGLE 0x1ae0
+
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a50df42a893f..cdd68ed3ae1a 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -491,64 +491,63 @@ static inline pgd_t pgdp_get(pgd_t *pgdp)
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
+static inline bool ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
- int r = 1;
+ bool young = true;
+
if (!pte_young(pte))
- r = 0;
+ young = false;
else
set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
- return r;
+ return young;
}
#endif
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
+static inline bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
- int r = 1;
+ bool young = true;
+
if (!pmd_young(pmd))
- r = 0;
+ young = false;
else
set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
- return r;
+ return young;
}
#else
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
+static inline bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
{
BUILD_BUG();
- return 0;
+ return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-int ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep);
+bool ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
#endif
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
+bool pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#else
/*
* Despite relevant to THP only, this API is called from generic rmap code
* under PageTransHuge(), hence needs a dummy implementation for !THP
*/
-static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
+static inline bool pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
{
BUILD_BUG();
- return 0;
+ return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -1086,10 +1085,10 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
* Context: The caller holds the page table lock. The PTEs map consecutive
* pages that belong to the same folio. The PTEs are all in the same PMD.
*/
-static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
+static inline bool clear_flush_young_ptes(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, unsigned int nr)
{
- int young = 0;
+ bool young = false;
for (;;) {
young |= ptep_clear_flush_young(vma, addr, ptep);
@@ -1103,6 +1102,43 @@ static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
}
#endif
+#ifndef test_and_clear_young_ptes
+/**
+ * test_and_clear_young_ptes - Mark PTEs that map consecutive pages of the same
+ * folio as old
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear access bit.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_test_and_clear_young().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ *
+ * Returns: whether any PTE was young.
+ */
+static inline bool test_and_clear_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ bool young = false;
+
+ for (;;) {
+ young |= ptep_test_and_clear_young(vma, addr, ptep);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+
+ return young;
+}
+#endif
+
/*
* On some architectures hardware does not set page access bit when accessing
* memory page, it is responsibility of software setting this bit. It brings
@@ -1917,41 +1953,56 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
}
-#ifdef CONFIG_MMU
+/*
+ * ZERO_PAGE() is global shared page(s) that is always zero. It is used for
+ * zero-mapped memory areas, CoW etc.
+ *
+ * On architectures that __HAVE_COLOR_ZERO_PAGE there are several such pages
+ * for different ranges in the virtual address space.
+ *
+ * zero_page_pfn identifies the first (or the only) pfn for these pages.
+ *
+ * For architectures that don't __HAVE_COLOR_ZERO_PAGE the zero page lives in
+ * empty_zero_page in BSS.
+ */
+void arch_setup_zero_pages(void);
+
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ extern unsigned long zero_page_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_page_pfn;
+
return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+#define zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
#else
static inline int is_zero_pfn(unsigned long pfn)
{
- extern unsigned long zero_pfn;
- return pfn == zero_pfn;
-}
+ extern unsigned long zero_page_pfn;
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
- extern unsigned long zero_pfn;
- return zero_pfn;
+ return pfn == zero_page_pfn;
}
-#endif
-#else
-static inline int is_zero_pfn(unsigned long pfn)
+
+static inline unsigned long zero_pfn(unsigned long addr)
{
- return 0;
+ extern unsigned long zero_page_pfn;
+
+ return zero_page_pfn;
}
-static inline unsigned long my_zero_pfn(unsigned long addr)
+extern uint8_t empty_zero_page[PAGE_SIZE];
+extern struct page *__zero_page;
+
+static inline struct page *_zero_page(unsigned long addr)
{
- return 0;
+ return __zero_page;
}
-#endif /* CONFIG_MMU */
+#define ZERO_PAGE(vaddr) _zero_page(vaddr)
+
+#endif /* __HAVE_COLOR_ZERO_PAGE */
#ifdef CONFIG_MMU
@@ -1989,7 +2040,7 @@ static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- pud_t pudval = READ_ONCE(*pud);
+ pud_t pudval = pudp_get(pud);
if (pud_none(pudval) || pud_trans_huge(pudval))
return 1;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6f9979a26892..199a7aaa341b 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -612,6 +612,8 @@ struct phy_oatc14_sqi_capability {
* @advertising_eee: Currently advertised EEE linkmodes
* @enable_tx_lpi: When True, MAC should transmit LPI to PHY
* @eee_active: phylib private state, indicating that EEE has been negotiated
+ * @autonomous_eee_disabled: Set when autonomous EEE has been disabled,
+ * used to re-apply after PHY soft reset
* @eee_cfg: User configuration of EEE
* @lp_advertising: Current link partner advertised linkmodes
* @host_interfaces: PHY interface modes supported by host
@@ -739,6 +741,7 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(eee_disabled_modes);
bool enable_tx_lpi;
bool eee_active;
+ bool autonomous_eee_disabled;
struct eee_config eee_cfg;
/* Host supported PHY interface types. Should be ignored if empty. */
@@ -1359,6 +1362,17 @@ struct phy_driver {
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+ /**
+ * @disable_autonomous_eee: Disable PHY-autonomous EEE
+ *
+ * Some PHYs manage EEE autonomously, preventing the MAC from
+ * controlling LPI signaling. This callback disables autonomous
+ * EEE at the PHY.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ int (*disable_autonomous_eee)(struct phy_device *dev);
+
/* Get and Set PHY tunables */
/** @get_tunable: Return the value of a tunable */
int (*get_tunable)(struct phy_device *dev,
@@ -2152,8 +2166,6 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable, int speed);
-struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
- phy_interface_t interface);
struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
@@ -2448,9 +2460,6 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
struct phy_port *phy_get_sfp_port(struct phy_device *phydev);
-extern const struct bus_type mdio_bus_type;
-extern const struct class mdio_bus_class;
-
/**
* phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 63ce16191eb9..11b8f0b8da0c 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -35,6 +35,8 @@ int pinctrl_gpio_direction_output(struct gpio_chip *gc,
unsigned int offset);
int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config);
+int pinctrl_gpio_get_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long *config);
struct pinctrl * __must_check pinctrl_get(struct device *dev);
void pinctrl_put(struct pinctrl *p);
@@ -102,6 +104,13 @@ pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
}
static inline int
+pinctrl_gpio_get_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long *config)
+{
+ return 0;
+}
+
+static inline int
pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
index d718ccfa3421..0b31af66a1ac 100644
--- a/include/linux/platform_data/dma-mcf-edma.h
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -26,8 +26,9 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
/**
* struct mcf_edma_platform_data - platform specific data for eDMA engine
*
- * @ver The eDMA module version.
- * @dma_channels The number of eDMA channels.
+ * @dma_channels: The number of eDMA channels.
+ * @slave_map: Slave device map
+ * @slavecnt: Number of entries in @slave_map
*/
struct mcf_edma_platform_data {
int dma_channels;
diff --git a/include/linux/platform_data/dsa.h b/include/linux/platform_data/dsa.h
index d4d9bf2060a6..77424bb24723 100644
--- a/include/linux/platform_data/dsa.h
+++ b/include/linux/platform_data/dsa.h
@@ -3,20 +3,11 @@
#define __DSA_PDATA_H
struct device;
-struct net_device;
-#define DSA_MAX_SWITCHES 4
#define DSA_MAX_PORTS 12
-#define DSA_RTABLE_NONE -1
struct dsa_chip_data {
/*
- * How to access the switch configuration registers.
- */
- struct device *host_dev;
- int sw_addr;
-
- /*
* Reference to network devices
*/
struct device *netdev[DSA_MAX_PORTS];
@@ -24,12 +15,6 @@ struct dsa_chip_data {
/* set to size of eeprom if supported by the switch */
int eeprom_len;
- /* Device tree node pointer for this specific switch chip
- * used during switch setup in case additional properties
- * and resources needs to be used
- */
- struct device_node *of_node;
-
/*
* The names of the switch's ports. Use "cpu" to
* designate the switch port that the cpu is connected to,
@@ -38,31 +23,6 @@ struct dsa_chip_data {
* or any other string to indicate this is a physical port.
*/
char *port_names[DSA_MAX_PORTS];
- struct device_node *port_dn[DSA_MAX_PORTS];
-
- /*
- * An array of which element [a] indicates which port on this
- * switch should be used to send packets to that are destined
- * for switch a. Can be NULL if there is only one switch chip.
- */
- s8 rtable[DSA_MAX_SWITCHES];
};
-struct dsa_platform_data {
- /*
- * Reference to a Linux network interface that connects
- * to the root switch chip of the tree.
- */
- struct device *netdev;
- struct net_device *of_netdev;
-
- /*
- * Info structs describing each of the switch chips
- * connected via this network interface.
- */
- int nr_chips;
- struct dsa_chip_data *chip;
-};
-
-
#endif /* __DSA_PDATA_H */
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
deleted file mode 100644
index 2aa5ee9a9050..000000000000
--- a/include/linux/platform_data/ina2xx.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for Texas Instruments INA219, INA226 power monitor chips
- *
- * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
- *
- * For further information, see the Documentation/hwmon/ina2xx.rst file.
- */
-
-/**
- * struct ina2xx_platform_data - ina2xx info
- * @shunt_uohms shunt resistance in microohms
- */
-struct ina2xx_platform_data {
- long shunt_uohms;
-};
diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
deleted file mode 100644
index 13874fa6e767..000000000000
--- a/include/linux/platform_data/mdio-gpio.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * MDIO-GPIO bus platform data structure
- */
-
-#ifndef __LINUX_MDIO_GPIO_PDATA_H
-#define __LINUX_MDIO_GPIO_PDATA_H
-
-struct mdio_gpio_platform_data {
- u32 phy_mask;
- u32 phy_ignore_ta_mask;
-};
-
-#endif /* __LINUX_MDIO_GPIO_PDATA_H */
diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h
index 6d74e507dbd2..2b48f2b0135d 100644
--- a/include/linux/platform_data/voltage-omap.h
+++ b/include/linux/platform_data/voltage-omap.h
@@ -10,14 +10,14 @@
/**
* struct omap_volt_data - Omap voltage specific data.
- * @voltage_nominal: The possible voltage value in uV
+ * @volt_nominal: The possible voltage value in uV
* @sr_efuse_offs: The offset of the efuse register(from system
* control module base address) from where to read
* the n-target value for the smartreflex module.
* @sr_errminlimit: Error min limit value for smartreflex. This value
* differs at differnet opp and thus is linked
* with voltage.
- * @vp_errorgain: Error gain value for the voltage processor. This
+ * @vp_errgain: Error gain value for the voltage processor. This
* field also differs according to the voltage/opp.
*/
struct omap_volt_data {
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ed1d50d1c3c1..975400a472e3 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -113,22 +113,58 @@ extern int platform_get_irq_byname_optional(struct platform_device *dev,
const char *name);
extern int platform_add_devices(struct platform_device **, int);
+/**
+ * struct platform_device_info - set of parameters for creating a platform device
+ * @parent: parent device for the new platform device.
+ * @fwnode: firmware node associated with the device.
+ * @of_node_reused: indicates that device tree node associated with the device
+ * is shared with another device, typically its ancestor. Setting this to
+ * %true prevents the device from being matched via the OF match table,
+ * and stops the device core from automatically binding pinctrl
+ * configuration to avoid disrupting the other device.
+ * @name: name of the device.
+ * @id: instance ID of the device. Use %PLATFORM_DEVID_NONE if there is only
+ * one instance of the device, or %PLATFORM_DEVID_AUTO to let the
+ * kernel automatically assign a unique instance ID.
+ * @res: set of resources to attach to the device.
+ * @num_res: number of entries in @res.
+ * @data: device-specific data for this platform device.
+ * @size_data: size of device-specific data.
+ * @dma_mask: DMA mask for the device.
+ * @swnode: a secondary software node to be attached to the device. The node
+ * will be automatically registered and its lifetime tied to the platform
+ * device if it is not registered yet.
+ * @properties: a set of software properties for the device. If provided,
+ * a managed software node will be automatically created and
+ * assigned to the device. The properties array must be terminated
+ * with a sentinel entry. Specifying both @properties and @swnode is not
+ * allowed.
+ *
+ * This structure is used to hold information needed to create and register
+ * a platform device using platform_device_register_full().
+ *
+ * platform_device_register_full() makes deep copies of @name, @res, @data and
+ * @properties, so the caller does not need to keep them after registration.
+ * If the registration is performed during initialization, these can be marked
+ * as __initconst.
+ */
struct platform_device_info {
- struct device *parent;
- struct fwnode_handle *fwnode;
- bool of_node_reused;
+ struct device *parent;
+ struct fwnode_handle *fwnode;
+ bool of_node_reused;
- const char *name;
- int id;
+ const char *name;
+ int id;
- const struct resource *res;
- unsigned int num_res;
+ const struct resource *res;
+ unsigned int num_res;
- const void *data;
- size_t size_data;
- u64 dma_mask;
+ const void *data;
+ size_t size_data;
+ u64 dma_mask;
- const struct property_entry *properties;
+ const struct software_node *swnode;
+ const struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 93ba0143ca47..b299dc0128d6 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -49,8 +49,8 @@
struct dev_pm_domain_attach_data {
const char * const *pd_names;
- const u32 num_pd_names;
- const u32 pd_flags;
+ u32 num_pd_names;
+ u32 pd_flags;
};
struct dev_pm_domain_list {
@@ -183,6 +183,7 @@ struct genpd_power_state {
u64 rejected;
u64 above;
u64 below;
+ u64 usage_s2idle;
struct fwnode_handle *fwnode;
u64 idle_time;
void *data;
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index c417abd2ab70..d5b08313cf11 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -17,6 +17,7 @@
#define MAX17042_DEFAULT_VMAX (4500) /* LiHV cell max */
#define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */
#define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celcius */
+#define MAX17042_DEFAULT_TASK_PERIOD (5760)
/* Consider RepCap which is less then 10 units below FullCAP full */
#define MAX17042_FULL_THRESHOLD 10
@@ -105,7 +106,7 @@ enum max17042_register {
MAX17042_OCV = 0xEE,
- MAX17042_OCVInternal = 0xFB, /* MAX17055 VFOCV */
+ MAX17042_OCVInternal = 0xFB, /* MAX17055/77759 VFOCV */
MAX17042_VFSOC = 0xFF,
};
@@ -156,7 +157,7 @@ enum max17055_register {
MAX17055_AtAvCap = 0xDF,
};
-/* Registers specific to max17047/50/55 */
+/* Registers specific to max17047/50/55/77759 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
MAX17047_FullSOCThr = 0x13,
@@ -167,12 +168,32 @@ enum max17047_register {
MAX17047_QRTbl30 = 0x42,
};
+enum max77759_register {
+ MAX77759_AvgTA0 = 0x26,
+ MAX77759_AtTTF = 0x33,
+ MAX77759_Tconvert = 0x34,
+ MAX77759_AvgCurrent0 = 0x3B,
+ MAX77759_THMHOT = 0x40,
+ MAX77759_CTESample = 0x41,
+ MAX77759_ISys = 0x43,
+ MAX77759_AvgVCell0 = 0x44,
+ MAX77759_RlxSOC = 0x47,
+ MAX77759_AvgISys = 0x4B,
+ MAX77759_QH0 = 0x4C,
+ MAX77759_MixAtFull = 0x4F,
+ MAX77759_VSys = 0xB1,
+ MAX77759_TAlrtTh2 = 0xB2,
+ MAX77759_VByp = 0xB3,
+ MAX77759_IIn = 0xD0,
+};
+
enum max170xx_chip_type {
MAXIM_DEVICE_TYPE_UNKNOWN = 0,
MAXIM_DEVICE_TYPE_MAX17042,
MAXIM_DEVICE_TYPE_MAX17047,
MAXIM_DEVICE_TYPE_MAX17050,
MAXIM_DEVICE_TYPE_MAX17055,
+ MAXIM_DEVICE_TYPE_MAX77759,
MAXIM_DEVICE_TYPE_NUM
};
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
index 3d557bbcd2c7..603419db924c 100644
--- a/include/linux/powercap.h
+++ b/include/linux/powercap.h
@@ -238,7 +238,7 @@ static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
* Advantage of this parameter is that client can embed
* this data in its data structures and allocate in a
* single call, preventing multiple allocations.
-* @control_type_name: The Name of this control_type, which will be shown
+* @name: The Name of this control_type, which will be shown
* in the sysfs Interface.
* @ops: Callbacks for control type. This parameter is optional.
*
@@ -277,7 +277,7 @@ int powercap_unregister_control_type(struct powercap_control_type *instance);
* @name: A name for this zone.
* @parent: A pointer to the parent power zone instance if any or NULL
* @ops: Pointer to zone operation callback structure.
-* @no_constraints: Number of constraints for this zone
+* @nr_constraints: Number of constraints for this zone
* @const_ops: Pointer to constraint callback structure
*
* Register a power zone under a given control type. A power zone must register
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index f73fbea0dbc2..2f63e9a6cc88 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -55,7 +55,7 @@ extern void ppp_input(struct ppp_channel *, struct sk_buff *);
/* Called by the channel when an input error occurs, indicating
that we may have missed a packet. */
-extern void ppp_input_error(struct ppp_channel *, int code);
+extern void ppp_input_error(struct ppp_channel *);
/* Attach a channel to a given PPP unit in specified net. */
extern int ppp_register_net_channel(struct net *, struct ppp_channel *);
@@ -72,7 +72,9 @@ extern int ppp_channel_index(struct ppp_channel *);
/* Get the unit number associated with a channel, or -1 if none */
extern int ppp_unit_number(struct ppp_channel *);
-/* Get the device name associated with a channel, or NULL if none */
+/* Get the device name associated with a channel, or NULL if none.
+ * Caller must hold RCU read lock.
+ */
extern char *ppp_dev_name(struct ppp_channel *);
/*
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 63d516c873b4..54e3c621fec3 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -801,6 +801,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+#if defined(DEBUG)
+#define print_hex_dump_devel(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#else
+static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+}
+#endif
+
/**
* print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
* @prefix_str: string to prefix each line with;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 69ffa4b4d1fa..d5099a2baca5 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -829,12 +829,14 @@ struct sev_data_range_list {
*
* @len: length of the command buffer read by the PSP
* @iommu_snp_shutdown: Disable enforcement of SNP in the IOMMU
+ * @x86_snp_shutdown: Disable SNP on all cores
* @rsvd1: reserved
*/
struct sev_data_snp_shutdown_ex {
u32 len;
u32 iommu_snp_shutdown:1;
- u32 rsvd1:31;
+ u32 x86_snp_shutdown:1;
+ u32 rsvd1:30;
} __packed;
/**
@@ -891,6 +893,7 @@ struct snp_feature_info {
} __packed;
/* Feature bits in ECX */
+#define SNP_X86_SHUTDOWN_SUPPORTED BIT(1)
#define SNP_RAPL_DISABLE_SUPPORTED BIT(2)
#define SNP_CIPHER_TEXT_HIDING_SUPPORTED BIT(3)
#define SNP_AES_256_XTS_POLICY_SUPPORTED BIT(4)
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 534531807d95..d2c3629bbe45 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -48,7 +48,7 @@ struct ptr_ring {
*/
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
- return r->queue[r->producer];
+ return data_race(r->queue[r->producer]);
}
static inline bool ptr_ring_full(struct ptr_ring *r)
@@ -103,7 +103,7 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
*/
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
- if (unlikely(!r->size) || r->queue[r->producer])
+ if (unlikely(!r->size) || data_race(r->queue[r->producer]))
return -ENOSPC;
/* Make sure the pointer we are storing points to a valid data. */
@@ -194,7 +194,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
if (likely(r->size))
- return !r->queue[READ_ONCE(r->consumer_head)];
+ return !data_race(r->queue[READ_ONCE(r->consumer_head)]);
return true;
}
@@ -256,7 +256,7 @@ static inline void __ptr_ring_zero_tail(struct ptr_ring *r, int consumer_head)
* besides the first one until we write out all entries.
*/
while (likely(head > r->consumer_tail))
- r->queue[--head] = NULL;
+ data_race(r->queue[--head] = NULL);
r->consumer_tail = consumer_head;
}
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index c334f82ed385..f9c0f9d7c9d9 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -44,14 +44,7 @@ int dquot_initialize(struct inode *inode);
bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
-static inline struct dquot *dqgrab(struct dquot *dquot)
-{
- /* Make sure someone else has active reference to dquot */
- WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
- WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
- atomic_inc(&dquot->dq_count);
- return dquot;
-}
+struct dquot *dqgrab(struct dquot *dquot);
static inline bool dquot_is_busy(struct dquot *dquot)
{
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 51b811b62322..870558c9d36e 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -2,29 +2,6 @@
#ifndef _XOR_H
#define _XOR_H
-#define MAX_XOR_BLOCKS 4
+void xor_gen(void *dest, void **srcs, unsigned int src_cnt, unsigned int bytes);
-extern void xor_blocks(unsigned int count, unsigned int bytes,
- void *dest, void **srcs);
-
-struct xor_block_template {
- struct xor_block_template *next;
- const char *name;
- int speed;
- void (*do_2)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict);
- void (*do_3)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict);
- void (*do_4)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict);
- void (*do_5)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict);
-};
-
-#endif
+#endif /* _XOR_H */
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 1d982dbdd0d0..024fc20e7762 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -6,10 +6,10 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>
+#include <linux/prandom.h>
DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
randomize_kstack_offset);
-DECLARE_PER_CPU(u32, kstack_offset);
/*
* Do not use this anywhere else in the kernel. This is used here because
@@ -46,53 +46,39 @@ DECLARE_PER_CPU(u32, kstack_offset);
#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111111100)
#endif
+DECLARE_PER_CPU(struct rnd_state, kstack_rnd_state);
+
+static __always_inline u32 get_kstack_offset(void)
+{
+ struct rnd_state *state;
+ u32 rnd;
+
+ state = &get_cpu_var(kstack_rnd_state);
+ rnd = prandom_u32_state(state);
+ put_cpu_var(kstack_rnd_state);
+
+ return rnd;
+}
+
/**
- * add_random_kstack_offset - Increase stack utilization by previously
- * chosen random offset
+ * add_random_kstack_offset - Increase stack utilization by a random offset.
*
- * This should be used in the syscall entry path when interrupts and
- * preempt are disabled, and after user registers have been stored to
- * the stack. For testing the resulting entropy, please see:
- * tools/testing/selftests/lkdtm/stack-entropy.sh
+ * This should be used in the syscall entry path after user registers have been
+ * stored to the stack. Preemption may be enabled. For testing the resulting
+ * entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh
*/
#define add_random_kstack_offset() do { \
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
- u32 offset = raw_cpu_read(kstack_offset); \
+ u32 offset = get_kstack_offset(); \
u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
/* Keep allocation even after "ptr" loses scope. */ \
asm volatile("" :: "r"(ptr) : "memory"); \
} \
} while (0)
-/**
- * choose_random_kstack_offset - Choose the random offset for the next
- * add_random_kstack_offset()
- *
- * This should only be used during syscall exit when interrupts and
- * preempt are disabled. This position in the syscall flow is done to
- * frustrate attacks from userspace attempting to learn the next offset:
- * - Maximize the timing uncertainty visible from userspace: if the
- * offset is chosen at syscall entry, userspace has much more control
- * over the timing between choosing offsets. "How long will we be in
- * kernel mode?" tends to be more difficult to predict than "how long
- * will we be in user mode?"
- * - Reduce the lifetime of the new offset sitting in memory during
- * kernel mode execution. Exposure of "thread-local" memory content
- * (e.g. current, percpu, etc) tends to be easier than arbitrary
- * location memory exposure.
- */
-#define choose_random_kstack_offset(rand) do { \
- if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
- &randomize_kstack_offset)) { \
- u32 offset = raw_cpu_read(kstack_offset); \
- offset = ror32(offset, 5) ^ (rand); \
- raw_cpu_write(kstack_offset, offset); \
- } \
-} while (0)
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset() do { } while (0)
-#define choose_random_kstack_offset(rand) do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#endif
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 4091e978aef2..48acdc3889dd 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -35,10 +35,15 @@
#define RB_CLEAR_NODE(node) \
((node)->__rb_parent_color = (unsigned long)(node))
+#define RB_EMPTY_LINKED_NODE(lnode) RB_EMPTY_NODE(&(lnode)->node)
+#define RB_CLEAR_LINKED_NODE(lnode) ({ \
+ RB_CLEAR_NODE(&(lnode)->node); \
+ (lnode)->prev = (lnode)->next = NULL; \
+})
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
-
+extern bool rb_erase_linked(struct rb_node_linked *, struct rb_root_linked *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
@@ -213,15 +218,10 @@ rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
return leftmost ? node : NULL;
}
-/**
- * rb_add() - insert @node into @tree
- * @node: node to insert
- * @tree: tree to insert @node into
- * @less: operator defining the (partial) node order
- */
static __always_inline void
-rb_add(struct rb_node *node, struct rb_root *tree,
- bool (*less)(struct rb_node *, const struct rb_node *))
+__rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *),
+ void (*linkop)(struct rb_node *, struct rb_node *, struct rb_node **))
{
struct rb_node **link = &tree->rb_node;
struct rb_node *parent = NULL;
@@ -234,10 +234,73 @@ rb_add(struct rb_node *node, struct rb_root *tree,
link = &parent->rb_right;
}
+ linkop(node, parent, link);
rb_link_node(node, parent, link);
rb_insert_color(node, tree);
}
+#define __node_2_linked_node(_n) \
+ rb_entry((_n), struct rb_node_linked, node)
+
+static inline void
+rb_link_linked_node(struct rb_node *node, struct rb_node *parent, struct rb_node **link)
+{
+ if (!parent)
+ return;
+
+ struct rb_node_linked *nnew = __node_2_linked_node(node);
+ struct rb_node_linked *npar = __node_2_linked_node(parent);
+
+ if (link == &parent->rb_left) {
+ nnew->prev = npar->prev;
+ nnew->next = npar;
+ npar->prev = nnew;
+ if (nnew->prev)
+ nnew->prev->next = nnew;
+ } else {
+ nnew->next = npar->next;
+ nnew->prev = npar;
+ npar->next = nnew;
+ if (nnew->next)
+ nnew->next->prev = nnew;
+ }
+}
+
+/**
+ * rb_add_linked() - insert @node into the leftmost linked tree @tree
+ * @node: node to insert
+ * @tree: linked tree to insert @node into
+ * @less: operator defining the (partial) node order
+ *
+ * Returns %true when @node is the new leftmost, %false otherwise.
+ */
+static __always_inline bool
+rb_add_linked(struct rb_node_linked *node, struct rb_root_linked *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ __rb_add(&node->node, &tree->rb_root, less, rb_link_linked_node);
+ if (!node->prev)
+ tree->rb_leftmost = node;
+ return !node->prev;
+}
+
+/* Empty linkop function which is optimized away by the compiler */
+static __always_inline void
+rb_link_noop(struct rb_node *n, struct rb_node *p, struct rb_node **l) { }
+
+/**
+ * rb_add() - insert @node into @tree
+ * @node: node to insert
+ * @tree: tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ __rb_add(node, tree, less, rb_link_noop);
+}
+
/**
* rb_find_add_cached() - find equivalent @node in @tree, or add @node
* @node: node to look-for / insert
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
index 45b6ecde3665..3c7ae53e8139 100644
--- a/include/linux/rbtree_types.h
+++ b/include/linux/rbtree_types.h
@@ -9,6 +9,12 @@ struct rb_node {
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
+struct rb_node_linked {
+ struct rb_node node;
+ struct rb_node_linked *prev;
+ struct rb_node_linked *next;
+};
+
struct rb_root {
struct rb_node *rb_node;
};
@@ -28,7 +34,17 @@ struct rb_root_cached {
struct rb_node *rb_leftmost;
};
+/*
+ * Leftmost tree with links. This would allow a trivial rb_rightmost update,
+ * but that has been omitted due to the lack of users.
+ */
+struct rb_root_linked {
+ struct rb_root rb_root;
+ struct rb_node_linked *rb_leftmost;
+};
+
#define RB_ROOT (struct rb_root) { NULL, }
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+#define RB_ROOT_LINKED (struct rb_root_linked) { {NULL, }, NULL }
#endif
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
index 0b952d06eb0b..36363b876e53 100644
--- a/include/linux/rculist_bl.h
+++ b/include/linux/rculist_bl.h
@@ -8,21 +8,31 @@
#include <linux/list_bl.h>
#include <linux/rcupdate.h>
+/* Return the first pointer or the next element in an RCU-protected list */
+#define hlist_bl_first_rcu(head) \
+ (*((struct hlist_bl_node __rcu **)(&(head)->first)))
+#define hlist_bl_next_rcu(node) \
+ (*((struct hlist_bl_node __rcu **)(&(node)->next)))
+
static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
struct hlist_bl_node *n)
{
LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
LIST_BL_LOCKMASK);
- rcu_assign_pointer(h->first,
+ rcu_assign_pointer(hlist_bl_first_rcu(h),
(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}
-static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
-{
- return (struct hlist_bl_node *)
- ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
-}
+#define hlist_bl_first_rcu_dereference(head) \
+({ \
+ struct hlist_bl_head *__head = (head); \
+ \
+ (struct hlist_bl_node *) \
+ ((unsigned long)rcu_dereference_check(hlist_bl_first_rcu(__head), \
+ hlist_bl_is_locked(__head)) & \
+ ~LIST_BL_LOCKMASK); \
+})
/**
* hlist_bl_del_rcu - deletes entry from hash list without re-initialization
@@ -73,7 +83,7 @@ static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
{
struct hlist_bl_node *first;
- /* don't need hlist_bl_first_rcu because we're under lock */
+ /* don't need hlist_bl_first_rcu* because we're under lock */
first = hlist_bl_first(h);
n->next = first;
@@ -93,9 +103,30 @@ static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
*
*/
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = hlist_bl_first_rcu(head); \
+ for (pos = hlist_bl_first_rcu_dereference(head); \
pos && \
({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(hlist_bl_next_rcu(pos)))
+
+/**
+ * hlist_bl_for_each_entry_continue_rcu - continue iteration over list of given
+ * type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_bl_node to use as a loop cursor.
+ * @member: the name of the hlist_bl_node within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ */
+#define hlist_bl_for_each_entry_continue_rcu(tpos, pos, member) \
+ for (pos = rcu_dereference_raw(hlist_bl_next_rcu(&(tpos)->member)); \
+ pos && \
+ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_raw(hlist_bl_next_rcu(pos)))
#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 04f3f86a4145..bfa765132de8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -206,18 +206,6 @@ static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
- * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
- *
- * As an accident of implementation, an RCU Tasks Trace grace period also
- * acts as an RCU grace period. However, this could change at any time.
- * Code relying on this accident must call this function to verify that
- * this accident is still happening.
- *
- * You have been warned!
- */
-static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
-
-/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 3da377ffb0c2..ba7657ced281 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -170,7 +170,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-static inline __must_check __signed_wrap
+static inline __must_check
bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
int old = refcount_read(r);
@@ -212,7 +212,7 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
return __refcount_add_not_zero(i, r, NULL);
}
-static inline __must_check __signed_wrap
+static inline __must_check
bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
int limit)
{
@@ -244,7 +244,7 @@ __refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
}
-static inline __must_check __signed_wrap
+static inline __must_check
bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
{
return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
@@ -277,7 +277,7 @@ static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t
return __refcount_add_not_zero_acquire(i, r, NULL);
}
-static inline __signed_wrap
+static inline
void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);
@@ -383,7 +383,7 @@ static inline void refcount_inc(refcount_t *r)
__refcount_inc(r, NULL);
}
-static inline __must_check __signed_wrap
+static inline __must_check
bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(i, &r->refs);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index caff2240bdab..df44cb30f53b 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -10,15 +10,16 @@
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*/
-#include <linux/list.h>
-#include <linux/rbtree.h>
-#include <linux/ktime.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/bug.h>
-#include <linux/lockdep.h>
-#include <linux/iopoll.h>
#include <linux/fwnode.h>
+#include <linux/iopoll.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/rbtree.h>
struct module;
struct clk;
@@ -692,6 +693,10 @@ struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_i3c(struct i3c_device *i3c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -999,6 +1004,19 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
dev, sdw, config, mbq_config)
/**
+ * regmap_init_i3c() - Initialise register map
+ *
+ * @i3c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i3c(i3c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_i3c, #config, \
+ i3c, config)
+
+/**
* regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
* to AVMM Bus Bridge
*
@@ -1460,6 +1478,8 @@ struct regmap_field *regmap_field_alloc(struct regmap *regmap,
struct reg_field reg_field);
void regmap_field_free(struct regmap_field *field);
+DEFINE_FREE(regmap_field, struct regmap_field *, if (_T) regmap_field_free(_T))
+
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
struct regmap *regmap, struct reg_field reg_field);
void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
index 344ff41c22c7..4070537d6542 100644
--- a/include/linux/remoteproc/mtk_scp.h
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -58,7 +58,7 @@ int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler,
void *priv);
void scp_ipi_unregister(struct mtk_scp *scp, u32 id);
-int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+int scp_ipi_send(struct mtk_scp *scp, u32 id, const void *buf, unsigned int len,
unsigned int wait);
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp);
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 46514cb1b9e0..52a5a4e81f18 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -3,7 +3,10 @@
#define _LINUX_RESET_CONTROLLER_H_
#include <linux/list.h>
+#include <linux/mutex.h>
+struct fwnode_handle;
+struct fwnode_reference_args;
struct reset_controller_dev;
/**
@@ -35,14 +38,16 @@ struct of_phandle_args;
* @reset_control_head: head of internal list of requested reset controls
* @dev: corresponding driver model device struct
* @of_node: corresponding device tree node as phandle target
- * @of_args: for reset-gpios controllers: corresponding phandle args with
- * of_node and GPIO number complementing of_node; either this or
- * of_node should be present
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
- * device tree to id as given to the reset control ops, defaults
- * to :c:func:`of_reset_simple_xlate`.
+ * device tree to id as given to the reset control ops
+ * @fwnode: firmware node associated with this device
+ * @fwnode_reset_n_cells: number of cells in reset line specifiers
+ * @fwnode_xlate: translation function to translate from firmware specifier to
+ * id as given to the reset control ops, defaults to
+ * :c:func:`fwnode_reset_simple_xlate`
* @nr_resets: number of reset controls in this reset controller device
+ * @lock: protects the reset control list from concurrent access
*/
struct reset_controller_dev {
const struct reset_control_ops *ops;
@@ -51,11 +56,15 @@ struct reset_controller_dev {
struct list_head reset_control_head;
struct device *dev;
struct device_node *of_node;
- const struct of_phandle_args *of_args;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec);
+ struct fwnode_handle *fwnode;
+ int fwnode_reset_n_cells;
+ int (*fwnode_xlate)(struct reset_controller_dev *rcdev,
+ const struct fwnode_reference_args *reset_spec);
unsigned int nr_resets;
+ struct mutex lock;
};
#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 44f9e3415f92..9c391cf0c822 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -5,10 +5,12 @@
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/of.h>
#include <linux/types.h>
struct device;
struct device_node;
+struct fwnode_handle;
struct reset_control;
/**
@@ -84,7 +86,7 @@ int reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *r
int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs);
void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
-struct reset_control *__of_reset_control_get(struct device_node *node,
+struct reset_control *__fwnode_reset_control_get(struct fwnode_handle *fwnode,
const char *id, int index, enum reset_control_flags flags);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, enum reset_control_flags flags);
@@ -103,7 +105,8 @@ int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control *devm_reset_control_array_get(struct device *dev,
enum reset_control_flags flags);
-struct reset_control *of_reset_control_array_get(struct device_node *np, enum reset_control_flags);
+struct reset_control *fwnode_reset_control_array_get(struct fwnode_handle *fwnode,
+ enum reset_control_flags);
int reset_control_get_count(struct device *dev);
@@ -152,8 +155,8 @@ static inline int __device_reset(struct device *dev, bool optional)
return optional ? 0 : -ENOTSUPP;
}
-static inline struct reset_control *__of_reset_control_get(
- struct device_node *node,
+static inline struct reset_control *__fwnode_reset_control_get(
+ struct fwnode_handle *fwnode,
const char *id, int index, enum reset_control_flags flags)
{
bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
@@ -242,7 +245,7 @@ devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags)
}
static inline struct reset_control *
-of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags)
+fwnode_reset_control_array_get(struct fwnode_handle *fwnode, enum reset_control_flags flags)
{
bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
@@ -500,7 +503,8 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE);
+ return __fwnode_reset_control_get(of_fwnode_handle(node), id, 0,
+ RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -520,7 +524,8 @@ static inline struct reset_control *of_reset_control_get_exclusive(
static inline struct reset_control *of_reset_control_get_optional_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
+ return __fwnode_reset_control_get(of_fwnode_handle(node), id, 0,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -545,7 +550,8 @@ static inline struct reset_control *of_reset_control_get_optional_exclusive(
static inline struct reset_control *of_reset_control_get_shared(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED);
+ return __fwnode_reset_control_get(of_fwnode_handle(node), id, 0,
+ RESET_CONTROL_SHARED);
}
/**
@@ -562,7 +568,8 @@ static inline struct reset_control *of_reset_control_get_shared(
static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE);
+ return __fwnode_reset_control_get(of_fwnode_handle(node), NULL, index,
+ RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -590,7 +597,8 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
static inline struct reset_control *of_reset_control_get_shared_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED);
+ return __fwnode_reset_control_get(of_fwnode_handle(node), NULL, index,
+ RESET_CONTROL_SHARED);
}
/**
@@ -1032,30 +1040,35 @@ devm_reset_control_array_get_optional_shared(struct device *dev)
static inline struct reset_control *
of_reset_control_array_get_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE);
+ return fwnode_reset_control_array_get(of_fwnode_handle(node),
+ RESET_CONTROL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive_released(struct device_node *node)
{
- return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED);
+ return fwnode_reset_control_array_get(of_fwnode_handle(node),
+ RESET_CONTROL_EXCLUSIVE_RELEASED);
}
static inline struct reset_control *
of_reset_control_array_get_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, RESET_CONTROL_SHARED);
+ return fwnode_reset_control_array_get(of_fwnode_handle(node),
+ RESET_CONTROL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_optional_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
+ return fwnode_reset_control_array_get(of_fwnode_handle(node),
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_optional_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED);
+ return fwnode_reset_control_array_get(of_fwnode_handle(node),
+ RESET_CONTROL_OPTIONAL_SHARED);
}
#endif
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 133ccb39137a..0480509a6339 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -129,10 +129,10 @@ static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
unsigned int hash;
/* params must be equal to ht->p if it isn't constant. */
- if (!__builtin_constant_p(params.key_len))
+ if (!__builtin_constant_p(params.key_len)) {
hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
- else if (params.key_len) {
- unsigned int key_len = params.key_len;
+ } else {
+ unsigned int key_len = params.key_len ? : ht->p.key_len;
if (params.hashfn)
hash = params.hashfn(key, key_len, hash_rnd);
@@ -140,13 +140,6 @@ static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
hash = jhash(key, key_len, hash_rnd);
else
hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
- } else {
- unsigned int key_len = ht->p.key_len;
-
- if (params.hashfn)
- hash = params.hashfn(key, key_len, hash_rnd);
- else
- hash = jhash(key, key_len, hash_rnd);
}
return hash;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index d862fa610270..994f52b34344 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -251,4 +251,62 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
void ring_buffer_map_dup(struct trace_buffer *buffer, int cpu);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
+
+struct ring_buffer_desc {
+ int cpu;
+ unsigned int nr_page_va; /* excludes the meta page */
+ unsigned long meta_va;
+ unsigned long page_va[] __counted_by(nr_page_va);
+};
+
+struct trace_buffer_desc {
+ int nr_cpus;
+ size_t struct_len;
+ char __data[]; /* list of ring_buffer_desc */
+};
+
+static inline struct ring_buffer_desc *__next_ring_buffer_desc(struct ring_buffer_desc *desc)
+{
+ size_t len = struct_size(desc, page_va, desc->nr_page_va);
+
+ return (struct ring_buffer_desc *)((void *)desc + len);
+}
+
+static inline struct ring_buffer_desc *__first_ring_buffer_desc(struct trace_buffer_desc *desc)
+{
+ return (struct ring_buffer_desc *)(&desc->__data[0]);
+}
+
+static inline size_t trace_buffer_desc_size(size_t buffer_size, unsigned int nr_cpus)
+{
+ unsigned int nr_pages = max(DIV_ROUND_UP(buffer_size, PAGE_SIZE), 2UL) + 1;
+ struct ring_buffer_desc *rbdesc;
+
+ return size_add(offsetof(struct trace_buffer_desc, __data),
+ size_mul(nr_cpus, struct_size(rbdesc, page_va, nr_pages)));
+}
+
+#define for_each_ring_buffer_desc(__pdesc, __cpu, __trace_pdesc) \
+ for (__pdesc = __first_ring_buffer_desc(__trace_pdesc), __cpu = 0; \
+ (__cpu) < (__trace_pdesc)->nr_cpus; \
+ (__cpu)++, __pdesc = __next_ring_buffer_desc(__pdesc))
+
+struct ring_buffer_remote {
+ struct trace_buffer_desc *desc;
+ int (*swap_reader_page)(unsigned int cpu, void *priv);
+ int (*reset)(unsigned int cpu, void *priv);
+ void *priv;
+};
+
+int ring_buffer_poll_remote(struct trace_buffer *buffer, int cpu);
+
+struct trace_buffer *
+__ring_buffer_alloc_remote(struct ring_buffer_remote *remote,
+ struct lock_class_key *key);
+
+#define ring_buffer_alloc_remote(remote) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_remote(remote, &__key); \
+})
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/linux/ring_buffer_types.h b/include/linux/ring_buffer_types.h
new file mode 100644
index 000000000000..54577021a49d
--- /dev/null
+++ b/include/linux/ring_buffer_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RING_BUFFER_TYPES_H
+#define _LINUX_RING_BUFFER_TYPES_H
+
+#include <asm/local.h>
+
+#define TS_SHIFT 27
+#define TS_MASK ((1ULL << TS_SHIFT) - 1)
+#define TS_DELTA_TEST (~TS_MASK)
+
+/*
+ * We need to fit the time_stamp delta into 27 bits.
+ */
+static inline bool test_time_stamp(u64 delta)
+{
+ return !!(delta & TS_DELTA_TEST);
+}
+
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
+
+#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
+#define RB_ALIGNMENT 4U
+#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
+#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
+
+#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
+# define RB_FORCE_8BYTE_ALIGNMENT 0
+# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT 1
+# define RB_ARCH_ALIGNMENT 8U
+#endif
+
+#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
+
+struct buffer_data_page {
+ u64 time_stamp; /* page time stamp */
+ local_t commit; /* write committed index */
+ unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
+};
+#endif /* _LINUX_RING_BUFFER_TYPES_H */
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index fb7ab9165645..83266ce14642 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -182,11 +182,11 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
-int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
-int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_send(struct rpmsg_endpoint *ept, const void *data, int len);
+int rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data, int len, u32 dst);
-int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
-int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data, int len);
+int rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data, int len, u32 dst);
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
@@ -249,7 +249,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev
return NULL;
}
-static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+static inline int rpmsg_send(struct rpmsg_endpoint *ept, const void *data, int len)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -257,7 +257,7 @@ static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
return -ENXIO;
}
-static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
+static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data, int len,
u32 dst)
{
/* This shouldn't be possible */
@@ -267,7 +267,8 @@ static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
}
-static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data,
+ int len)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -275,7 +276,7 @@ static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return -ENXIO;
}
-static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
+static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data,
int len, u32 dst)
{
/* This shouldn't be possible */
diff --git a/include/linux/rpmsg/mtk_rpmsg.h b/include/linux/rpmsg/mtk_rpmsg.h
index 363b60178040..badcbc89917f 100644
--- a/include/linux/rpmsg/mtk_rpmsg.h
+++ b/include/linux/rpmsg/mtk_rpmsg.h
@@ -25,7 +25,7 @@ struct mtk_rpmsg_info {
ipi_handler_t handler, void *priv);
void (*unregister_ipi)(struct platform_device *pdev, u32 id);
int (*send_ipi)(struct platform_device *pdev, u32 id,
- void *buf, unsigned int len, unsigned int wait);
+ const void *buf, unsigned int len, unsigned int wait);
int ns_ipi_id;
};
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index c6831c93cd6e..f11ebd34f8b9 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -40,6 +40,7 @@ DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
#endif /* !CONFIG_RSEQ_STATS */
#ifdef CONFIG_RSEQ
+#include <linux/hrtimer_rearm.h>
#include <linux/jump_label.h>
#include <linux/rseq.h>
#include <linux/sched/signal.h>
@@ -110,7 +111,7 @@ static __always_inline void rseq_slice_clear_grant(struct task_struct *t)
t->rseq.slice.state.granted = false;
}
-static __always_inline bool rseq_grant_slice_extension(bool work_pending)
+static __always_inline bool __rseq_grant_slice_extension(bool work_pending)
{
struct task_struct *curr = current;
struct rseq_slice_ctrl usr_ctrl;
@@ -215,11 +216,20 @@ efault:
return false;
}
+static __always_inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask)
+{
+ if (unlikely(__rseq_grant_slice_extension(ti_work & mask))) {
+ hrtimer_rearm_deferred_tif(ti_work);
+ return true;
+ }
+ return false;
+}
+
#else /* CONFIG_RSEQ_SLICE_EXTENSION */
static __always_inline bool rseq_slice_extension_enabled(void) { return false; }
static __always_inline bool rseq_arm_slice_extension_timer(void) { return false; }
static __always_inline void rseq_slice_clear_grant(struct task_struct *t) { }
-static __always_inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+static __always_inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask) { return false; }
#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
@@ -778,7 +788,7 @@ static inline void rseq_syscall_exit_to_user_mode(void) { }
static inline void rseq_irqentry_exit_to_user_mode(void) { }
static inline void rseq_exit_to_user_mode_legacy(void) { }
static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
-static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+static inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask) { return false; }
#endif /* !CONFIG_RSEQ */
#endif /* _LINUX_RSEQ_ENTRY_H */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index ede4c6bf6f22..78e7e588817c 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -22,8 +22,8 @@ extern int max_lock_depth;
struct rt_mutex_base {
raw_spinlock_t wait_lock;
- struct rb_root_cached waiters;
- struct task_struct *owner;
+ struct rb_root_cached waiters __guarded_by(&wait_lock);
+ struct task_struct *owner __guarded_by(&wait_lock);
};
#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
@@ -41,7 +41,7 @@ struct rt_mutex_base {
*/
static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
{
- return READ_ONCE(lock->owner) != NULL;
+ return data_race(READ_ONCE(lock->owner) != NULL);
}
#ifdef CONFIG_RT_MUTEXES
@@ -49,7 +49,7 @@ static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
- unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+ unsigned long owner = (unsigned long) data_race(READ_ONCE(lock->owner));
return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 58774eb3aecf..541ba404926a 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -13,6 +13,7 @@
#define RV_MON_GLOBAL 0
#define RV_MON_PER_CPU 1
#define RV_MON_PER_TASK 2
+#define RV_MON_PER_OBJ 3
#ifdef CONFIG_RV
#include <linux/array_size.h>
@@ -81,11 +82,49 @@ struct ltl_monitor {};
#endif /* CONFIG_RV_LTL_MONITOR */
+#ifdef CONFIG_RV_HA_MONITOR
+/*
+ * In the future, hybrid automata may rely on multiple
+ * environment variables, e.g. different clocks started at
+ * different times or running at different speed.
+ * For now we support only 1 variable.
+ */
+#define MAX_HA_ENV_LEN 1
+
+/*
+ * Monitors can pick the preferred timer implementation:
+ * No timer: if monitors don't have state invariants.
+ * Timer wheel: lightweight invariants check but far less precise.
+ * Hrtimer: accurate invariants check with higher overhead.
+ */
+#define HA_TIMER_NONE 0
+#define HA_TIMER_WHEEL 1
+#define HA_TIMER_HRTIMER 2
+
+/*
+ * Hybrid automaton per-object variables.
+ */
+struct ha_monitor {
+ struct da_monitor da_mon;
+ u64 env_store[MAX_HA_ENV_LEN];
+ union {
+ struct hrtimer hrtimer;
+ struct timer_list timer;
+ };
+};
+
+#else
+
+struct ha_monitor { };
+
+#endif /* CONFIG_RV_HA_MONITOR */
+
#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS)
union rv_task_monitor {
struct da_monitor da_mon;
struct ltl_monitor ltl_mon;
+ struct ha_monitor ha_mon;
};
#ifdef CONFIG_RV_REACTORS
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 3390d21c95dd..4e67cd934d8f 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -30,17 +30,27 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
- extern int do_raw_read_trylock(rwlock_t *lock);
+ extern int do_raw_read_trylock(rwlock_t *lock) __cond_acquires_shared(true, lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
- extern int do_raw_write_trylock(rwlock_t *lock);
+ extern int do_raw_write_trylock(rwlock_t *lock) __cond_acquires(true, lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
# define do_raw_read_lock(rwlock) do {__acquire_shared(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+static inline int do_raw_read_trylock(rwlock_t *rwlock)
+ __cond_acquires_shared(true, rwlock)
+ __no_context_analysis
+{
+ return arch_read_trylock(&(rwlock)->raw_lock);
+}
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release_shared(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+static inline int do_raw_write_trylock(rwlock_t *rwlock)
+ __cond_acquires(true, rwlock)
+ __no_context_analysis
+{
+ return arch_write_trylock(&(rwlock)->raw_lock);
+}
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 61a852609eab..9e02a5f28cd1 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -23,7 +23,7 @@ void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
+ __acquires_shared(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
int __lockfunc _raw_read_trylock(rwlock_t *lock) __cond_acquires_shared(true, lock);
@@ -36,7 +36,7 @@ void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
+ __releases_shared(lock);
void __lockfunc
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
@@ -116,6 +116,7 @@ _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
#endif
static inline int __raw_read_trylock(rwlock_t *lock)
+ __cond_acquires_shared(true, lock)
{
preempt_disable();
if (do_raw_read_trylock(lock)) {
@@ -127,6 +128,7 @@ static inline int __raw_read_trylock(rwlock_t *lock)
}
static inline int __raw_write_trylock(rwlock_t *lock)
+ __cond_acquires(true, lock)
{
preempt_disable();
if (do_raw_write_trylock(lock)) {
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 9bf1d93d3d7b..6a1a7bae5f81 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -57,7 +57,7 @@ context_lock_struct(rw_semaphore) {
struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
raw_spinlock_t wait_lock;
- struct list_head wait_list;
+ struct rwsem_waiter *first_waiter __guarded_by(&wait_lock);
#ifdef CONFIG_DEBUG_RWSEMS
void *magic;
#endif
@@ -106,7 +106,7 @@ static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *
.owner = ATOMIC_LONG_INIT(0), \
__RWSEM_OPT_INIT(name) \
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .first_waiter = NULL, \
__RWSEM_DEBUG_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
@@ -129,9 +129,9 @@ do { \
* rwsem to see if somebody from an incompatible type is wanting access to the
* lock.
*/
-static inline int rwsem_is_contended(struct rw_semaphore *sem)
+static inline bool rwsem_is_contended(struct rw_semaphore *sem)
{
- return !list_empty(&sem->wait_list);
+ return data_race(sem->first_waiter != NULL);
}
#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a5d3dbc9cdf..004e6d56a499 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -949,6 +949,10 @@ struct task_struct {
struct srcu_ctr __percpu *trc_reader_scp;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+#ifdef CONFIG_TRIVIAL_PREEMPT_RCU
+ int rcu_trivial_preempt_nesting;
+#endif /* #ifdef CONFIG_TRIVIAL_PREEMPT_RCU */
+
struct sched_info sched_info;
struct list_head tasks;
@@ -1159,12 +1163,9 @@ struct task_struct {
/*
* executable name, excluding path.
*
- * - normally initialized begin_new_exec()
- * - set it with set_task_comm()
- * - strscpy_pad() to ensure it is always NUL-terminated and
- * zero-padded
- * - task_lock() to ensure the operation is atomic and the name is
- * fully updated.
+ * - normally initialized by begin_new_exec()
+ * - set it with set_task_comm() to ensure it is always
+ * NUL-terminated and zero-padded
*/
char comm[TASK_COMM_LEN];
@@ -1238,6 +1239,7 @@ struct task_struct {
#endif
struct mutex *blocked_on; /* lock we're blocked on */
+ raw_spinlock_t blocked_lock;
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
/*
@@ -2179,61 +2181,85 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
})
#ifndef CONFIG_PREEMPT_RT
+
+/*
+ * With proxy exec, if a task has been proxy-migrated, it may be a donor
+ * on a cpu that it can't actually run on. Thus we need a special state
+ * to denote that the task is being woken, but that it needs to be
+ * evaluated for return-migration before it is run. So if the task is
+ * blocked_on PROXY_WAKING, return migrate it before running it.
+ */
+#define PROXY_WAKING ((struct mutex *)(-1L))
+
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
- struct mutex *m = p->blocked_on;
-
- if (m)
- lockdep_assert_held_once(&m->wait_lock);
- return m;
+ lockdep_assert_held_once(&p->blocked_lock);
+ return p->blocked_on == PROXY_WAKING ? NULL : p->blocked_on;
}
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- struct mutex *blocked_on = READ_ONCE(p->blocked_on);
-
WARN_ON_ONCE(!m);
/* The task should only be setting itself as blocked */
WARN_ON_ONCE(p != current);
- /* Currently we serialize blocked_on under the mutex::wait_lock */
- lockdep_assert_held_once(&m->wait_lock);
+ /* Currently we serialize blocked_on under the task::blocked_lock */
+ lockdep_assert_held_once(&p->blocked_lock);
/*
* Check ensure we don't overwrite existing mutex value
* with a different mutex. Note, setting it to the same
* lock repeatedly is ok.
*/
- WARN_ON_ONCE(blocked_on && blocked_on != m);
- WRITE_ONCE(p->blocked_on, m);
+ WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
+ p->blocked_on = m;
}
-static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
+static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- guard(raw_spinlock_irqsave)(&m->wait_lock);
- __set_task_blocked_on(p, m);
+ /* Currently we serialize blocked_on under the task::blocked_lock */
+ lockdep_assert_held_once(&p->blocked_lock);
+ /*
+ * There may be cases where we re-clear already cleared
+ * blocked_on relationships, but make sure we are not
+ * clearing the relationship with a different lock.
+ */
+ WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m && p->blocked_on != PROXY_WAKING);
+ p->blocked_on = NULL;
}
-static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- if (m) {
- struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+ guard(raw_spinlock_irqsave)(&p->blocked_lock);
+ __clear_task_blocked_on(p, m);
+}
- /* Currently we serialize blocked_on under the mutex::wait_lock */
- lockdep_assert_held_once(&m->wait_lock);
- /*
- * There may be cases where we re-clear already cleared
- * blocked_on relationships, but make sure we are not
- * clearing the relationship with a different lock.
- */
- WARN_ON_ONCE(blocked_on && blocked_on != m);
+static inline void __set_task_blocked_on_waking(struct task_struct *p, struct mutex *m)
+{
+ /* Currently we serialize blocked_on under the task::blocked_lock */
+ lockdep_assert_held_once(&p->blocked_lock);
+
+ if (!sched_proxy_exec()) {
+ __clear_task_blocked_on(p, m);
+ return;
}
- WRITE_ONCE(p->blocked_on, NULL);
+
+ /* Don't set PROXY_WAKING if blocked_on was already cleared */
+ if (!p->blocked_on)
+ return;
+ /*
+ * There may be cases where we set PROXY_WAKING on tasks that were
+ * already set to waking, but make sure we are not changing
+ * the relationship with a different lock.
+ */
+ WARN_ON_ONCE(m && p->blocked_on != m && p->blocked_on != PROXY_WAKING);
+ p->blocked_on = PROXY_WAKING;
}
-static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+static inline void set_task_blocked_on_waking(struct task_struct *p, struct mutex *m)
{
- guard(raw_spinlock_irqsave)(&m->wait_lock);
- __clear_task_blocked_on(p, m);
+ guard(raw_spinlock_irqsave)(&p->blocked_lock);
+ __set_task_blocked_on_waking(p, m);
}
+
#else
static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
@@ -2242,6 +2268,14 @@ static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mute
static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}
+
+static inline void __set_task_blocked_on_waking(struct task_struct *p, struct rt_mutex *m)
+{
+}
+
+static inline void set_task_blocked_on_waking(struct task_struct *p, struct rt_mutex *m)
+{
+}
#endif /* !CONFIG_PREEMPT_RT */
static __always_inline bool need_resched(void)
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index c40115d4e34d..1198138cb839 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -37,4 +37,31 @@ extern void dl_clear_root_domain_cpu(int cpu);
extern u64 dl_cookie;
extern bool dl_bw_visited(int cpu, u64 cookie);
+static inline bool dl_server(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_server;
+}
+
+static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
+{
+ BUG_ON(dl_server(dl_se));
+ return container_of(dl_se, struct task_struct, dl);
+}
+
+/*
+ * Regarding the deadline, a task with implicit deadline has a relative
+ * deadline == relative period. A task with constrained deadline has a
+ * relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, there are some restrictions
+ * applied only for tasks which do not have an implicit deadline. See
+ * update_dl_entity() to know more about such restrictions.
+ *
+ * The dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_deadline == dl_se->dl_period;
+}
+
#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index bcb962d5ee7d..1a3af2ea2a79 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -62,6 +62,16 @@ enum scx_dsq_id_flags {
SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
};
+struct scx_deferred_reenq_user {
+ struct list_head node;
+ u64 flags;
+};
+
+struct scx_dsq_pcpu {
+ struct scx_dispatch_q *dsq;
+ struct scx_deferred_reenq_user deferred_reenq_user;
+};
+
/*
* A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
* queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
@@ -78,30 +88,58 @@ struct scx_dispatch_q {
u64 id;
struct rhash_head hash_node;
struct llist_node free_node;
+ struct scx_sched *sched;
+ struct scx_dsq_pcpu __percpu *pcpu;
struct rcu_head rcu;
};
-/* scx_entity.flags */
+/* sched_ext_entity.flags */
enum scx_ent_flags {
SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_IN_CUSTODY = 1 << 1, /* in custody, needs ops.dequeue() when leaving */
SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+ SCX_TASK_SUB_INIT = 1 << 4, /* task being initialized for a sub sched */
+ SCX_TASK_IMMED = 1 << 5, /* task is on local DSQ with %SCX_ENQ_IMMED */
- SCX_TASK_STATE_SHIFT = 8, /* bit 8 and 9 are used to carry scx_task_state */
+ /*
+ * Bits 8 and 9 are used to carry task state:
+ *
+ * NONE ops.init_task() not called yet
+ * INIT ops.init_task() succeeded, but task can be cancelled
+ * READY fully initialized, but not in sched_ext
+ * ENABLED fully initialized and in sched_ext
+ */
+ SCX_TASK_STATE_SHIFT = 8, /* bits 8 and 9 are used to carry task state */
SCX_TASK_STATE_BITS = 2,
SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
- SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
-};
+ SCX_TASK_NONE = 0 << SCX_TASK_STATE_SHIFT,
+ SCX_TASK_INIT = 1 << SCX_TASK_STATE_SHIFT,
+ SCX_TASK_READY = 2 << SCX_TASK_STATE_SHIFT,
+ SCX_TASK_ENABLED = 3 << SCX_TASK_STATE_SHIFT,
-/* scx_entity.flags & SCX_TASK_STATE_MASK */
-enum scx_task_state {
- SCX_TASK_NONE, /* ops.init_task() not called yet */
- SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
- SCX_TASK_READY, /* fully initialized, but not in sched_ext */
- SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+ /*
+ * Bits 12 and 13 are used to carry reenqueue reason. In addition to
+ * %SCX_ENQ_REENQ flag, ops.enqueue() can also test for
+ * %SCX_TASK_REENQ_REASON_NONE to distinguish reenqueues.
+ *
+ * NONE not being reenqueued
+ * KFUNC reenqueued by scx_bpf_dsq_reenq() and friends
+ * IMMED reenqueued due to failed ENQ_IMMED
+ * PREEMPTED preempted while running
+ */
+ SCX_TASK_REENQ_REASON_SHIFT = 12,
+ SCX_TASK_REENQ_REASON_BITS = 2,
+ SCX_TASK_REENQ_REASON_MASK = ((1 << SCX_TASK_REENQ_REASON_BITS) - 1) << SCX_TASK_REENQ_REASON_SHIFT,
+
+ SCX_TASK_REENQ_NONE = 0 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_KFUNC = 1 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_IMMED = 2 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_PREEMPTED = 3 << SCX_TASK_REENQ_REASON_SHIFT,
- SCX_TASK_NR_STATES,
+ /* iteration cursor, not a task */
+ SCX_TASK_CURSOR = 1 << 31,
};
/* scx_entity.dsq_flags */
@@ -109,33 +147,6 @@ enum scx_ent_dsq_flags {
SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
};
-/*
- * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
- * everywhere and the following bits track which kfunc sets are currently
- * allowed for %current. This simple per-task tracking works because SCX ops
- * nest in a limited way. BPF will likely implement a way to allow and disallow
- * kfuncs depending on the calling context which will replace this manual
- * mechanism. See scx_kf_allow().
- */
-enum scx_kf_mask {
- SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
- /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
- SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
- /*
- * ops.dispatch() may release rq lock temporarily and thus ENQUEUE and
- * SELECT_CPU may be nested inside. ops.dequeue (in REST) may also be
- * nested inside DISPATCH.
- */
- SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
- SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
- SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
- SCX_KF_REST = 1 << 4, /* other rq-locked operations */
-
- __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
- SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
- __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
-};
-
enum scx_dsq_lnode_flags {
SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
@@ -149,19 +160,31 @@ struct scx_dsq_list_node {
u32 priv; /* can be used by iter cursor */
};
-#define INIT_DSQ_LIST_CURSOR(__node, __flags, __priv) \
+#define INIT_DSQ_LIST_CURSOR(__cursor, __dsq, __flags) \
(struct scx_dsq_list_node) { \
- .node = LIST_HEAD_INIT((__node).node), \
+ .node = LIST_HEAD_INIT((__cursor).node), \
.flags = SCX_DSQ_LNODE_ITER_CURSOR | (__flags), \
- .priv = (__priv), \
+ .priv = READ_ONCE((__dsq)->seq), \
}
+struct scx_sched;
+
/*
* The following is embedded in task_struct and contains all fields necessary
* for a task to be scheduled by SCX.
*/
struct sched_ext_entity {
+#ifdef CONFIG_CGROUPS
+ /*
+ * Associated scx_sched. Updated either during fork or while holding
+ * both p->pi_lock and rq lock.
+ */
+ struct scx_sched __rcu *sched;
+#endif
struct scx_dispatch_q *dsq;
+ atomic_long_t ops_state;
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
struct scx_dsq_list_node dsq_list; /* dispatch order */
struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
u32 dsq_seq;
@@ -171,9 +194,7 @@ struct sched_ext_entity {
s32 sticky_cpu;
s32 holding_cpu;
s32 selected_cpu;
- u32 kf_mask; /* see scx_kf_mask above */
struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
- atomic_long_t ops_state;
struct list_head runnable_node; /* rq->scx.runnable_list */
unsigned long runnable_at;
@@ -181,8 +202,6 @@ struct sched_ext_entity {
#ifdef CONFIG_SCHED_CORE
u64 core_sched_at; /* see scx_prio_less() */
#endif
- u64 ddsp_dsq_id;
- u64 ddsp_enq_flags;
/* BPF scheduler modifiable fields */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index a22248aebcf9..584ae88b435e 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -132,6 +132,7 @@ struct signal_struct {
*/
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;
+ unsigned int autoreap:1;
#ifdef CONFIG_POSIX_TIMERS
@@ -739,7 +740,7 @@ static inline int thread_group_empty(struct task_struct *p)
extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
unsigned long *flags)
- __acquires(&task->sighand->siglock);
+ __cond_acquires(nonnull, &task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 45c0022b91ce..36553e14866d 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -95,6 +95,7 @@ struct sched_domain {
unsigned int newidle_call;
unsigned int newidle_success;
unsigned int newidle_ratio;
+ u64 newidle_stamp;
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
@@ -141,18 +142,30 @@ struct sched_domain {
unsigned int span_weight;
/*
- * Span of all CPUs in this domain.
+ * See sched_domain_span() for why flexible arrays are broken.
*
- * NOTE: this field is variable length. (Allocated dynamically
- * by attaching extra space to the end of the structure,
- * depending on how many CPUs the kernel has booted up with)
- */
unsigned long span[];
+ */
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
- return to_cpumask(sd->span);
+ /*
+ * Turns out that C flexible arrays are fundamentally broken since it
+ * is allowed for offsetof(*sd, span) < sizeof(*sd), this means that
+ * structure initialzation *sd = { ... }; which writes every byte
+ * inside sizeof(*type), will over-write the start of the flexible
+ * array.
+ *
+ * Luckily, the way we allocate sched_domain is by:
+ *
+ * sizeof(*sd) + cpumask_size()
+ *
+ * this means that we have sufficient space for the whole flex array
+ * *outside* of sizeof(*sd). So use that, and avoid using sd->span.
+ */
+ unsigned long *bitmap = (void *)sd + sizeof(*sd);
+ return to_cpumask(bitmap);
}
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
@@ -171,7 +184,6 @@ typedef int (*sched_domain_flags_f)(void);
struct sd_data {
struct sched_domain *__percpu *sd;
- struct sched_domain_shared *__percpu *sds;
struct sched_group *__percpu *sg;
struct sched_group_capacity *__percpu *sgc;
};
diff --git a/include/linux/secure_boot.h b/include/linux/secure_boot.h
new file mode 100644
index 000000000000..d17e92351567
--- /dev/null
+++ b/include/linux/secure_boot.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2026 Red Hat, Inc. All Rights Reserved.
+ *
+ * Author: Coiby Xu <coxu@redhat.com>
+ */
+
+#ifndef _LINUX_SECURE_BOOT_H
+#define _LINUX_SECURE_BOOT_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_HAVE_ARCH_GET_SECUREBOOT
+/*
+ * Returns true if the platform secure boot is enabled.
+ * Returns false if disabled or not supported.
+ */
+bool arch_get_secureboot(void);
+#else
+static inline bool arch_get_secureboot(void) { return false; }
+#endif
+
+#endif /* _LINUX_SECURE_BOOT_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index ee88dd2d2d1f..41d7367cf403 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -472,11 +472,17 @@ int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_release(struct file *file);
void security_file_free(struct file *file);
+int security_backing_file_alloc(struct file *backing_file,
+ const struct file *user_file);
+void security_backing_file_free(struct file *backing_file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int security_file_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
+int security_mmap_backing_file(struct vm_area_struct *vma,
+ struct file *backing_file,
+ struct file *user_file);
int security_mmap_addr(unsigned long addr);
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot);
@@ -1141,6 +1147,15 @@ static inline void security_file_release(struct file *file)
static inline void security_file_free(struct file *file)
{ }
+static inline int security_backing_file_alloc(struct file *backing_file,
+ const struct file *user_file)
+{
+ return 0;
+}
+
+static inline void security_backing_file_free(struct file *backing_file)
+{ }
+
static inline int security_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1160,6 +1175,13 @@ static inline int security_mmap_file(struct file *file, unsigned long prot,
return 0;
}
+static inline int security_mmap_backing_file(struct vm_area_struct *vma,
+ struct file *backing_file,
+ struct file *user_file)
+{
+ return 0;
+}
+
static inline int security_mmap_addr(unsigned long addr)
{
return cap_mmap_addr(addr);
@@ -1932,6 +1954,17 @@ static inline int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
}
#endif /* CONFIG_SECURITY_NETWORK */
+#if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_SECURITY_PATH)
+
+int security_unix_find(const struct path *path, struct sock *other, int flags);
+
+#else /* CONFIG_SECURITY_NETWORK && CONFIG_SECURITY_PATH */
+static inline int security_unix_find(const struct path *path, struct sock *other, int flags)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY_NETWORK && CONFIG_SECURITY_PATH */
+
#ifdef CONFIG_SECURITY_INFINIBAND
int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey);
int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num);
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 80f33a93f944..0630430cc01a 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -53,6 +53,11 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_DISCOVERY:
case IOC_OPAL_REVERT_LSP:
case IOC_OPAL_SET_SID_PW:
+ case IOC_OPAL_REACTIVATE_LSP:
+ case IOC_OPAL_LR_SET_START_LEN:
+ case IOC_OPAL_ENABLE_DISABLE_LR:
+ case IOC_OPAL_GET_SUM_STATUS:
+ case IOC_OPAL_STACK_RESET:
return true;
}
return false;
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 89706157e622..a4c8651ef021 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -15,7 +15,7 @@
struct semaphore {
raw_spinlock_t lock;
unsigned int count;
- struct list_head wait_list;
+ struct semaphore_waiter *first_waiter;
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
unsigned long last_holder;
@@ -33,7 +33,7 @@ struct semaphore {
{ \
.lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
- .wait_list = LIST_HEAD_INIT((name).wait_list) \
+ .first_waiter = NULL \
__LAST_HOLDER_SEMAPHORE_INITIALIZER \
}
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 5654c58eb73c..188c0ba62d50 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -49,10 +49,7 @@ struct serdev_device {
struct mutex write_lock;
};
-static inline struct serdev_device *to_serdev_device(struct device *d)
-{
- return container_of(d, struct serdev_device, dev);
-}
+#define to_serdev_device(d) container_of_const(d, struct serdev_device, dev)
/**
* struct serdev_device_driver - serdev slave device driver
@@ -68,10 +65,7 @@ struct serdev_device_driver {
void (*shutdown)(struct serdev_device *);
};
-static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d)
-{
- return container_of(d, struct serdev_device_driver, driver);
-}
+#define to_serdev_device_driver(d) container_of_const(d, struct serdev_device_driver, driver)
enum serdev_parity {
SERDEV_PARITY_NONE,
@@ -112,10 +106,7 @@ struct serdev_controller {
const struct serdev_controller_ops *ops;
};
-static inline struct serdev_controller *to_serdev_controller(struct device *d)
-{
- return container_of(d, struct serdev_controller, dev);
-}
+#define to_serdev_controller(d) container_of_const(d, struct serdev_controller, dev)
static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev)
{
@@ -343,4 +334,13 @@ static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
}
#endif /* CONFIG_ACPI */
+#ifdef CONFIG_OF
+struct serdev_controller *of_find_serdev_controller_by_node(struct device_node *node);
+#else
+static inline struct serdev_controller *of_find_serdev_controller_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
#endif /*_LINUX_SERDEV_H */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a8273b32e041..f6a2d3402d76 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -48,7 +48,7 @@ struct shmem_inode_info {
};
struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
- struct simple_xattrs xattrs; /* list of xattrs */
+ struct simple_xattrs *xattrs; /* list of xattrs */
pgoff_t fallocend; /* highest fallocate endindex */
unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
diff --git a/include/linux/simple_ring_buffer.h b/include/linux/simple_ring_buffer.h
new file mode 100644
index 000000000000..21aec556293e
--- /dev/null
+++ b/include/linux/simple_ring_buffer.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SIMPLE_RING_BUFFER_H
+#define _LINUX_SIMPLE_RING_BUFFER_H
+
+#include <linux/list.h>
+#include <linux/ring_buffer.h>
+#include <linux/ring_buffer_types.h>
+#include <linux/types.h>
+
+/*
+ * Ideally these structs would stay private, but the caller needs to know
+ * the allocation size for simple_ring_buffer_init().
+ */
+struct simple_buffer_page {
+ struct list_head link;
+ struct buffer_data_page *page;
+ u64 entries;
+ u32 write;
+ u32 id;
+};
+
+struct simple_rb_per_cpu {
+ struct simple_buffer_page *tail_page;
+ struct simple_buffer_page *reader_page;
+ struct simple_buffer_page *head_page;
+ struct simple_buffer_page *bpages;
+ struct trace_buffer_meta *meta;
+ u32 nr_pages;
+
+#define SIMPLE_RB_UNAVAILABLE 0
+#define SIMPLE_RB_READY 1
+#define SIMPLE_RB_WRITING 2
+ u32 status;
+
+ u64 last_overrun;
+ u64 write_stamp;
+
+ struct simple_rb_cbs *cbs;
+};
+
+int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc);
+
+void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer);
+
+void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned long length,
+ u64 timestamp);
+
+void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer);
+
+int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable);
+
+int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer);
+
+int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer);
+
+int simple_ring_buffer_init_mm(struct simple_rb_per_cpu *cpu_buffer,
+ struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc,
+ void *(*load_page)(unsigned long va),
+ void (*unload_page)(void *va));
+
+void simple_ring_buffer_unload_mm(struct simple_rb_per_cpu *cpu_buffer,
+ void (*unload_page)(void *));
+#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index daa4e4944ce3..2bcf78a4de7b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2605,8 +2605,9 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
*
* Does not take any additional reference on the fragment.
*/
-static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
- netmem_ref netmem, int off, int size)
+static __always_inline void
+__skb_fill_netmem_desc(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size)
{
struct page *page;
@@ -2628,14 +2629,16 @@ static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
skb->pfmemalloc = true;
}
-static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+static __always_inline void
+__skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
{
__skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
-static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
- netmem_ref netmem, int off, int size)
+static __always_inline void
+skb_fill_netmem_desc(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size)
{
__skb_fill_netmem_desc(skb, i, netmem, off, size);
skb_shinfo(skb)->nr_frags = i + 1;
@@ -2655,8 +2658,9 @@ static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
*
* Does not take any additional reference on the fragment.
*/
-static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+static __always_inline void
+skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
{
skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
@@ -2682,8 +2686,17 @@ static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
shinfo->nr_frags = i + 1;
}
-void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
- int off, int size, unsigned int truesize);
+static inline void skb_add_rx_frag_netmem(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off,
+ int size, unsigned int truesize)
+{
+ DEBUG_NET_WARN_ON_ONCE(size > truesize);
+
+ skb_fill_netmem_desc(skb, i, netmem, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+}
static inline void skb_add_rx_frag(struct sk_buff *skb, int i,
struct page *page, int off, int size,
@@ -2819,7 +2832,7 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
}
void *skb_pull(struct sk_buff *skb, unsigned int len);
-static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
+static __always_inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
@@ -2844,7 +2857,7 @@ void *skb_pull_data(struct sk_buff *skb, size_t len);
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline enum skb_drop_reason
+static __always_inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
@@ -2862,12 +2875,13 @@ pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
return SKB_NOT_DROPPED_YET;
}
-static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static __always_inline bool
+pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
-static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
+static __always_inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (!pskb_may_pull(skb, len))
return NULL;
@@ -3328,7 +3342,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
return 0;
}
-static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+static __always_inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
skb_might_realloc(skb);
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
@@ -3371,7 +3385,7 @@ static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
* destructor function and make the @skb unowned. The buffer continues
* to exist but is no longer charged to its former owner.
*/
-static inline void skb_orphan(struct sk_buff *skb)
+static __always_inline void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor) {
skb->destructor(skb);
@@ -3750,6 +3764,17 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
}
/**
+ * skb_frag_phys - gets the physical address of the data in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns: the physical address of the data within @frag.
+ */
+static inline phys_addr_t skb_frag_phys(const skb_frag_t *frag)
+{
+ return page_to_phys(skb_frag_page(frag)) + skb_frag_off(frag);
+}
+
+/**
* skb_frag_page_copy() - sets the page in a fragment from another fragment
* @fragto: skb fragment where page is set
* @fragfrom: skb fragment page is copied from
@@ -4035,8 +4060,8 @@ __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
-static inline void skb_postpull_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
+static __always_inline void
+skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = wsum_negate(csum_partial(start, len,
@@ -4295,7 +4320,7 @@ __skb_header_pointer(const struct sk_buff *skb, int offset, int len,
return buffer;
}
-static inline void * __must_check
+static __always_inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
@@ -4467,7 +4492,7 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
/* It is used in the ingress path to clear the delivery_time.
* If needed, set the skb->tstamp to the (rcv) timestamp.
*/
-static inline void skb_clear_delivery_time(struct sk_buff *skb)
+static __always_inline void skb_clear_delivery_time(struct sk_buff *skb)
{
if (skb->tstamp_type) {
skb->tstamp_type = SKB_CLOCK_REALTIME;
@@ -4494,7 +4519,8 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb)
return skb->tstamp;
}
-static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
+static __always_inline ktime_t
+skb_tstamp_cond(const struct sk_buff *skb, bool cond)
{
if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
return skb->tstamp;
@@ -5097,6 +5123,7 @@ static inline bool skb_has_extensions(struct sk_buff *skb)
return unlikely(skb->active_extensions);
}
#else
+static inline void __skb_ext_put(struct skb_ext *ext) {}
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
@@ -5283,7 +5310,7 @@ static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
-static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+static __always_inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 1ebd88026119..6925d15ccaa7 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -73,7 +73,7 @@ static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
}
/**
- * on_each_cpu_mask(): Run a function on processors specified by
+ * on_each_cpu_mask() - Run a function on processors specified by
* cpumask, which may include the local processor.
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
@@ -239,13 +239,30 @@ static inline int get_boot_cpu_id(void)
#endif /* !SMP */
-/**
+/*
* raw_smp_processor_id() - get the current (unstable) CPU id
*
- * For then you know what you are doing and need an unstable
+ * raw_smp_processor_id() is arch-specific/arch-defined and
+ * may be a macro or a static inline function.
+ *
+ * For when you know what you are doing and need an unstable
* CPU id.
*/
+/*
+ * Allow the architecture to differentiate between a stable and unstable read.
+ * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
+ * regular asm read for the stable.
+ */
+#ifndef __smp_processor_id
+#define __smp_processor_id() raw_smp_processor_id()
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+ extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+
+#else
/**
* smp_processor_id() - get the current (stable) CPU id
*
@@ -258,23 +275,10 @@ static inline int get_boot_cpu_id(void)
* - preemption is disabled;
* - the task is CPU affine.
*
- * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN
+ * When CONFIG_DEBUG_PREEMPT=y, we verify these assumptions and WARN
* when smp_processor_id() is used when the CPU id is not stable.
*/
-/*
- * Allow the architecture to differentiate between a stable and unstable read.
- * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
- * regular asm read for the stable.
- */
-#ifndef __smp_processor_id
-#define __smp_processor_id() raw_smp_processor_id()
-#endif
-
-#ifdef CONFIG_DEBUG_PREEMPT
- extern unsigned int debug_smp_processor_id(void);
-# define smp_processor_id() debug_smp_processor_id()
-#else
# define smp_processor_id() __smp_processor_id()
#endif
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 6e1b1202e818..58fa1df96347 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -191,7 +191,7 @@ int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
gpr_port_t *gpr_alloc_port(gpr_device_t *gdev, struct device *dev,
gpr_port_cb cb, void *priv);
void gpr_free_port(gpr_port_t *port);
-int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt);
-int gpr_send_pkt(gpr_device_t *gdev, struct gpr_pkt *pkt);
+int gpr_send_port_pkt(gpr_port_t *port, const struct gpr_pkt *pkt);
+int gpr_send_pkt(gpr_device_t *gdev, const struct gpr_pkt *pkt);
#endif /* __QCOM_APR_H_ */
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 8243ab3a12a8..227125d84318 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -91,10 +91,12 @@
* struct llcc_slice_desc - Cache slice descriptor
* @slice_id: llcc slice id
* @slice_size: Size allocated for the llcc slice
+ * @refcount: Atomic counter to track activate/deactivate calls
*/
struct llcc_slice_desc {
u32 slice_id;
size_t slice_size;
+ refcount_t refcount;
};
/**
@@ -152,11 +154,10 @@ struct llcc_edac_reg_offset {
* @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
- * @max_slices: max slices as read from device tree
* @num_banks: Number of llcc banks
- * @bitmap: Bit map to track the active slice ids
* @ecc_irq: interrupt for llcc cache error detection and reporting
* @ecc_irq_configured: 'True' if firmware has already configured the irq propagation
+ * @desc: Array pointer of pre-allocated LLCC slice descriptors
* @version: Indicates the LLCC version
*/
struct llcc_drv_data {
@@ -167,12 +168,11 @@ struct llcc_drv_data {
const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
u32 cfg_size;
- u32 max_slices;
u32 num_banks;
- unsigned long *bitmap;
int ecc_irq;
bool ecc_irq_configured;
u32 version;
+ struct llcc_slice_desc *desc;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)
diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h
index 83a8ea612e69..2b7691e47c2a 100644
--- a/include/linux/soc/qcom/pdr.h
+++ b/include/linux/soc/qcom/pdr.h
@@ -5,6 +5,7 @@
#include <linux/soc/qcom/qmi.h>
#define SERVREG_NAME_LENGTH 64
+#define SERVREG_PFR_LENGTH 256
struct pdr_service;
struct pdr_handle;
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index 291cdc7ef49c..b9dcb437a0be 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -92,6 +92,18 @@ struct qmi_elem_info {
#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
#define QMI_ERR_NOT_SUPPORTED_V01 94
+/*
+ * Enumerate the IDs of the QMI services
+ */
+#define QMI_SERVICE_ID_TEST 0x0f /* 15 */
+#define QMI_SERVICE_ID_SSCTL 0x2b /* 43 */
+#define QMI_SERVICE_ID_IPA 0x31 /* 49 */
+#define QMI_SERVICE_ID_SERVREG_LOC 0x40 /* 64 */
+#define QMI_SERVICE_ID_SERVREG_NOTIF 0x42 /* 66 */
+#define QMI_SERVICE_ID_WLFW 0x45 /* 69 */
+#define QMI_SERVICE_ID_SLIMBUS 0x301 /* 769 */
+#define QMI_SERVICE_ID_USB_AUDIO_STREAM 0x41d /* 1053 */
+
/**
* struct qmi_response_type_v01 - common response header (decoded)
* @result: result of the transaction
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
index f052e241736c..f5d0e2341261 100644
--- a/include/linux/soc/qcom/ubwc.h
+++ b/include/linux/soc/qcom/ubwc.h
@@ -74,4 +74,29 @@ static inline bool qcom_ubwc_get_ubwc_mode(const struct qcom_ubwc_cfg_data *cfg)
return ret;
}
+/*
+ * This is the best guess, based on the MDSS driver, which has worked so far.
+ */
+static inline bool qcom_ubwc_min_acc_length_64b(const struct qcom_ubwc_cfg_data *cfg)
+{
+ return cfg->ubwc_enc_version == UBWC_1_0 &&
+ (cfg->ubwc_dec_version == UBWC_2_0 ||
+ cfg->ubwc_dec_version == UBWC_3_0);
+}
+
+static inline bool qcom_ubwc_macrotile_mode(const struct qcom_ubwc_cfg_data *cfg)
+{
+ return cfg->macrotile_mode;
+}
+
+static inline bool qcom_ubwc_bank_spread(const struct qcom_ubwc_cfg_data *cfg)
+{
+ return cfg->ubwc_bank_spread;
+}
+
+static inline u32 qcom_ubwc_swizzle(const struct qcom_ubwc_cfg_data *cfg)
+{
+ return cfg->ubwc_swizzle;
+}
+
#endif /* __QCOM_UBWC_H__ */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec715ad4bf25..ec4a0a025793 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -415,7 +415,7 @@ struct __kernel_timespec;
struct old_timespec32;
struct scm_timestamping_internal {
- struct timespec64 ts[3];
+ ktime_t ts[3];
};
extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index f462717acf20..6147eb1fb210 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -532,7 +532,7 @@ struct sdw_slave_intr_status {
};
/**
- * sdw_reg_bank - SoundWire register banks
+ * enum sdw_reg_bank - SoundWire register banks
* @SDW_BANK0: Soundwire register bank 0
* @SDW_BANK1: Soundwire register bank 1
*/
@@ -751,7 +751,7 @@ struct sdw_port_params {
* struct sdw_transport_params: Data Port Transport Parameters
*
* @blk_grp_ctrl_valid: Port implements block group control
- * @num: Port number
+ * @port_num: Port number
* @blk_grp_ctrl: Block group control value
* @sample_interval: Sample interval
* @offset1: Blockoffset of the payload data
@@ -782,7 +782,7 @@ struct sdw_transport_params {
/**
* struct sdw_enable_ch: Enable/disable Data Port channel
*
- * @num: Port number
+ * @port_num: Port number
* @ch_mask: Active channel mask
* @enable: Enable (true) /disable (false) channel
*/
@@ -885,7 +885,7 @@ void sdw_bus_master_delete(struct sdw_bus *bus);
void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay);
/**
- * sdw_port_config: Master or Slave Port configuration
+ * struct sdw_port_config: Master or Slave Port configuration
*
* @num: Port number
* @ch_mask: channels mask for port
@@ -896,7 +896,7 @@ struct sdw_port_config {
};
/**
- * sdw_stream_config: Master or Slave stream configuration
+ * struct sdw_stream_config: Master or Slave stream configuration
*
* @frame_rate: Audio frame rate of the stream, in Hz
* @ch_count: Channel count of the stream
@@ -913,7 +913,7 @@ struct sdw_stream_config {
};
/**
- * sdw_stream_state: Stream states
+ * enum sdw_stream_state: Stream states
*
* @SDW_STREAM_ALLOCATED: New stream allocated.
* @SDW_STREAM_CONFIGURED: Stream configured
@@ -934,7 +934,7 @@ enum sdw_stream_state {
};
/**
- * sdw_stream_params: Stream parameters
+ * struct sdw_stream_params: Stream parameters
*
* @rate: Sampling frequency, in Hz
* @ch_count: Number of channels
@@ -947,7 +947,7 @@ struct sdw_stream_params {
};
/**
- * sdw_stream_runtime: Runtime stream parameters
+ * struct sdw_stream_runtime: Runtime stream parameters
*
* @name: SoundWire stream name
* @params: Stream parameters
@@ -983,7 +983,7 @@ struct sdw_stream_runtime {
* @defer_msg: Defer message
* @params: Current bus parameters
* @stream_refcount: number of streams currently using this bus
- * @btp_stream_refcount: number of BTP streams currently using this bus (should
+ * @bpt_stream_refcount: number of BPT streams currently using this bus (should
* be zero or one, multiple streams per link is not supported).
* @bpt_stream: pointer stored to handle BTP streams.
* @ops: Master callback ops
diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h
index fe31773d5210..470360a2723c 100644
--- a/include/linux/soundwire/sdw_amd.h
+++ b/include/linux/soundwire/sdw_amd.h
@@ -66,8 +66,10 @@ struct sdw_amd_dai_runtime {
* @status: peripheral devices status array
* @num_din_ports: number of input ports
* @num_dout_ports: number of output ports
+ * @max_ports: total number of input ports and output ports
* @cols_index: Column index in frame shape
* @rows_index: Rows index in frame shape
+ * @port_offset_map: dynamic array to map port block offset
* @instance: SoundWire manager instance
* @quirks: SoundWire manager quirks
* @wake_en_mask: wake enable mask per SoundWire manager
@@ -92,10 +94,12 @@ struct amd_sdw_manager {
int num_din_ports;
int num_dout_ports;
+ int max_ports;
int cols_index;
int rows_index;
+ int *port_offset_map;
u32 instance;
u32 quirks;
u32 wake_en_mask;
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 5774e554c0f0..c8e207522223 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -28,6 +28,14 @@
.dtr = true, \
}
+#define SPI_MEM_DTR_OP_PACKED_CMD(__opcode, __addr, __buswidth) \
+ { \
+ .nbytes = 2, \
+ .opcode = __opcode << 8 | __addr, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
+ }
+
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
{ \
.nbytes = __nbytes, \
@@ -130,11 +138,13 @@ enum spi_mem_data_dir {
/**
* struct spi_mem_op - describes a SPI memory operation
+ * @cmd: the complete command
* @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
* sent MSB-first.
* @cmd.buswidth: number of IO lines used to transmit the command
* @cmd.opcode: operation opcode
* @cmd.dtr: whether the command opcode should be sent in DTR mode or not
+ * @addr: the address attributes
* @addr.nbytes: number of address bytes to send. Can be zero if the operation
* does not need to send an address
* @addr.buswidth: number of IO lines used to transmit the address cycles
@@ -143,10 +153,12 @@ enum spi_mem_data_dir {
* Note that only @addr.nbytes are taken into account in this
* address value, so users should make sure the value fits in the
* assigned number of bytes.
+ * @dummy: data for dummy operation
* @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
* @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
+ * @data: the data attributes
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dtr: whether the data should be sent in DTR mode or not
* @data.ecc: whether error correction is required or not
@@ -273,7 +285,7 @@ struct spi_mem {
};
/**
- * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem
+ * spi_mem_set_drvdata() - attach driver private data to a SPI mem
* device
* @mem: memory device
* @data: data to attach to the memory device
@@ -284,7 +296,7 @@ static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
}
/**
- * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem
+ * spi_mem_get_drvdata() - get driver private data attached to a SPI mem
* device
* @mem: memory device
*
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 0dc671c07d3a..7587b1c5d7ec 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -382,6 +382,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
}
extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+extern struct spi_device *devm_spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
/* Use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e1e2f144af9b..241277cd34cf 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -178,7 +178,7 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
- extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(true, lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
@@ -189,6 +189,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
}
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
int ret = arch_spin_trylock(&(lock)->raw_lock);
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 1e84e71ca495..3a50976471d7 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -48,16 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
lock->slock = 1;
}
-/*
- * Read-write spinlocks. No debug version.
- */
-#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
-
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
@@ -68,4 +58,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_contended(lock) (((void)(lock), 0))
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+
#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index bb44a0bd7696..81b1938512d5 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -69,8 +69,8 @@ int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
// 0x4 // SRCU-lite is no longer with us.
-#define SRCU_READ_FLAVOR_FAST 0x4 // srcu_read_lock_fast().
-#define SRCU_READ_FLAVOR_FAST_UPDOWN 0x8 // srcu_read_lock_fast().
+#define SRCU_READ_FLAVOR_FAST 0x4 // srcu_read_lock_fast(), also NMI-safe.
+#define SRCU_READ_FLAVOR_FAST_UPDOWN 0x8 // srcu_read_lock_fast_updown().
#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
// All of the above.
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index be76fa4fc170..fd1a9270cb9a 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -260,7 +260,7 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
* srcu_read_unlock_fast().
*
* Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
- * critical sections either because they disables interrupts, because
+ * critical sections either because they disable interrupts, because
* they are a single instruction, or because they are read-modify-write
* atomic operations, depending on the whims of the architecture.
* This matters because the SRCU-fast grace-period mechanism uses either
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32352a216567..4430b967abde 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -84,61 +84,78 @@ struct stmmac_priv;
/* Platfrom data for platform device structure's platform_data field */
struct stmmac_mdio_bus_data {
- unsigned int phy_mask;
- unsigned int pcs_mask;
- unsigned int default_an_inband;
+ u32 phy_mask;
+ u32 pcs_mask;
int *irqs;
int probed_phy_irq;
bool needs_reset;
};
struct stmmac_dma_cfg {
+ /* pbl: programmable burst limit
+ * txpbl: transmit programmable burst limit
+ * rxpbl: receive programmable burst limit
+ * If txpbl or rxpbl are zero, the value of pbl will be substituted.
+ * Range 0 - 63.
+ */
int pbl;
int txpbl;
int rxpbl;
+ /* pblx8: multiplies pbl, txpbl, rxpbl by a factor of 8 for dwmac >=
+ * 3.50a, or a factor of 4 for previous versions.
+ */
bool pblx8;
- int fixed_burst;
- int mixed_burst;
+ /* fixed_burst:
+ * when set, AXI bursts defined by axi_blen_regval are permitted.
+ * AHB uses SINGLE, INCR4, INCR8 or INCR16 during burst transfers.
+ * when clear, AXI and AHB use SINGLE or INCR bursts.
+ */
+ bool fixed_burst;
+ /* mixed_burst:
+ * when set and fixed_burst is clear, AHB uses INCR for bursts > 16
+ * and SINGLE or INCRx for bursts <= 16.
+ */
+ bool mixed_burst;
+ /* aal: address aligned bursts for AHB and AXI master interface */
bool aal;
+ bool dche;
bool eame;
+ /* multi_msi_en: stmmac core internal */
bool multi_msi_en;
- bool dche;
+ /* atds: stmmac core internal */
bool atds;
};
#define AXI_BLEN 7
struct stmmac_axi {
- bool axi_lpi_en;
- bool axi_xit_frm;
u32 axi_wr_osr_lmt;
u32 axi_rd_osr_lmt;
- bool axi_kbbe;
u32 axi_blen_regval;
+ bool axi_lpi_en;
+ bool axi_xit_frm;
bool axi_fb;
- bool axi_mb;
- bool axi_rb;
};
struct stmmac_rxq_cfg {
- u8 mode_to_use;
u32 chan;
+ u32 prio;
+ u8 mode_to_use;
u8 pkt_route;
bool use_prio;
- u32 prio;
};
struct stmmac_txq_cfg {
u32 weight;
- bool coe_unsupported;
- u8 mode_to_use;
/* Credit Base Shaper parameters */
u32 send_slope;
u32 idle_slope;
u32 high_credit;
u32 low_credit;
- bool use_prio;
u32 prio;
int tbs_en;
+ bool use_prio;
+ bool coe_unsupported;
+ u8 mode_to_use;
};
struct stmmac_safety_feature_cfg {
@@ -187,11 +204,13 @@ enum dwmac_core_type {
#define STMMAC_FLAG_MULTI_MSI_EN BIT(7)
#define STMMAC_FLAG_EXT_SNAPSHOT_EN BIT(8)
#define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9)
-#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10)
-#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
-#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP BIT(12)
-#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(13)
-#define STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD BIT(14)
+#define STMMAC_FLAG_EEE_DISABLE BIT(10)
+#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(11)
+#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(12)
+#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP BIT(13)
+#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(14)
+#define STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD BIT(15)
+#define STMMAC_FLAG_SERDES_SUPPORTS_2500M BIT(16)
struct mac_device_info;
@@ -225,28 +244,28 @@ struct plat_stmmacenet_data {
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
- struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
- int enh_desc;
- int tx_coe;
+ bool default_an_inband;
+ bool enh_desc;
+ bool tx_coe;
+ bool bugged_jumbo;
+ bool pmt;
+ bool force_sf_dma_mode;
+ bool force_thresh_dma_mode;
+ bool riwt_off;
int rx_coe;
- int bugged_jumbo;
- int pmt;
- int force_sf_dma_mode;
- int force_thresh_dma_mode;
- int riwt_off;
int max_speed;
int maxmtu;
int multicast_filter_bins;
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
- u32 host_dma_width;
- u32 rx_queues_to_use;
- u32 tx_queues_to_use;
+ u8 host_dma_width;
+ u8 rx_queues_to_use;
+ u8 tx_queues_to_use;
u8 rx_sched_algorithm;
u8 tx_sched_algorithm;
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
@@ -256,7 +275,8 @@ struct plat_stmmacenet_data {
int (*set_phy_intf_sel)(void *priv, u8 phy_intf_sel);
int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
- void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
+ void (*fix_mac_speed)(void *priv, phy_interface_t interface,
+ int speed, unsigned int mode);
int (*fix_soc_reset)(struct stmmac_priv *priv);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
@@ -279,10 +299,41 @@ struct plat_stmmacenet_data {
struct phylink_pcs *(*select_pcs)(struct stmmac_priv *priv,
phy_interface_t interface);
void *bsp_priv;
+
+ /* stmmac clocks:
+ * stmmac_clk: CSR clock (which can be hclk_i, clk_csr_i, aclk_i,
+ * or clk_app_i depending on GMAC configuration). This clock
+ * generates the MDC clock.
+ *
+ * pclk: introduced for Imagination Technologies Pistachio board -
+ * see 5f9755d26fbf ("stmmac: Add an optional register interface
+ * clock"). This is probably used for cases where separate clocks
+ * are provided for the host interface and register interface. In
+ * this case, as the MDC clock is derived from stmmac_clk, pclk
+ * can only really be the "application clock" for the "host
+ * interface" and not the "register interface" aka CSR clock as
+ * it is never used when determining the divider for the MDC
+ * clock.
+ *
+ * clk_ptp_ref: optional PTP reference clock (clk_ptp_ref_i). When
+ * present, this clock increments the timestamp value. Otherwise,
+ * the rate of stmmac_clk will be used.
+ *
+ * clk_tx_i: MAC transmit clock, which will be 2.5MHz for 10M,
+ * 25MHz for 100M, or 125MHz for 1G irrespective of the interface
+ * mode. For the DWMAC PHY interface modes:
+ *
+ * GMII/MII PHY's transmit clock for 10M (2.5MHz) or 100M (25MHz),
+ * or 125MHz local clock for 1G mode
+ * RMII 50MHz RMII clock divided by 2 or 20.
+ * RGMII 125MHz local clock divided by 1, 5, or 50.
+ * SGMII 125MHz SerDes clock divided by 1, 5, or 50.
+ * TBI/RTBI 125MHz SerDes clock
+ */
struct clk *stmmac_clk;
struct clk *pclk;
struct clk *clk_ptp_ref;
- struct clk *clk_tx_i; /* clk_tx_i to MAC core */
+ struct clk *clk_tx_i;
unsigned long clk_ptp_rate;
unsigned long clk_ref_rate;
struct clk_bulk_data *clks;
@@ -296,7 +347,7 @@ struct plat_stmmacenet_data {
int rss_en;
int mac_port_sel_speed;
u8 vlan_fail_q;
- struct pci_dev *pdev;
+ bool provide_bus_info;
int int_snapshot_num;
int msi_mac_vec;
int msi_wol_vec;
@@ -306,5 +357,6 @@ struct plat_stmmacenet_data {
int msi_tx_base_vec;
const struct dwmac4_addrs *dwmac4_addrs;
unsigned int flags;
+ struct stmmac_dma_cfg __dma_cfg;
};
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 72820503514c..01011113d226 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -99,7 +99,7 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task
* stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr to pass to @fn()
- * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
+ * @cpus: the cpus to run @fn() on (NULL = one unspecified online CPU)
*
* Description: This causes a thread to be scheduled on every CPU, which
* will run with interrupts disabled. Each CPU specified by @cpus will
@@ -133,7 +133,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
* stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr to pass to @fn()
- * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
+ * @cpus: the cpus to run @fn() on (NULL = one unspecified online CPU)
*
* Same as above. Avoids nested calls to cpus_read_lock().
*
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 4dc14c7a711b..a11acf5cd63b 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -20,7 +20,7 @@
#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
-#include <linux/pagevec.h>
+#include <linux/folio_batch.h>
#include <linux/kthread.h>
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 62fc7499b408..4b1f13b5bbad 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -20,8 +20,6 @@ struct notifier_block;
struct bio;
-struct pagevec;
-
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
@@ -208,7 +206,6 @@ enum {
SWP_DISCARDABLE = (1 << 2), /* blkdev support discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
- SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
SWP_BLKDEV = (1 << 6), /* its a block device */
SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */
@@ -223,16 +220,6 @@ enum {
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
-/* Bit flag in swap_map */
-#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */
-
-/* Special value in first swap_map */
-#define SWAP_MAP_MAX 0x3e /* Max count */
-#define SWAP_MAP_BAD 0x3f /* Note page is bad */
-
-/* Special value in each swap_map continuation */
-#define SWAP_CONT_MAX 0x7f /* Max count */
-
/*
* The first page in the swap file is the swap header, which is always marked
* bad to prevent it from being allocated as an entry. This also prevents the
@@ -264,8 +251,7 @@ struct swap_info_struct {
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
signed char type; /* strange name for an index */
- unsigned int max; /* extent of the swap_map */
- unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned int max; /* size of this swap device */
unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
struct list_head free_clusters; /* free clusters list */
@@ -284,18 +270,14 @@ struct swap_info_struct {
struct completion comp; /* seldom referenced */
spinlock_t lock; /*
* protect map scan related fields like
- * swap_map, inuse_pages and all cluster
- * lists. other fields are only changed
+ * inuse_pages and all cluster lists.
+ * Other fields are only changed
* at swapon/swapoff, so are protected
* by swap_lock. changing flags need
* hold this lock and swap_lock. If
* both locks need hold, hold swap_lock
* first.
*/
- spinlock_t cont_lock; /*
- * protect swap count continuation page
- * list.
- */
struct work_struct discard_work; /* discard worker */
struct work_struct reclaim_work; /* reclaim worker */
struct list_head discard_clusters; /* discard clusters list */
@@ -451,7 +433,6 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern int add_swap_count_continuation(swp_entry_t, gfp_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
@@ -517,11 +498,6 @@ static inline void free_swap_cache(struct folio *folio)
{
}
-static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
-{
- return 0;
-}
-
static inline int swap_dup_entry_direct(swp_entry_t ent)
{
return 0;
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
index d9b3cf0f410c..f19f5cec18e2 100644
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -37,6 +37,16 @@ void soc_device_unregister(struct soc_device *soc_dev);
*/
struct device *soc_device_to_device(struct soc_device *soc);
+/**
+ * soc_attr_read_machine - retrieve the machine model and store it in
+ * the soc_device_attribute structure
+ * @soc_dev_attr: SoC attribute structure to store the model in
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
+ */
+int soc_attr_read_machine(struct soc_device_attribute *soc_dev_attr);
+
#ifdef CONFIG_SOC_BUS
const struct soc_device_attribute *soc_device_match(
const struct soc_device_attribute *matches);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 02bd6ddb6278..f5639d5ac331 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1283,19 +1283,9 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
AT_SYMLINK_NOFOLLOW);
}
-int do_sys_ftruncate(unsigned int fd, loff_t length, int small);
-
-static inline long ksys_ftruncate(unsigned int fd, loff_t length)
-{
- return do_sys_ftruncate(fd, length, 1);
-}
-
-int do_sys_truncate(const char __user *pathname, loff_t length);
-
-static inline long ksys_truncate(const char __user *pathname, loff_t length)
-{
- return do_sys_truncate(pathname, length);
-}
+#define FTRUNCATE_LFS (1u << 0) /* allow truncating > 32-bit */
+int ksys_ftruncate(unsigned int fd, loff_t length, unsigned int flags);
+int ksys_truncate(const char __user *pathname, loff_t length);
static inline unsigned int ksys_personality(unsigned int personality)
{
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 99b775f3ff46..b1a3a1e6ad09 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -396,13 +396,13 @@ struct sysfs_ops {
#ifdef CONFIG_SYSFS
-int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
+int __must_check sysfs_create_dir_ns(struct kobject *kobj, const struct ns_common *ns);
void sysfs_remove_dir(struct kobject *kobj);
int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
- const void *new_ns);
+ const struct ns_common *new_ns);
int __must_check sysfs_move_dir_ns(struct kobject *kobj,
struct kobject *new_parent_kobj,
- const void *new_ns);
+ const struct ns_common *new_ns);
int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
const char *name);
void sysfs_remove_mount_point(struct kobject *parent_kobj,
@@ -410,7 +410,7 @@ void sysfs_remove_mount_point(struct kobject *parent_kobj,
int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
- const void *ns);
+ const struct ns_common *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute * const *attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
@@ -419,7 +419,7 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
const struct attribute *attr);
void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
- const void *ns);
+ const struct ns_common *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
@@ -437,7 +437,7 @@ void sysfs_remove_link(struct kobject *kobj, const char *name);
int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
const char *old_name, const char *new_name,
- const void *new_ns);
+ const struct ns_common *new_ns);
void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
const char *name);
@@ -445,15 +445,15 @@ void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
int __must_check sysfs_create_groups(struct kobject *kobj,
- const struct attribute_group **groups);
+ const struct attribute_group *const *groups);
int __must_check sysfs_update_groups(struct kobject *kobj,
- const struct attribute_group **groups);
+ const struct attribute_group *const *groups);
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_groups(struct kobject *kobj,
- const struct attribute_group **groups);
+ const struct attribute_group *const *groups);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
@@ -486,7 +486,7 @@ int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid);
int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ,
const char *name, kuid_t kuid, kgid_t kgid);
int sysfs_groups_change_owner(struct kobject *kobj,
- const struct attribute_group **groups,
+ const struct attribute_group *const *groups,
kuid_t kuid, kgid_t kgid);
int sysfs_group_change_owner(struct kobject *kobj,
const struct attribute_group *groups, kuid_t kuid,
@@ -502,7 +502,7 @@ ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj,
#else /* CONFIG_SYSFS */
-static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+static inline int sysfs_create_dir_ns(struct kobject *kobj, const struct ns_common *ns)
{
return 0;
}
@@ -512,14 +512,14 @@ static inline void sysfs_remove_dir(struct kobject *kobj)
}
static inline int sysfs_rename_dir_ns(struct kobject *kobj,
- const char *new_name, const void *new_ns)
+ const char *new_name, const struct ns_common *new_ns)
{
return 0;
}
static inline int sysfs_move_dir_ns(struct kobject *kobj,
struct kobject *new_parent_kobj,
- const void *new_ns)
+ const struct ns_common *new_ns)
{
return 0;
}
@@ -537,7 +537,7 @@ static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
static inline int sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
- const void *ns)
+ const struct ns_common *ns)
{
return 0;
}
@@ -567,7 +567,7 @@ static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
- const void *ns)
+ const struct ns_common *ns)
{
}
@@ -612,7 +612,7 @@ static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
const char *old_name,
- const char *new_name, const void *ns)
+ const char *new_name, const struct ns_common *ns)
{
return 0;
}
@@ -629,13 +629,13 @@ static inline int sysfs_create_group(struct kobject *kobj,
}
static inline int sysfs_create_groups(struct kobject *kobj,
- const struct attribute_group **groups)
+ const struct attribute_group *const *groups)
{
return 0;
}
static inline int sysfs_update_groups(struct kobject *kobj,
- const struct attribute_group **groups)
+ const struct attribute_group *const *groups)
{
return 0;
}
@@ -652,7 +652,7 @@ static inline void sysfs_remove_group(struct kobject *kobj,
}
static inline void sysfs_remove_groups(struct kobject *kobj,
- const struct attribute_group **groups)
+ const struct attribute_group *const *groups)
{
}
@@ -733,7 +733,7 @@ static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t k
}
static inline int sysfs_groups_change_owner(struct kobject *kobj,
- const struct attribute_group **groups,
+ const struct attribute_group *const *groups,
kuid_t kuid, kgid_t kgid)
{
return 0;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index f72eef31fa23..6982f10e826b 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -228,8 +228,7 @@ struct tcp_sock {
u32 sacked_out; /* SACK'd packets */
u16 tcp_header_len; /* Bytes of tcp header to send */
u8 scaling_ratio; /* see tcp_win_from_space() */
- u8 chrono_type : 2, /* current chronograph type */
- repair : 1,
+ u8 repair : 1,
tcp_usec_ts : 1, /* TSval values in usec */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
@@ -264,6 +263,7 @@ struct tcp_sock {
* total number of data bytes sent.
*/
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
+ u8 chrono_type; /* current chronograph type */
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -316,6 +316,9 @@ struct tcp_sock {
*/
u32 app_limited; /* limited until "delivered" reaches this val */
u32 rcv_wnd; /* Current receiver window */
+ u32 rcv_mwnd_seq; /* Maximum window sequence number (RFC 7323,
+ * section 2.4, receiver requirements)
+ */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
/*
* Options received (usually on last packet, some only on SYN packets).
@@ -548,6 +551,13 @@ enum tsq_flags {
TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED),
};
+/* Flags of interest for tcp_release_cb() */
+#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \
+ TCPF_WRITE_TIMER_DEFERRED | \
+ TCPF_DELACK_TIMER_DEFERRED | \
+ TCPF_MTU_REDUCED_DEFERRED | \
+ TCPF_ACK_DEFERRED)
+
#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket.
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index ee5f0bd41f43..f993d5118edd 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -50,7 +50,7 @@ enum tee_dma_heap_id {
* @dev: embedded basic device structure
* @cdev: embedded cdev
* @num_users: number of active users of this device
- * @c_no_user: completion used when unregistering the device
+ * @c_no_users: completion used when unregistering the device
* @mutex: mutex protecting @num_users and @idr
* @idr: register of user space shared memory objects allocated or
* registered on this device
@@ -132,6 +132,7 @@ struct tee_driver_ops {
/* Size for TEE revision string buffer used by get_tee_revision(). */
#define TEE_REVISION_STR_SIZE 128
+#define TEE_DESC_PRIVILEGED 0x1
/**
* struct tee_desc - Describes the TEE driver to the subsystem
* @name: name of driver
@@ -139,7 +140,6 @@ struct tee_driver_ops {
* @owner: module providing the driver
* @flags: Extra properties of driver, defined by TEE_DESC_* below
*/
-#define TEE_DESC_PRIVILEGED 0x1
struct tee_desc {
const char *name;
const struct tee_driver_ops *ops;
@@ -187,7 +187,7 @@ struct tee_protmem_pool_ops {
* Allocates a new struct tee_device instance. The device is
* removed by tee_device_unregister().
*
- * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ * @returns: a pointer to a 'struct tee_device' or an ERR_PTR on failure
*/
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
struct device *dev,
@@ -201,7 +201,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
* tee_device_unregister() need to be called to remove the @teedev if
* this function fails.
*
- * @returns < 0 on failure
+ * @returns: < 0 on failure
*/
int tee_device_register(struct tee_device *teedev);
@@ -254,14 +254,14 @@ void tee_device_set_dev_groups(struct tee_device *teedev,
* tee_session_calc_client_uuid() - Calculates client UUID for session
* @uuid: Resulting UUID
* @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
- * @connectuon_data: Connection data for opening session
+ * @connection_data: Connection data for opening session
*
* Based on connection method calculates UUIDv5 based client UUID.
*
* For group based logins verifies that calling process has specified
* credentials.
*
- * @return < 0 on failure
+ * @returns: < 0 on failure
*/
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
const u8 connection_data[TEE_IOCTL_UUID_LEN]);
@@ -295,7 +295,7 @@ struct tee_shm_pool_ops {
* @paddr: Physical address of start of pool
* @size: Size in bytes of the pool
*
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ * @returns: pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
*/
struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
phys_addr_t paddr, size_t size,
@@ -318,14 +318,16 @@ static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
* @paddr: Physical address of start of pool
* @size: Size in bytes of the pool
*
- * @returns pointer to a 'struct tee_protmem_pool' or an ERR_PTR on failure.
+ * @returns: pointer to a 'struct tee_protmem_pool' or an ERR_PTR on failure.
*/
struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
size_t size);
/**
* tee_get_drvdata() - Return driver_data pointer
- * @returns the driver_data pointer supplied to tee_register().
+ * @teedev: Pointer to the tee_device
+ *
+ * @returns: the driver_data pointer supplied to tee_register().
*/
void *tee_get_drvdata(struct tee_device *teedev);
@@ -334,7 +336,7 @@ void *tee_get_drvdata(struct tee_device *teedev);
* TEE driver
* @ctx: The TEE context for shared memory allocation
* @size: Shared memory allocation size
- * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ * @returns: a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
@@ -354,7 +356,7 @@ void tee_dyn_shm_free_helper(struct tee_shm *shm,
/**
* tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
* @shm: Shared memory handle
- * @returns true if object is dynamic shared memory
+ * @returns: true if object is dynamic shared memory
*/
static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
{
@@ -370,7 +372,7 @@ void tee_shm_put(struct tee_shm *shm);
/**
* tee_shm_get_id() - Get id of a shared memory object
* @shm: Shared memory handle
- * @returns id
+ * @returns: id
*/
static inline int tee_shm_get_id(struct tee_shm *shm)
{
@@ -382,7 +384,7 @@ static inline int tee_shm_get_id(struct tee_shm *shm)
* count
* @ctx: Context owning the shared memory
* @id: Id of shared memory object
- * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ * @returns: a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
*/
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
@@ -402,7 +404,7 @@ static inline bool tee_param_is_memref(struct tee_param *param)
* teedev_open() - Open a struct tee_device
* @teedev: Device to open
*
- * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
+ * @returns: pointer to struct tee_context on success or an ERR_PTR on failure.
*/
struct tee_context *teedev_open(struct tee_device *teedev);
diff --git a/include/linux/tegra-mipi-cal.h b/include/linux/tegra-mipi-cal.h
new file mode 100644
index 000000000000..2a540b50f65d
--- /dev/null
+++ b/include/linux/tegra-mipi-cal.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TEGRA_MIPI_CAL_H_
+#define __TEGRA_MIPI_CAL_H_
+
+struct tegra_mipi_device {
+ const struct tegra_mipi_ops *ops;
+ struct platform_device *pdev;
+ unsigned long pads;
+};
+
+/**
+ * Operations for Tegra MIPI calibration device
+ */
+struct tegra_mipi_ops {
+ /**
+ * @enable:
+ *
+ * Enable MIPI calibration device
+ */
+ int (*enable)(struct tegra_mipi_device *device);
+
+ /**
+ * @disable:
+ *
+ * Disable MIPI calibration device
+ */
+ int (*disable)(struct tegra_mipi_device *device);
+
+ /**
+ * @start_calibration:
+ *
+ * Start MIPI calibration
+ */
+ int (*start_calibration)(struct tegra_mipi_device *device);
+
+ /**
+ * @finish_calibration:
+ *
+ * Finish MIPI calibration
+ */
+ int (*finish_calibration)(struct tegra_mipi_device *device);
+};
+
+int devm_tegra_mipi_add_provider(struct device *device, struct device_node *np,
+ const struct tegra_mipi_ops *ops);
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np);
+void tegra_mipi_free(struct tegra_mipi_device *device);
+
+int tegra_mipi_enable(struct tegra_mipi_device *device);
+int tegra_mipi_disable(struct tegra_mipi_device *device);
+int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
+int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
+
+#endif /* __TEGRA_MIPI_CAL_H_ */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 0b5ed6821080..0ddc77aeeca2 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -273,6 +273,9 @@ bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
void thermal_zone_device_critical(struct thermal_zone_device *tz);
+
+void thermal_pm_prepare(void);
+void thermal_pm_complete(void);
#else
static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
const char *type,
@@ -350,6 +353,9 @@ static inline int thermal_zone_device_enable(struct thermal_zone_device *tz)
static inline int thermal_zone_device_disable(struct thermal_zone_device *tz)
{ return -ENODEV; }
+
+static inline void thermal_pm_prepare(void) {}
+static inline void thermal_pm_complete(void) {}
#endif /* CONFIG_THERMAL */
#endif /* __THERMAL_H__ */
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
index 3faf5a6bb13e..74f5e73bf6db 100644
--- a/include/linux/timb_gpio.h
+++ b/include/linux/timb_gpio.h
@@ -9,10 +9,10 @@
/**
* struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
- * @gpio_base The number of the first GPIO pin, set to -1 for
+ * @gpio_base: The number of the first GPIO pin, set to -1 for
* dynamic number allocation.
- * @nr_pins Number of pins that is supported by the hardware (1-32)
- * @irq_base If IRQ is supported by the hardware, this is the base
+ * @nr_pins: Number of pins that is supported by the hardware (1-32)
+ * @irq_base: If IRQ is supported by the hardware, this is the base
* number of IRQ:s. One IRQ per pin will be used. Set to
* -1 if IRQ:s is not supported.
*/
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index c514d0e5a45c..58bd9728df58 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -8,6 +8,7 @@
#include <linux/ns_common.h>
#include <linux/err.h>
#include <linux/time64.h>
+#include <linux/cleanup.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
@@ -25,7 +26,9 @@ struct time_namespace {
struct ucounts *ucounts;
struct ns_common ns;
struct timens_offsets offsets;
+#ifdef CONFIG_TIME_NS_VDSO
struct page *vvar_page;
+#endif
/* If set prevents changing offsets after any task joined namespace. */
bool frozen_offsets;
} __randomize_layout;
@@ -38,9 +41,6 @@ static inline struct time_namespace *to_time_ns(struct ns_common *ns)
return container_of(ns, struct time_namespace, ns);
}
void __init time_ns_init(void);
-extern int vdso_join_timens(struct task_struct *task,
- struct time_namespace *ns);
-extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
@@ -53,7 +53,6 @@ struct time_namespace *copy_time_ns(u64 flags,
struct time_namespace *old_ns);
void free_time_ns(struct time_namespace *ns);
void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
-struct page *find_timens_vvar_page(struct vm_area_struct *vma);
static inline void put_time_ns(struct time_namespace *ns)
{
@@ -117,17 +116,6 @@ static inline void __init time_ns_init(void)
{
}
-static inline int vdso_join_timens(struct task_struct *task,
- struct time_namespace *ns)
-{
- return 0;
-}
-
-static inline void timens_commit(struct task_struct *tsk,
- struct time_namespace *ns)
-{
-}
-
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
return NULL;
@@ -154,11 +142,6 @@ static inline void timens_on_fork(struct nsproxy *nsproxy,
return;
}
-static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
- return NULL;
-}
-
static inline void timens_add_monotonic(struct timespec64 *ts) { }
static inline void timens_add_boottime(struct timespec64 *ts) { }
@@ -175,4 +158,20 @@ static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
}
#endif
+#ifdef CONFIG_TIME_NS_VDSO
+extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
+struct page *find_timens_vvar_page(struct vm_area_struct *vma);
+#else /* !CONFIG_TIME_NS_VDSO */
+static inline void timens_commit(struct task_struct *tsk, struct time_namespace *ns)
+{
+}
+
+static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+#endif /* CONFIG_TIME_NS_VDSO */
+
+DEFINE_FREE(time_ns, struct time_namespace *, if (_T) put_time_ns(_T))
+
#endif /* _LINUX_TIMENS_H */
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index b8ae89ea28ab..e36d11e33e0c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -72,6 +72,10 @@ struct tk_read_base {
* @id: The timekeeper ID
* @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
* @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
+ * @cs_id: The ID of the current clocksource
* @cs_ns_to_cyc_mult: Multiplier for nanoseconds to cycles conversion
* @cs_ns_to_cyc_shift: Shift value for nanoseconds to cycles conversion
* @cs_ns_to_cyc_maxns: Maximum nanoseconds to cycles conversion range
* @clock_was_set_seq: The sequence number of clock was set events
* @cs_was_changed_seq: The sequence number of clocksource change events
* @clock_valid: Indicator for valid clock
@@ -159,6 +163,10 @@ struct timekeeper {
u64 raw_sec;
/* Cachline 3 and 4 (timekeeping internal variables): */
+ enum clocksource_ids cs_id;
+ u32 cs_ns_to_cyc_mult;
+ u32 cs_ns_to_cyc_shift;
+ u64 cs_ns_to_cyc_maxns;
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
u8 clock_valid;
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index d306d9dd2207..7d0aaa766580 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -5,12 +5,11 @@
#include <linux/rbtree.h>
#include <linux/timerqueue_types.h>
-extern bool timerqueue_add(struct timerqueue_head *head,
- struct timerqueue_node *node);
-extern bool timerqueue_del(struct timerqueue_head *head,
- struct timerqueue_node *node);
-extern struct timerqueue_node *timerqueue_iterate_next(
- struct timerqueue_node *node);
+bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node);
+bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node);
+struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node);
+
+bool timerqueue_linked_add(struct timerqueue_linked_head *head, struct timerqueue_linked_node *node);
/**
* timerqueue_getnext - Returns the timer with the earliest expiration time
@@ -19,8 +18,7 @@ extern struct timerqueue_node *timerqueue_iterate_next(
*
* Returns a pointer to the timer node that has the earliest expiration time.
*/
-static inline
-struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
+static inline struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
struct rb_node *leftmost = rb_first_cached(&head->rb_root);
@@ -41,4 +39,46 @@ static inline void timerqueue_init_head(struct timerqueue_head *head)
{
head->rb_root = RB_ROOT_CACHED;
}
+
+/* Timer queues with linked nodes */
+
+static __always_inline
+struct timerqueue_linked_node *timerqueue_linked_first(struct timerqueue_linked_head *head)
+{
+ return rb_entry_safe(head->rb_root.rb_leftmost, struct timerqueue_linked_node, node);
+}
+
+static __always_inline
+struct timerqueue_linked_node *timerqueue_linked_next(struct timerqueue_linked_node *node)
+{
+ return rb_entry_safe(node->node.next, struct timerqueue_linked_node, node);
+}
+
+static __always_inline
+struct timerqueue_linked_node *timerqueue_linked_prev(struct timerqueue_linked_node *node)
+{
+ return rb_entry_safe(node->node.prev, struct timerqueue_linked_node, node);
+}
+
+static __always_inline
+bool timerqueue_linked_del(struct timerqueue_linked_head *head, struct timerqueue_linked_node *node)
+{
+ return rb_erase_linked(&node->node, &head->rb_root);
+}
+
+static __always_inline void timerqueue_linked_init(struct timerqueue_linked_node *node)
+{
+ RB_CLEAR_LINKED_NODE(&node->node);
+}
+
+static __always_inline bool timerqueue_linked_node_queued(struct timerqueue_linked_node *node)
+{
+ return !RB_EMPTY_LINKED_NODE(&node->node);
+}
+
+static __always_inline void timerqueue_linked_init_head(struct timerqueue_linked_head *head)
+{
+ head->rb_root = RB_ROOT_LINKED;
+}
+
#endif /* _LINUX_TIMERQUEUE_H */
diff --git a/include/linux/timerqueue_types.h b/include/linux/timerqueue_types.h
index dc298d0923e3..be2218b147c4 100644
--- a/include/linux/timerqueue_types.h
+++ b/include/linux/timerqueue_types.h
@@ -6,12 +6,21 @@
#include <linux/types.h>
struct timerqueue_node {
- struct rb_node node;
- ktime_t expires;
+ struct rb_node node;
+ ktime_t expires;
};
struct timerqueue_head {
- struct rb_root_cached rb_root;
+ struct rb_root_cached rb_root;
+};
+
+struct timerqueue_linked_node {
+ struct rb_node_linked node;
+ ktime_t expires;
+};
+
+struct timerqueue_linked_head {
+ struct rb_root_linked rb_root;
};
#endif /* _LINUX_TIMERQUEUE_TYPES_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 37eb2f0f3dd8..40a43a4c7caf 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -22,20 +22,23 @@ union bpf_attr;
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
- const struct trace_print_flags *flag_array);
+ const struct trace_print_flags *flag_array,
+ size_t flag_array_size);
const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
- const struct trace_print_flags *symbol_array);
+ const struct trace_print_flags *symbol_array,
+ size_t symbol_array_size);
#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
unsigned long long flags,
- const struct trace_print_flags_u64 *flag_array);
+ const struct trace_print_flags_u64 *flag_array,
+ size_t flag_array_size);
const char *trace_print_symbols_seq_u64(struct trace_seq *p,
unsigned long long val,
- const struct trace_print_flags_u64
- *symbol_array);
+ const struct trace_print_flags_u64 *symbol_array,
+ size_t symbol_array_size);
#endif
struct trace_iterator;
diff --git a/include/linux/trace_printk.h b/include/linux/trace_printk.h
index bb5874097f24..2670ec7f4262 100644
--- a/include/linux/trace_printk.h
+++ b/include/linux/trace_printk.h
@@ -107,7 +107,6 @@ do { \
__trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
-extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);
extern __printf(2, 3)
diff --git a/include/linux/trace_remote.h b/include/linux/trace_remote.h
new file mode 100644
index 000000000000..fcd1d46ea466
--- /dev/null
+++ b/include/linux/trace_remote.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_TRACE_REMOTE_H
+#define _LINUX_TRACE_REMOTE_H
+
+#include <linux/dcache.h>
+#include <linux/ring_buffer.h>
+#include <linux/trace_remote_event.h>
+
+/**
+ * struct trace_remote_callbacks - Callbacks used by Tracefs to control the remote
+ * @init: Called once the remote has been registered. Allows the
+ * caller to extend the Tracefs remote directory
+ * @load_trace_buffer: Called before Tracefs accesses the trace buffer for the first
+ * time. Must return a &trace_buffer_desc
+ * (most likely filled with trace_remote_alloc_buffer())
+ * @unload_trace_buffer:
+ * Called once Tracefs has no use for the trace buffer
+ * (most likely call trace_remote_free_buffer())
+ * @enable_tracing: Called on Tracefs tracing_on. It is expected from the
+ * remote to allow writing.
+ * @swap_reader_page: Called when Tracefs consumes a new page from a
+ * ring-buffer. It is expected from the remote to isolate a
+ * new reader-page from the @cpu ring-buffer.
+ * @reset: Called on `echo 0 > trace`. It is expected from the
+ * remote to reset all ring-buffer pages.
+ * @enable_event: Called on events/event_name/enable. It is expected from
+ * the remote to allow writing the event @id.
+ */
+struct trace_remote_callbacks {
+ int (*init)(struct dentry *d, void *priv);
+ struct trace_buffer_desc *(*load_trace_buffer)(unsigned long size, void *priv);
+ void (*unload_trace_buffer)(struct trace_buffer_desc *desc, void *priv);
+ int (*enable_tracing)(bool enable, void *priv);
+ int (*swap_reader_page)(unsigned int cpu, void *priv);
+ int (*reset)(unsigned int cpu, void *priv);
+ int (*enable_event)(unsigned short id, bool enable, void *priv);
+};
+
+int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv,
+ struct remote_event *events, size_t nr_events);
+
+int trace_remote_alloc_buffer(struct trace_buffer_desc *desc, size_t desc_size, size_t buffer_size,
+ const struct cpumask *cpumask);
+
+void trace_remote_free_buffer(struct trace_buffer_desc *desc);
+
+#endif
diff --git a/include/linux/trace_remote_event.h b/include/linux/trace_remote_event.h
new file mode 100644
index 000000000000..c8ae1e1f5e72
--- /dev/null
+++ b/include/linux/trace_remote_event.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_TRACE_REMOTE_EVENTS_H
+#define _LINUX_TRACE_REMOTE_EVENTS_H
+
+struct trace_remote;
+struct trace_event_fields;
+struct trace_seq;
+
+struct remote_event_hdr {
+ unsigned short id;
+};
+
+#define REMOTE_EVENT_NAME_MAX 30
+struct remote_event {
+ char name[REMOTE_EVENT_NAME_MAX];
+ unsigned short id;
+ bool enabled;
+ struct trace_remote *remote;
+ struct trace_event_fields *fields;
+ char *print_fmt;
+ void (*print)(void *evt, struct trace_seq *seq);
+};
+
+#define RE_STRUCT(__args...) __args
+#define re_field(__type, __field) __type __field;
+
+#define REMOTE_EVENT_FORMAT(__name, __struct) \
+ struct remote_event_format_##__name { \
+ struct remote_event_hdr hdr; \
+ __struct \
+ }
+#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 22ca1c8b54f3..578e520b6ee6 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -122,6 +122,22 @@ static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
return tp->ext && tp->ext->faultable;
}
+/*
+ * Run RCU callback with the appropriate grace period wait for non-faultable
+ * tracepoints, e.g., those used in atomic context.
+ */
+static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func)
+{
+ call_srcu(&tracepoint_srcu, rcu, func);
+}
+/*
+ * Run RCU callback with the appropriate grace period wait for faultable
+ * tracepoints, e.g., those used in syscall context.
+ */
+static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func)
+{
+ call_rcu_tasks_trace(rcu, func);
+}
#else
static inline void tracepoint_synchronize_unregister(void)
{ }
@@ -129,6 +145,10 @@ static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
return false;
}
+static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func)
+{ }
+static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func)
+{ }
#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
@@ -294,6 +314,10 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
WARN_ONCE(!rcu_is_watching(), \
"RCU not watching for tracepoint"); \
} \
+ } \
+ static inline void trace_call__##name(proto) \
+ { \
+ __do_trace_##name(args); \
}
#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \
@@ -313,6 +337,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
WARN_ONCE(!rcu_is_watching(), \
"RCU not watching for tracepoint"); \
} \
+ } \
+ static inline void trace_call__##name(proto) \
+ { \
+ might_fault(); \
+ __do_trace_##name(args); \
}
/*
@@ -398,6 +427,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \
static inline void trace_##name(proto) \
{ } \
+ static inline void trace_call__##name(proto) \
+ { } \
static inline int \
register_trace_##name(void (*probe)(data_proto), \
void *data) \
diff --git a/include/linux/types.h b/include/linux/types.h
index 7e71d260763c..608050dbca6a 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -239,7 +239,7 @@ struct ustat {
*
* This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
- * - the structure shares storage space in struct page with @compound_head,
+ * - the structure shares storage space in struct page with @compound_info,
* which encode PageTail() in bit 0. The guarantee is needed to avoid
* false-positive PageTail().
*/
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 4fe63169d5a2..56328601218c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -331,16 +331,21 @@ static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
-#ifndef ARCH_HAS_NOCACHE_UACCESS
+#ifndef ARCH_HAS_NONTEMPORAL_UACCESS
static inline __must_check unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+copy_from_user_inatomic_nontemporal(void *to, const void __user *from,
unsigned long n)
{
+ if (can_do_masked_user_access())
+ from = mask_user_address(from);
+ else
+ if (!access_ok(from, n))
+ return n;
return __copy_from_user_inatomic(to, from, n);
}
-#endif /* ARCH_HAS_NOCACHE_UACCESS */
+#endif /* ARCH_HAS_NONTEMPORAL_UACCESS */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 1cbf6b4d3aab..ce56ebcee5cb 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -40,8 +40,6 @@ enum {
UDP_FLAGS_ACCEPT_FRAGLIST,
UDP_FLAGS_ACCEPT_L4,
UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
- UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
- UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
};
/* per NUMA structure for lockless producer usage. */
@@ -74,11 +72,7 @@ struct udp_sock {
*/
__u16 len; /* total length of pending frames */
__u16 gso_size;
- /*
- * Fields specific to UDP-Lite.
- */
- __u16 pcslen;
- __u16 pcrlen;
+
/*
* For encapsulation sockets.
*/
@@ -236,8 +230,6 @@ static inline void udp_allow_gso(struct sock *sk)
hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif
-#define IS_UDPLITE(__sk) (unlikely(__sk->sk_protocol == IPPROTO_UDPLITE))
-
static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
{
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 334641e20fb1..02eaac47ac44 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -97,7 +97,7 @@ struct uio_device {
* @irq_flags: flags for request_irq()
* @priv: optional private data
* @handler: the device's irq handler
- * @mmap: mmap operation for this uio device
+ * @mmap_prepare: mmap_prepare operation for this uio device
* @open: open operation for this uio device
* @release: release operation for this uio device
* @irqcontrol: disable/enable irqs when 0/1 is written to /dev/uioX
@@ -112,7 +112,7 @@ struct uio_info {
unsigned long irq_flags;
void *priv;
irqreturn_t (*handler)(int irq, struct uio_info *dev_info);
- int (*mmap)(struct uio_info *info, struct vm_area_struct *vma);
+ int (*mmap_prepare)(struct uio_info *info, struct vm_area_desc *desc);
int (*open)(struct uio_info *info, struct inode *inode);
int (*release)(struct uio_info *info, struct inode *inode);
int (*irqcontrol)(struct uio_info *info, s32 irq_on);
diff --git a/include/linux/units.h b/include/linux/units.h
index 80d57c50b9e3..c6d78988613a 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -57,6 +57,9 @@
#define MICROWATT_PER_MILLIWATT 1000UL
#define MICROWATT_PER_WATT 1000000UL
+#define MICROJOULE_PER_JOULE 1000000UL
+#define NANOJOULE_PER_JOULE 1000000000UL
+
#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE)
#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE)
#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 04277af4bb9d..4aab20015851 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -21,6 +21,7 @@
#include <linux/completion.h> /* for struct completion */
#include <linux/sched.h> /* for current && schedule_timeout */
#include <linux/mutex.h> /* for struct mutex */
+#include <linux/spinlock.h> /* for spinlock_t */
#include <linux/pm_runtime.h> /* for runtime PM */
struct usb_device;
@@ -636,8 +637,9 @@ struct usb3_lpm_parameters {
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
- * @offload_at_suspend: offload activities during suspend is enabled.
+ * @offload_pm_locked: prevents offload_usage changes during PM transitions.
* @offload_usage: number of offload activities happening on this usb device.
+ * @offload_lock: protects offload_usage and offload_pm_locked
* @slot_id: Slot ID assigned by xHCI
* @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout.
* @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
@@ -726,8 +728,9 @@ struct usb_device {
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned port_is_suspended:1;
- unsigned offload_at_suspend:1;
+ unsigned offload_pm_locked:1;
int offload_usage;
+ spinlock_t offload_lock;
enum usb_link_tunnel_mode tunnel_mode;
struct device_link *usb4_link;
@@ -849,6 +852,7 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
int usb_offload_get(struct usb_device *udev);
int usb_offload_put(struct usb_device *udev);
bool usb_offload_check(struct usb_device *udev);
+void usb_offload_set_pm_locked(struct usb_device *udev, bool locked);
#else
static inline int usb_offload_get(struct usb_device *udev)
@@ -857,6 +861,8 @@ static inline int usb_offload_put(struct usb_device *udev)
{ return 0; }
static inline bool usb_offload_check(struct usb_device *udev)
{ return false; }
+static inline void usb_offload_set_pm_locked(struct usb_device *udev, bool locked)
+{ }
#endif
extern int usb_disable_lpm(struct usb_device *udev);
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index ea92ac623a45..05bfebab42b6 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -138,6 +138,9 @@
#define UVC_GUID_FORMAT_M420 \
{ 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_P010 \
+ { 'P', '0', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_H264 \
{ 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index fd5f42765497..d83e349900a3 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -23,6 +23,9 @@
/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)
+#define __VMA_UFFD_FLAGS mk_vma_flags(VMA_UFFD_MISSING_BIT, VMA_UFFD_WP_BIT, \
+ VMA_UFFD_MINOR_BIT)
+
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
* new flags, since they might collide with O_* ones. We want
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 2bfe3baa63f4..782c42d25db1 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -72,9 +72,6 @@ struct vdpa_mgmt_dev;
* struct vdpa_device - representation of a vDPA device
* @dev: underlying device
* @vmap: the metadata passed to upper layer to be used for mapping
- * @driver_override: driver name to force a match; do not set directly,
- * because core frees it; use driver_set_override() to
- * set or clear it.
* @config: the configuration ops for this device.
* @map: the map ops for this device
* @cf_lock: Protects get and set access to configuration layout.
@@ -90,7 +87,6 @@ struct vdpa_mgmt_dev;
struct vdpa_device {
struct device dev;
union virtio_map vmap;
- const char *driver_override;
const struct vdpa_config_ops *config;
const struct virtio_map_ops *map;
struct rw_semaphore cf_lock; /* Protects get/set config */
diff --git a/include/linux/vdso_datastore.h b/include/linux/vdso_datastore.h
index a91fa24b06e0..0b530428db71 100644
--- a/include/linux/vdso_datastore.h
+++ b/include/linux/vdso_datastore.h
@@ -2,9 +2,15 @@
#ifndef _LINUX_VDSO_DATASTORE_H
#define _LINUX_VDSO_DATASTORE_H
+#ifdef CONFIG_HAVE_GENERIC_VDSO
#include <linux/mm_types.h>
extern const struct vm_special_mapping vdso_vvar_mapping;
struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr);
+void __init vdso_setup_data_pages(void);
+#else /* !CONFIG_HAVE_GENERIC_VDSO */
+static inline void vdso_setup_data_pages(void) { }
+#endif /* CONFIG_HAVE_GENERIC_VDSO */
+
#endif /* _LINUX_VDSO_DATASTORE_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e90859956514..31b826efba00 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -16,6 +16,7 @@
#include <linux/cdev.h>
#include <uapi/linux/vfio.h>
#include <linux/iova_bitmap.h>
+#include <linux/uaccess.h>
struct kvm;
struct iommufd_ctx;
@@ -52,6 +53,7 @@ struct vfio_device {
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
unsigned int migration_flags;
+ u8 precopy_info_v2;
struct kvm *kvm;
/* Members below here are private, not for driver use */
@@ -72,13 +74,11 @@ struct vfio_device {
u8 iommufd_attached:1;
#endif
u8 cdev_opened:1;
-#ifdef CONFIG_DEBUG_FS
/*
* debug_root is a static property of the vfio_device
* which must be set prior to registering the vfio_device.
*/
struct dentry *debug_root;
-#endif
};
/**
@@ -284,6 +284,44 @@ static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
return 1;
}
+/**
+ * vfio_check_precopy_ioctl - Validate user input for the VFIO_MIG_GET_PRECOPY_INFO ioctl
+ * @vdev: The vfio device
+ * @cmd: Cmd from the ioctl
+ * @arg: Arg from the ioctl
+ * @info: Driver pointer to hold the userspace input to the ioctl
+ *
+ * For use in a driver's get_precopy_info. Checks that the inputs to the
+ * VFIO_MIG_GET_PRECOPY_INFO ioctl are correct.
+ *
+ * Returns 0 on success, otherwise errno.
+ */
+
+static inline int
+vfio_check_precopy_ioctl(struct vfio_device *vdev, unsigned int cmd,
+ unsigned long arg, struct vfio_precopy_info *info)
+{
+ unsigned long minsz;
+
+ if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
+ return -ENOTTY;
+
+ minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);
+
+ if (copy_from_user(info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info->argsz < minsz)
+ return -EINVAL;
+
+ /* keep v1 behaviour as is for compatibility reasons */
+ if (vdev->precopy_info_v2)
+ /* flags are output, set its initial value to 0 */
+ info->flags = 0;
+
+ return 0;
+}
+
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
const struct vfio_device_ops *ops);
#define vfio_alloc_device(dev_struct, member, dev, ops) \
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 22a139f82d75..03fe95f5a020 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -38,21 +38,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
- PGREFILL,
PGREUSE,
- PGSTEAL_KSWAPD,
- PGSTEAL_DIRECT,
- PGSTEAL_KHUGEPAGED,
- PGSTEAL_PROACTIVE,
- PGSCAN_KSWAPD,
- PGSCAN_DIRECT,
- PGSCAN_KHUGEPAGED,
- PGSCAN_PROACTIVE,
PGSCAN_DIRECT_THROTTLE,
- PGSCAN_ANON,
- PGSCAN_FILE,
- PGSTEAL_ANON,
- PGSTEAL_FILE,
#ifdef CONFIG_NUMA
PGSCAN_ZONE_RECLAIM_SUCCESS,
PGSCAN_ZONE_RECLAIM_FAILED,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index e8e94f90d686..3b02c0c6b371 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -286,8 +286,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb);
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-unsigned long vmalloc_nr_pages(void);
-
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
@@ -304,7 +302,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
#else /* !CONFIG_MMU */
#define VMALLOC_TOTAL 0UL
-static inline unsigned long vmalloc_nr_pages(void) { return 0; }
static inline void set_vm_flush_reset_perms(void *addr) {}
#endif /* CONFIG_MMU */
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 9e29d79fc790..ace7379d627d 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -406,7 +406,7 @@ do { \
schedule())
/**
- * wait_var_event_killable - wait for a variable to be updated and notified
+ * wait_var_event_interruptible - wait for a variable to be updated and notified
* @var: the address of variable being waited on
* @condition: the condition to wait for
*
@@ -492,7 +492,7 @@ do { \
* wait_var_event_mutex - wait for a variable to be updated under a mutex
* @var: the address of the variable being waited on
* @condition: condition to wait for
- * @mutex: the mutex which protects updates to the variable
+ * @lock: the mutex which protects updates to the variable
*
* Wait for a condition which can only be reliably tested while holding
 * a mutex. The variables assessed in the condition will normally be
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 75cb0c7cfe57..14fb644e1701 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -18,16 +18,12 @@
* struct wmi_device - WMI device structure
* @dev: Device associated with this WMI device
* @setable: True for devices implementing the Set Control Method
- * @driver_override: Driver name to force a match; do not set directly,
- * because core frees it; use driver_set_override() to
- * set or clear it.
*
* This represents WMI devices discovered by the WMI driver core.
*/
struct wmi_device {
struct device dev;
bool setable;
- const char *driver_override;
};
/**
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a4749f56398f..ab6cb70ca1a5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -131,8 +131,9 @@ struct rcu_work {
enum wq_affn_scope {
WQ_AFFN_DFL, /* use system default */
WQ_AFFN_CPU, /* one pod per CPU */
- WQ_AFFN_SMT, /* one pod poer SMT */
+ WQ_AFFN_SMT, /* one pod per SMT */
WQ_AFFN_CACHE, /* one pod per LLC */
+ WQ_AFFN_CACHE_SHARD, /* synthetic sub-LLC shards */
WQ_AFFN_NUMA, /* one pod per NUMA node */
WQ_AFFN_SYSTEM, /* one pod across the whole system */
@@ -440,6 +441,9 @@ enum wq_consts {
* system_long_wq is similar to system_percpu_wq but may host long running
* works. Queue flushing might take relatively long.
*
+ * system_dfl_long_wq is similar to system_dfl_wq but it may host long running
+ * works.
+ *
* system_dfl_wq is unbound workqueue. Workers are not bound to
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
@@ -468,6 +472,7 @@ extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;
+extern struct workqueue_struct *system_dfl_long_wq;
void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);
@@ -512,6 +517,26 @@ __printf(1, 4) struct workqueue_struct *
alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
+/**
+ * devm_alloc_workqueue - Resource-managed allocate a workqueue
+ * @dev: Device to allocate workqueue for
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @...: args for @fmt
+ *
+ * Resource managed workqueue, see alloc_workqueue() for details.
+ *
+ * The workqueue will be automatically destroyed on driver detach. Typically
+ * this should be used in drivers already relying on devm interfaces.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(2, 5) struct workqueue_struct *
+devm_alloc_workqueue(struct device *dev, const char *fmt, unsigned int flags,
+ int max_active, ...);
+
#ifdef CONFIG_LOCKDEP
/**
* alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
@@ -568,6 +593,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+#define devm_alloc_ordered_workqueue(dev, fmt, flags, args...) \
+ devm_alloc_workqueue(dev, fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
@@ -712,14 +739,14 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
}
/**
- * schedule_work - put work task in global workqueue
+ * schedule_work - put work task in per-CPU workqueue
* @work: job to be done
*
- * Returns %false if @work was already on the kernel-global workqueue and
+ * Returns %false if @work was already on the system per-CPU workqueue and
* %true otherwise.
*
- * This puts a job in the kernel-global workqueue if it was not already
- * queued and leaves it in the same position on the kernel-global
+ * This puts a job in the system per-CPU workqueue if it was not already
+ * queued and leaves it in the same position on the system per-CPU
* workqueue otherwise.
*
* Shares the same memory-ordering properties of queue_work(), cf. the
@@ -783,6 +810,8 @@ extern void __warn_flushing_systemwide_wq(void)
_wq == system_highpri_wq) || \
(__builtin_constant_p(_wq == system_long_wq) && \
_wq == system_long_wq) || \
+ (__builtin_constant_p(_wq == system_dfl_long_wq) && \
+ _wq == system_dfl_long_wq) || \
(__builtin_constant_p(_wq == system_dfl_wq) && \
_wq == system_dfl_wq) || \
(__builtin_constant_p(_wq == system_freezable_wq) && \
@@ -796,12 +825,12 @@ extern void __warn_flushing_systemwide_wq(void)
})
/**
- * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * schedule_delayed_work_on - queue work in per-CPU workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done
* @delay: number of jiffies to wait
*
- * After waiting for a given time this puts a job in the kernel-global
+ * After waiting for a given time this puts a job in the system per-CPU
* workqueue on the specified CPU.
*/
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
@@ -811,11 +840,11 @@ static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
}
/**
- * schedule_delayed_work - put work task in global workqueue after delay
+ * schedule_delayed_work - put work task in per-CPU workqueue after delay
* @dwork: job to be done
* @delay: number of jiffies to wait or 0 for immediate execution
*
- * After waiting for a given time this puts a job in the kernel-global
+ * After waiting for a given time this puts a job in the system per-CPU
* workqueue.
*/
static inline bool schedule_delayed_work(struct delayed_work *dwork,
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index e530112c4b3a..62552a2ce5b9 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -11,7 +11,7 @@
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
-#include <linux/pagevec.h>
+#include <linux/folio_batch.h>
struct bio;
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 85b1fff02fde..0c95ead5a297 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -181,7 +181,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
- __releases(ctx) __acquires_shared(ctx) __no_context_analysis
+ __must_hold(ctx)
{
#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
@@ -199,7 +199,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
- __releases_shared(ctx) __no_context_analysis
+ __releases(ctx) __no_context_analysis
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 296b5ee5c979..8b6601367eae 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/rhashtable-types.h>
#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
@@ -106,31 +107,65 @@ static inline const char *xattr_prefix(const struct xattr_handler *handler)
}
struct simple_xattrs {
- struct rb_root rb_root;
- rwlock_t lock;
+ struct rhashtable ht;
};
struct simple_xattr {
- struct rb_node rb_node;
+ struct rhash_head hash_node;
+ struct rcu_head rcu;
char *name;
size_t size;
char value[] __counted_by(size);
};
-void simple_xattrs_init(struct simple_xattrs *xattrs);
+#define SIMPLE_XATTR_MAX_NR 128
+#define SIMPLE_XATTR_MAX_SIZE (128 << 10)
+
+struct simple_xattr_limits {
+ atomic_t nr_xattrs; /* current user.* xattr count */
+ atomic_t xattr_size; /* current total user.* value bytes */
+};
+
+static inline void simple_xattr_limits_init(struct simple_xattr_limits *limits)
+{
+ atomic_set(&limits->nr_xattrs, 0);
+ atomic_set(&limits->xattr_size, 0);
+}
+
+int simple_xattrs_init(struct simple_xattrs *xattrs);
+struct simple_xattrs *simple_xattrs_alloc(void);
+struct simple_xattrs *simple_xattrs_lazy_alloc(struct simple_xattrs **xattrsp,
+ const void *value, int flags);
void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
void simple_xattr_free(struct simple_xattr *xattr);
+void simple_xattr_free_rcu(struct simple_xattr *xattr);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
const char *name, const void *value,
size_t size, int flags);
+int simple_xattr_set_limited(struct simple_xattrs *xattrs,
+ struct simple_xattr_limits *limits,
+ const char *name, const void *value,
+ size_t size, int flags);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size);
-void simple_xattr_add(struct simple_xattrs *xattrs,
- struct simple_xattr *new_xattr);
+int simple_xattr_add(struct simple_xattrs *xattrs,
+ struct simple_xattr *new_xattr);
int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name);
+DEFINE_CLASS(simple_xattr,
+ struct simple_xattr *,
+ if (!IS_ERR_OR_NULL(_T)) simple_xattr_free(_T),
+ simple_xattr_alloc(value, size),
+ const void *value, size_t size)
+
+DEFINE_CLASS(simple_xattrs,
+ struct simple_xattrs *,
+ if (!IS_ERR_OR_NULL(_T)) { simple_xattrs_free(_T, NULL); kfree(_T); },
+ simple_xattrs_alloc(),
+ void)
+
#endif /* _LINUX_XATTR_H */
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 35c7a0546f02..d37fffc5dc3c 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -81,7 +81,6 @@ struct lirc_fh {
/**
* struct rc_dev - represents a remote control device
* @dev: driver model's view of this device
- * @managed_alloc: devm_rc_allocate_device was used to create rc_dev
* @registered: set to true by rc_register_device(), false by
* rc_unregister_device
* @idle: used to keep track of RX state
@@ -156,7 +155,6 @@ struct lirc_fh {
*/
struct rc_dev {
struct device dev;
- bool managed_alloc;
bool registered;
bool idle;
bool encode_wakeup;
@@ -303,7 +301,7 @@ struct ir_raw_event {
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
-#define IR_MAX_DURATION MS_TO_US(500)
+#define IR_MAX_DURATION MS_TO_US(1000)
#define IR_DEFAULT_TIMEOUT MS_TO_US(125)
#define IR_MAX_TIMEOUT LIRC_VALUE_MASK
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index cd82e70ccbaa..d7abbd76a421 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -218,8 +218,9 @@ enum v4l2_fwnode_bus_type {
*
* Return: %0 on success or a negative error code on failure:
* %-ENOMEM on memory allocation failure
- * %-EINVAL on parsing failure, including @fwnode == NULL
+ * %-EINVAL on parsing failure
* %-ENXIO on mismatching bus types
+ * %-EPROBE_DEFER on NULL @fwnode
*/
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep);
@@ -276,8 +277,9 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
*
* Return: %0 on success or a negative error code on failure:
* %-ENOMEM on memory allocation failure
- * %-EINVAL on parsing failure, including @fwnode == NULL
+ * %-EINVAL on parsing failure
* %-ENXIO on mismatching bus types
+ * %-EPROBE_DEFER on NULL @fwnode
*/
int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index a37d9a847196..d256b7ec8f84 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1722,6 +1722,62 @@ int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
*/
int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable);
+/**
+ * __v4l2_subdev_get_frame_desc_passthrough - Helper to implement the
+ * subdev get_frame_desc operation in simple passthrough cases
+ * @sd: The subdevice
+ * @state: The locked subdevice active state
+ * @pad: The source pad index
+ * @fd: The mbus frame desc
+ *
+ * This helper implements the get_frame_desc operation for subdevices that pass
+ * streams through without modification.
+ *
+ * The helper iterates over the subdevice's sink pads, calls get_frame_desc on
+ * the remote subdevice connected to each sink pad, and collects the frame desc
+ * entries for streams that are routed to the given source pad according to the
+ * subdevice's routing table. Each entry is copied as-is from the upstream
+ * source, with the exception of the 'stream' field which is remapped to the
+ * source stream ID from the routing table.
+ *
+ * The frame desc type is taken from the first upstream source. If multiple
+ * sink pads are involved and the upstream sources report different frame desc
+ * types, -EPIPE is returned.
+ *
+ * The caller must hold the subdevice's active state lock. This variant is
+ * intended for drivers that need to perform additional work around the
+ * passthrough frame descriptor collection. Drivers that do not need any
+ * customization should use v4l2_subdev_get_frame_desc_passthrough() instead.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+int __v4l2_subdev_get_frame_desc_passthrough(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd);
+
+/**
+ * v4l2_subdev_get_frame_desc_passthrough() - Helper to implement the subdev
+ * get_frame_desc operation in simple passthrough cases
+ * @sd: The subdevice
+ * @pad: The source pad index
+ * @fd: The mbus frame desc
+ *
+ * This function locks the subdevice's active state, calls
+ * __v4l2_subdev_get_frame_desc_passthrough(), and unlocks the state.
+ *
+ * This function can be assigned directly as the .get_frame_desc callback in
+ * &v4l2_subdev_pad_ops for subdevices that pass streams through without
+ * modification. Drivers that need to perform additional work should use
+ * __v4l2_subdev_get_frame_desc_passthrough() in their custom
+ * .get_frame_desc implementation instead.
+ *
+ * Return: 0 on success, or a negative error code otherwise.
+ */
+int v4l2_subdev_get_frame_desc_passthrough(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd);
+
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
#endif /* CONFIG_MEDIA_CONTROLLER */
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 533d8e75f7bb..4e40063adab4 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -179,6 +179,15 @@ struct vsock_transport {
/* Addressing. */
u32 (*get_local_cid)(void);
+ /* Check if this transport serves a specific remote CID.
+ * For H2G transports: return true if the CID belongs to a registered
+ * guest. If not implemented, all CIDs > VMADDR_CID_HOST go to H2G.
+ * For G2H transports: return true if the transport can reach arbitrary
+ * CIDs via the hypervisor (i.e. supports the fallback overlay). VMCI
+ * does not implement this as it only serves CIDs 0 and 2.
+ */
+ bool (*has_remote_cid)(struct vsock_sock *vsk, u32 remote_cid);
+
/* Read a single skb */
int (*read_skb)(struct vsock_sock *, skb_read_actor_t);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 89ad9470fa71..572b1c620c5d 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1468,8 +1468,12 @@ struct hci_rp_read_data_block_size {
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
-struct hci_std_codecs {
+struct hci_std_codecs_hdr {
__u8 num;
+} __packed;
+
+struct hci_std_codecs {
+ struct hci_std_codecs_hdr;
__u8 codec[];
} __packed;
@@ -1487,7 +1491,7 @@ struct hci_vnd_codecs {
struct hci_rp_read_local_supported_codecs {
__u8 status;
- struct hci_std_codecs std_codecs;
+ struct hci_std_codecs_hdr std_codecs;
struct hci_vnd_codecs vnd_codecs;
} __packed;
@@ -1504,8 +1508,12 @@ struct hci_std_codec_v2 {
__u8 transport;
} __packed;
-struct hci_std_codecs_v2 {
+struct hci_std_codecs_v2_hdr {
__u8 num;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ struct hci_std_codecs_v2_hdr;
struct hci_std_codec_v2 codec[];
} __packed;
@@ -1522,7 +1530,7 @@ struct hci_vnd_codecs_v2 {
struct hci_rp_read_local_supported_codecs_v2 {
__u8 status;
- struct hci_std_codecs_v2 std_codecs;
+ struct hci_std_codecs_v2_hdr std_codecs;
struct hci_vnd_codecs_v2 vendor_codecs;
} __packed;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 395c6e281c5f..edd1942dcd73 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -69,9 +69,6 @@
#define bond_first_slave_rcu(bond) \
netdev_lower_get_first_private_rcu(bond->dev)
-#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
-#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
-
/**
* bond_for_each_slave - iterate over all slaves
* @bond: the bond holding this list
@@ -91,22 +88,22 @@
NETIF_F_GSO_ESP)
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern atomic_t netpoll_block_tx;
+DECLARE_STATIC_KEY_FALSE(netpoll_block_tx);
static inline void block_netpoll_tx(void)
{
- atomic_inc(&netpoll_block_tx);
+ static_branch_inc(&netpoll_block_tx);
}
static inline void unblock_netpoll_tx(void)
{
- atomic_dec(&netpoll_block_tx);
+ static_branch_dec(&netpoll_block_tx);
}
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
- if (unlikely(netpoll_tx_running(dev)))
- return atomic_read(&netpoll_block_tx);
+ if (static_branch_unlikely(&netpoll_block_tx))
+ return netpoll_tx_running(dev);
return 0;
}
#else
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fc01de19c798..9d3639ff9c28 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -190,6 +190,8 @@ enum ieee80211_channel_flags {
* on this channel.
* @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
* @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
+ * @cac_start_time: timestamp (CLOCK_BOOTTIME, nanoseconds) when CAC was
+ * started on this channel. Zero when CAC is not in progress.
* @psd: power spectral density (in dBm)
*/
struct ieee80211_channel {
@@ -207,6 +209,7 @@ struct ieee80211_channel {
enum nl80211_dfs_state dfs_state;
unsigned long dfs_state_entered;
unsigned int dfs_cac_ms;
+ u64 cac_start_time;
s8 psd;
};
@@ -1828,6 +1831,7 @@ struct cfg80211_ttlm_params {
* @eml_cap: EML capabilities of this station
* @link_sta_params: link related params.
* @epp_peer: EPP peer indication
+ * @nmi_mac: MAC address of the NMI station of the NAN peer
*/
struct station_parameters {
struct net_device *vlan;
@@ -1855,6 +1859,7 @@ struct station_parameters {
u16 eml_cap;
struct link_station_parameters link_sta_params;
bool epp_peer;
+ const u8 *nmi_mac;
};
/**
@@ -1894,6 +1899,8 @@ struct station_del_parameters {
* entry that is operating, has been marked authorized by userspace)
* @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed)
* @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed)
+ * @CFG80211_STA_NAN_MGMT: NAN management interface station
+ * @CFG80211_STA_NAN_DATA: NAN data path station
*/
enum cfg80211_station_type {
CFG80211_STA_AP_CLIENT,
@@ -1905,6 +1912,8 @@ enum cfg80211_station_type {
CFG80211_STA_TDLS_PEER_ACTIVE,
CFG80211_STA_MESH_PEER_KERNEL,
CFG80211_STA_MESH_PEER_USER,
+ CFG80211_STA_NAN_MGMT,
+ CFG80211_STA_NAN_DATA,
};
/**
@@ -3978,6 +3987,77 @@ struct cfg80211_qos_map {
};
/**
+ * DOC: Neighbor Awareness Networking (NAN)
+ *
+ * NAN uses two interface types:
+ *
+ * - %NL80211_IFTYPE_NAN: a non-netdev interface. This has two roles: (1) holds
+ * the configuration of all NAN activities (DE parameters, synchronisation
+ * parameters, local schedule, etc.), and (2) serves as the NAN Management
+ * Interface (NMI), which is used for NAN management communication.
+ *
+ * - %NL80211_IFTYPE_NAN_DATA: The NAN Data Interface (NDI), used for data
+ * communication with NAN peers.
+ *
+ * An NDI interface can only be started (IFF_UP) if the NMI one is running and
+ * NAN is started. Before NAN is stopped, all associated NDI interfaces
+ * must be stopped first.
+ *
+ * The local schedule specifies which channels the device is available on and
+ * when. Must be cancelled before NAN is stopped.
+ *
+ * NAN Stations
+ * ~~~~~~~~~~~~
+ *
+ * There are two types of stations corresponding to the two interface types:
+ *
+ * - NMI station: Represents the NAN peer. Peer-specific data such as the peer's
+ * schedule and the HT, VHT and HE capabilities belongs to the NMI station.
+ * Also used for Tx/Rx of NAN management frames to/from the peer.
+ * Added on the %NL80211_IFTYPE_NAN interface.
+ *
+ * - NDI station: Used for Tx/Rx of data frames (and non-NAN management frames)
+ * for a specific NDP established with the NAN peer. Added on the
+ * %NL80211_IFTYPE_NAN_DATA interface.
+ *
+ * A peer may reuse its NMI address as the NDI address. In that case, two
+ * separate stations should be added even though they share the same MAC
+ * address.
+ *
+ * HT, VHT and HE capabilities should not change after they are set. It is the
+ * driver's responsibility to check that.
+ *
+ * An NDI station can only be added if the corresponding NMI station has already
+ * been configured with HT (and possibly VHT and HE) capabilities. It is the
+ * driver's responsibility to check that.
+ *
+ * All NDI stations must be removed before corresponding NMI station is removed.
+ * Therefore, removing an NMI station implies that the associated NDI station(s)
+ * (if any) will be removed first.
+ *
+ * NAN Dependencies
+ * ~~~~~~~~~~~~~~~~
+ *
+ * The following diagram shows the dependencies between NAN components.
+ * An arrow from A to B means A must be started/added before B, and B must be
+ * stopped/removed before A:
+ *
+ * +-------------+
+ * | NMI iface |---(local schedule)
+ * +------+------+
+ * / \
+ * v v
+ * +-----------+ +-------------+
+ * | NDI iface | | NMI sta |---(peer schedule)
+ * +-----+-----+ +------+------+
+ * \ /
+ * v v
+ * +----------+
+ * | NDI sta |
+ * +----------+
+ */
+
+/**
* struct cfg80211_nan_band_config - NAN band specific configuration
*
* @chan: Pointer to the IEEE 802.11 channel structure. The channel to be used
@@ -4020,7 +4100,6 @@ struct cfg80211_nan_band_config {
* (i.e. BIT(NL80211_BAND_2GHZ)).
* @cluster_id: cluster ID used for NAN synchronization. This is a MAC address
* that can take a value from 50-6F-9A-01-00-00 to 50-6F-9A-01-FF-FF.
- * If NULL, the device will pick a random Cluster ID.
* @scan_period: period (in seconds) between NAN scans.
* @scan_dwell_time: dwell time (in milliseconds) for NAN scans.
* @discovery_beacon_interval: interval (in TUs) for discovery beacons.
@@ -4036,7 +4115,7 @@ struct cfg80211_nan_band_config {
struct cfg80211_nan_conf {
u8 master_pref;
u8 bands;
- const u8 *cluster_id;
+ u8 cluster_id[ETH_ALEN] __aligned(2);
u16 scan_period;
u16 scan_dwell_time;
u8 discovery_beacon_interval;
@@ -4048,6 +4127,102 @@ struct cfg80211_nan_conf {
u16 vendor_elems_len;
};
+#define CFG80211_NAN_SCHED_NUM_TIME_SLOTS 32
+
+/**
+ * struct cfg80211_nan_channel - NAN channel configuration
+ *
+ * This struct defines a NAN channel configuration
+ *
+ * @chandef: the channel definition
+ * @channel_entry: pointer to the Channel Entry blob as defined in Wi-Fi Aware
+ * (TM) 4.0 specification Table 100 (Channel Entry format for the NAN
+ * Availability attribute).
+ * @rx_nss: number of spatial streams supported on this channel
+ */
+struct cfg80211_nan_channel {
+ struct cfg80211_chan_def chandef;
+ const u8 *channel_entry;
+ u8 rx_nss;
+};
+
+/**
+ * struct cfg80211_nan_local_sched - NAN local schedule
+ *
+ * This struct defines NAN local schedule parameters
+ *
+ * @schedule: a mapping of time slots to chandef indexes in %nan_channels.
+ * An unscheduled slot will be set to %NL80211_NAN_SCHED_NOT_AVAIL_SLOT.
+ * @n_channels: number of channel definitions in %nan_channels.
+ * @nan_avail_blob: pointer to NAN Availability attribute blob.
+ * See %NL80211_ATTR_NAN_AVAIL_BLOB for more details.
+ * @nan_avail_blob_len: length of the @nan_avail_blob in bytes.
+ * @deferred: if true, the command containing this schedule configuration is a
+ * request from the device to perform an announced schedule update. This
+ * means that it needs to send the updated NAN availability to the peers,
+ * and do the actual switch on the right time (i.e. at the end of the slot
+ * after the slot in which the updated NAN Availability was sent).
+ * See %NL80211_ATTR_NAN_SCHED_DEFERRED for more details.
+ * If false, the schedule is applied immediately.
+ * @nan_channels: array of NAN channel definitions that can be scheduled.
+ */
+struct cfg80211_nan_local_sched {
+ u8 schedule[CFG80211_NAN_SCHED_NUM_TIME_SLOTS];
+ u8 n_channels;
+ const u8 *nan_avail_blob;
+ u16 nan_avail_blob_len;
+ bool deferred;
+ struct cfg80211_nan_channel nan_channels[] __counted_by(n_channels);
+};
+
+/**
+ * struct cfg80211_nan_peer_map - NAN peer schedule map
+ *
+ * This struct defines a single NAN peer schedule map
+ *
+ * @map_id: map ID of this schedule map
+ * @schedule: a mapping of time slots to chandef indexes in the schedule's
+ * @nan_channels. Each slot lasts 16TUs. An unscheduled slot will be
+ * set to %NL80211_NAN_SCHED_NOT_AVAIL_SLOT.
+ */
+struct cfg80211_nan_peer_map {
+ u8 map_id;
+ u8 schedule[CFG80211_NAN_SCHED_NUM_TIME_SLOTS];
+};
+
+#define CFG80211_NAN_MAX_PEER_MAPS 2
+#define CFG80211_NAN_INVALID_MAP_ID 0xff
+
+/**
+ * struct cfg80211_nan_peer_sched - NAN peer schedule
+ *
+ * This struct defines NAN peer schedule parameters for a peer.
+ *
+ * @peer_addr: MAC address of the peer (NMI address)
+ * @seq_id: sequence ID of the peer schedule.
+ * @committed_dw: committed DW as published by the peer.
+ * See %NL80211_ATTR_NAN_COMMITTED_DW
+ * @max_chan_switch: maximum channel switch time in microseconds as published
+ * by the peer. See %NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME.
+ * @init_ulw: initial ULWs as published by the peer.
+ * @ulw_size: number of bytes in @init_ulw.
+ * @n_channels: number of channel definitions in @nan_channels.
+ * @nan_channels: array of NAN channel definitions for this schedule.
+ * @maps: array of peer schedule maps. Unused entries have
+ * map_id = %CFG80211_NAN_INVALID_MAP_ID.
+ */
+struct cfg80211_nan_peer_sched {
+ const u8 *peer_addr;
+ u8 seq_id;
+ u16 committed_dw;
+ u16 max_chan_switch;
+ const u8 *init_ulw;
+ u16 ulw_size;
+ u8 n_channels;
+ struct cfg80211_nan_channel *nan_channels;
+ struct cfg80211_nan_peer_map maps[CFG80211_NAN_MAX_PEER_MAPS];
+};
+
/**
* enum cfg80211_nan_conf_changes - indicates changed fields in NAN
* configuration
@@ -4828,6 +5003,19 @@ struct mgmt_frame_regs {
* @nan_change_conf: changes NAN configuration. The changed parameters must
* be specified in @changes (using &enum cfg80211_nan_conf_changes);
* All other parameters must be ignored.
+ * @nan_set_local_sched: configure the local schedule for NAN. The schedule
+ * consists of an array of %cfg80211_nan_channel and the schedule itself,
+ * in which each entry maps each time slot to the channel on which the
+ * radio should operate on. If the chandef of a NAN channel is not
+ * changed, the channel entry must also remain unchanged. It is the
+ * driver's responsibility to verify this.
+ * @nan_set_peer_sched: configure the peer schedule for NAN. The schedule
+ * consists of an array of %cfg80211_nan_channel and the schedule itself,
+ * in which each entry maps each time slot to a channel on which the
+ * radio should operate. In addition, it carries further peer schedule
+ * information such as committed DW, etc. When updating an existing peer
+ * schedule, the full new schedule is provided - partial updates are not
+ * supported, and the new schedule completely replaces the previous one.
*
* @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
*
@@ -4922,24 +5110,24 @@ struct cfg80211_ops {
struct wireless_dev *wdev,
unsigned int link_id);
- int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
+ int (*add_key)(struct wiphy *wiphy, struct wireless_dev *wdev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, struct key_params *params);
- int (*get_key)(struct wiphy *wiphy, struct net_device *netdev,
+ int (*get_key)(struct wiphy *wiphy, struct wireless_dev *wdev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*));
- int (*del_key)(struct wiphy *wiphy, struct net_device *netdev,
+ int (*del_key)(struct wiphy *wiphy, struct wireless_dev *wdev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr);
int (*set_default_key)(struct wiphy *wiphy,
struct net_device *netdev, int link_id,
u8 key_index, bool unicast, bool multicast);
int (*set_default_mgmt_key)(struct wiphy *wiphy,
- struct net_device *netdev, int link_id,
+ struct wireless_dev *wdev, int link_id,
u8 key_index);
int (*set_default_beacon_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct wireless_dev *wdev,
int link_id,
u8 key_index);
@@ -4951,17 +5139,17 @@ struct cfg80211_ops {
unsigned int link_id);
- int (*add_station)(struct wiphy *wiphy, struct net_device *dev,
+ int (*add_station)(struct wiphy *wiphy, struct wireless_dev *wdev,
const u8 *mac,
struct station_parameters *params);
- int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
+ int (*del_station)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct station_del_parameters *params);
- int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
+ int (*change_station)(struct wiphy *wiphy, struct wireless_dev *wdev,
const u8 *mac,
struct station_parameters *params);
- int (*get_station)(struct wiphy *wiphy, struct net_device *dev,
+ int (*get_station)(struct wiphy *wiphy, struct wireless_dev *wdev,
const u8 *mac, struct station_info *sinfo);
- int (*dump_station)(struct wiphy *wiphy, struct net_device *dev,
+ int (*dump_station)(struct wiphy *wiphy, struct wireless_dev *wdev,
int idx, u8 *mac, struct station_info *sinfo);
int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev,
@@ -5205,7 +5393,12 @@ struct cfg80211_ops {
struct wireless_dev *wdev,
struct cfg80211_nan_conf *conf,
u32 changes);
-
+ int (*nan_set_local_sched)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_local_sched *sched);
+ int (*nan_set_peer_sched)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_peer_sched *sched);
int (*set_multicast_to_unicast)(struct wiphy *wiphy,
struct net_device *dev,
const bool enabled);
@@ -5834,6 +6027,12 @@ enum wiphy_nan_flags {
* @max_channel_switch_time: maximum channel switch time in milliseconds.
* @dev_capabilities: NAN device capabilities as defined in Wi-Fi Aware (TM)
* specification Table 79 (Capabilities field).
+ * @phy: Band-agnostic capabilities for NAN data interfaces. Since NAN
+ * operates on multiple channels simultaneously, these capabilities apply
+ * across all bands. Valid only if NL80211_IFTYPE_NAN_DATA is supported.
+ * @phy.ht: HT capabilities (mandatory for NAN data)
+ * @phy.vht: VHT capabilities (optional)
+ * @phy.he: HE capabilities (optional)
*/
struct wiphy_nan_capa {
u32 flags;
@@ -5841,6 +6040,11 @@ struct wiphy_nan_capa {
u8 n_antennas;
u16 max_channel_switch_time;
u8 dev_capabilities;
+ struct {
+ struct ieee80211_sta_ht_cap ht;
+ struct ieee80211_sta_vht_cap vht;
+ struct ieee80211_sta_he_cap he;
+ } phy;
};
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
@@ -6734,8 +6938,8 @@ enum ieee80211_ap_reg_power {
* the P2P Device.
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
- * @ap_unexpected_nlportid: (private) netlink port ID of application
- * registered for unexpected class 3 frames (AP mode)
+ * @unexpected_nlportid: (private) netlink port ID of application
+ * registered for unexpected frames (AP mode or NAN_DATA mode)
* @conn: (private) cfg80211 software SME connection state machine data
* @connect_keys: (private) keys to set after connection is established
* @conn_bss_type: connecting/connected BSS type
@@ -6797,7 +7001,7 @@ struct wireless_dev {
bool ps;
int ps_timeout;
- u32 ap_unexpected_nlportid;
+ u32 unexpected_nlportid;
u32 owner_nlportid;
bool nl_owner_dead;
@@ -6857,6 +7061,9 @@ struct wireless_dev {
} ocb;
struct {
u8 cluster_id[ETH_ALEN] __aligned(2);
+ u8 n_channels;
+ struct cfg80211_chan_def *chandefs;
+ bool sched_update_pending;
} nan;
} u;
@@ -8962,35 +9169,35 @@ static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
/**
* cfg80211_new_sta - notify userspace about station
*
- * @dev: the netdev
+ * @wdev: the wireless device
* @mac_addr: the station's address
* @sinfo: the station information
* @gfp: allocation flags
*/
-void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
+void cfg80211_new_sta(struct wireless_dev *wdev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
* cfg80211_del_sta_sinfo - notify userspace about deletion of a station
- * @dev: the netdev
+ * @wdev: the wireless device
* @mac_addr: the station's address. For MLD station, MLD address is used.
* @sinfo: the station information/statistics
* @gfp: allocation flags
*/
-void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
+void cfg80211_del_sta_sinfo(struct wireless_dev *wdev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
* cfg80211_del_sta - notify userspace about deletion of a station
*
- * @dev: the netdev
+ * @wdev: the wireless device
* @mac_addr: the station's address. For MLD station, MLD address is used.
* @gfp: allocation flags
*/
-static inline void cfg80211_del_sta(struct net_device *dev,
+static inline void cfg80211_del_sta(struct wireless_dev *wdev,
const u8 *mac_addr, gfp_t gfp)
{
- cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp);
+ cfg80211_del_sta_sinfo(wdev, mac_addr, NULL, gfp);
}
/**
@@ -9365,9 +9572,10 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
* @addr: the transmitter address
* @gfp: context flags
*
- * This function is used in AP mode (only!) to inform userspace that
- * a spurious class 3 frame was received, to be able to deauth the
- * sender.
+ * This function is used in AP mode to inform userspace that a spurious
+ * class 3 frame was received, to be able to deauth the sender.
+ * It is also used in NAN_DATA mode to report frames from unknown peers
+ * (A2 not assigned to any active NDP), per Wi-Fi Aware (TM) 4.0 specification, section 6.2.5.
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
@@ -10014,6 +10222,18 @@ void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
enum nl80211_nan_func_term_reason reason,
u64 cookie, gfp_t gfp);
+/**
+ * cfg80211_nan_sched_update_done - notify deferred schedule update completion
+ * @wdev: the wireless device reporting the event
+ * @success: whether or not the schedule update was successful
+ * @gfp: allocation flags
+ *
+ * This function notifies user space that a deferred local NAN schedule update
+ * (requested with %NL80211_ATTR_NAN_SCHED_DEFERRED) has been completed.
+ */
+void cfg80211_nan_sched_update_done(struct wireless_dev *wdev, bool success,
+ gfp_t gfp);
+
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
@@ -10354,6 +10574,39 @@ void cfg80211_nan_cluster_joined(struct wireless_dev *wdev,
const u8 *cluster_id, bool new_cluster,
gfp_t gfp);
+/**
+ * cfg80211_nan_ulw_update - Notify user space about ULW update
+ * @wdev: Pointer to the wireless device structure
+ * @ulw: Pointer to the ULW blob data
+ * @ulw_len: Length of the ULW blob in bytes
+ * @gfp: Memory allocation flags
+ *
+ * This function is used by drivers to notify user space when the device's
+ * ULW (Unaligned Schedule) blob has been updated. User space can use this
+ * blob to attach to frames sent to peers.
+ */
+void cfg80211_nan_ulw_update(struct wireless_dev *wdev,
+ const u8 *ulw, size_t ulw_len, gfp_t gfp);
+
+/**
+ * cfg80211_nan_channel_evac - Notify user space about NAN channel evacuation
+ * @wdev: Pointer to the wireless device structure
+ * @chandef: Pointer to the channel definition of the NAN channel that was
+ * evacuated
+ * @gfp: Memory allocation flags
+ *
+ * This function is used by drivers to notify user space when a NAN
+ * channel has been evacuated (i.e. ULWed) due to channel resource conflicts
+ * with other interfaces.
+ * This can happen when another interface sharing the channel resource with NAN
+ * needs to move to a different channel (e.g. due to channel switch or link
+ * switch). User space may reconfigure the local schedule to exclude the
+ * evacuated channel.
+ */
+void cfg80211_nan_channel_evac(struct wireless_dev *wdev,
+ const struct cfg80211_chan_def *chandef,
+ gfp_t gfp);
+
#ifdef CONFIG_CFG80211_DEBUGFS
/**
* wiphy_locked_debugfs_read - do a locked read in debugfs
@@ -10472,4 +10725,27 @@ cfg80211_s1g_get_primary_sibling(struct wiphy *wiphy,
return ieee80211_get_channel_khz(wiphy, sibling_1mhz_khz);
}
+
+/**
+ * cfg80211_incumbent_signal_notify - Notify userspace of incumbent signal detection
+ * @wiphy: the wiphy to use
+ * @chandef: channel definition in which the interference was detected
+ * @signal_interference_bitmap: bitmap indicating interference across 20 MHz segments
+ * @gfp: allocation context for message creation and multicast; pass GFP_ATOMIC
+ * if called from atomic context (e.g. firmware event handler), otherwise
+ * GFP_KERNEL
+ *
+ * Use this function to notify userspace when an incumbent signal is detected on
+ * the operating channel in the 6 GHz band. The notification includes the
+ * current channel definition and a bitmap representing interference across
+ * the operating bandwidth. Each bit in the bitmap corresponds to a 20 MHz
+ * segment, with the lowest bit representing the lowest frequency segment.
+ * Punctured sub-channels are included in the bitmap structure but are always
+ * set to zero since interference detection is not performed on them.
+ */
+void cfg80211_incumbent_signal_notify(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 signal_interference_bitmap,
+ gfp_t gfp);
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index b2c359c6dd1b..2c1f0ec309e9 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -120,10 +120,10 @@ static bool codel_should_drop(const struct sk_buff *skb,
}
skb_len = skb_len_func(skb);
- vars->ldelay = now - skb_time_func(skb);
+ WRITE_ONCE(vars->ldelay, now - skb_time_func(skb));
if (unlikely(skb_len > stats->maxpacket))
- stats->maxpacket = skb_len;
+ WRITE_ONCE(stats->maxpacket, skb_len);
if (codel_time_before(vars->ldelay, params->target) ||
*backlog <= params->mtu) {
@@ -159,7 +159,7 @@ static struct sk_buff *codel_dequeue(void *ctx,
if (!skb) {
vars->first_above_time = 0;
- vars->dropping = false;
+ WRITE_ONCE(vars->dropping, false);
return skb;
}
now = codel_get_time();
@@ -168,7 +168,7 @@ static struct sk_buff *codel_dequeue(void *ctx,
if (vars->dropping) {
if (!drop) {
/* sojourn time below target - leave dropping state */
- vars->dropping = false;
+ WRITE_ONCE(vars->dropping, false);
} else if (codel_time_after_eq(now, vars->drop_next)) {
/* It's time for the next drop. Drop the current
* packet and dequeue the next. The dequeue might
@@ -180,16 +180,18 @@ static struct sk_buff *codel_dequeue(void *ctx,
*/
while (vars->dropping &&
codel_time_after_eq(now, vars->drop_next)) {
- vars->count++; /* dont care of possible wrap
- * since there is no more divide
- */
+ /* don't care about possible wrap
+ * since there is no more divide.
+ */
+ WRITE_ONCE(vars->count, vars->count + 1);
codel_Newton_step(vars);
if (params->ecn && INET_ECN_set_ce(skb)) {
- stats->ecn_mark++;
- vars->drop_next =
+ WRITE_ONCE(stats->ecn_mark,
+ stats->ecn_mark + 1);
+ WRITE_ONCE(vars->drop_next,
codel_control_law(vars->drop_next,
params->interval,
- vars->rec_inv_sqrt);
+ vars->rec_inv_sqrt));
goto end;
}
stats->drop_len += skb_len_func(skb);
@@ -202,13 +204,13 @@ static struct sk_buff *codel_dequeue(void *ctx,
skb_time_func,
backlog, now)) {
/* leave dropping state */
- vars->dropping = false;
+ WRITE_ONCE(vars->dropping, false);
} else {
/* and schedule the next drop */
- vars->drop_next =
+ WRITE_ONCE(vars->drop_next,
codel_control_law(vars->drop_next,
params->interval,
- vars->rec_inv_sqrt);
+ vars->rec_inv_sqrt));
}
}
}
@@ -216,7 +218,7 @@ static struct sk_buff *codel_dequeue(void *ctx,
u32 delta;
if (params->ecn && INET_ECN_set_ce(skb)) {
- stats->ecn_mark++;
+ WRITE_ONCE(stats->ecn_mark, stats->ecn_mark + 1);
} else {
stats->drop_len += skb_len_func(skb);
drop_func(skb, ctx);
@@ -227,7 +229,7 @@ static struct sk_buff *codel_dequeue(void *ctx,
stats, skb_len_func,
skb_time_func, backlog, now);
}
- vars->dropping = true;
+ WRITE_ONCE(vars->dropping, true);
/* if min went above target close to when we last went below it
* assume that the drop rate that controlled the queue on the
* last cycle is a good starting point to control it now.
@@ -236,19 +238,20 @@ static struct sk_buff *codel_dequeue(void *ctx,
if (delta > 1 &&
codel_time_before(now - vars->drop_next,
16 * params->interval)) {
- vars->count = delta;
+ WRITE_ONCE(vars->count, delta);
/* we dont care if rec_inv_sqrt approximation
* is not very precise :
* Next Newton steps will correct it quadratically.
*/
codel_Newton_step(vars);
} else {
- vars->count = 1;
+ WRITE_ONCE(vars->count, 1);
vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
}
- vars->lastcount = vars->count;
- vars->drop_next = codel_control_law(now, params->interval,
- vars->rec_inv_sqrt);
+ WRITE_ONCE(vars->lastcount, vars->count);
+ WRITE_ONCE(vars->drop_next,
+ codel_control_law(now, params->interval,
+ vars->rec_inv_sqrt));
}
end:
if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
@@ -262,7 +265,7 @@ end:
params->ce_threshold_selector));
}
if (set_ce && INET_ECN_set_ce(skb))
- stats->ce_mark++;
+ WRITE_ONCE(stats->ce_mark, stats->ce_mark + 1);
}
return skb;
}
diff --git a/include/net/devlink.h b/include/net/devlink.h
index cb839e0435a1..bcd31de1f890 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -129,6 +129,7 @@ struct devlink_rate {
struct devlink_port {
struct list_head list;
struct list_head region_list;
+ struct list_head resource_list;
struct devlink *devlink;
const struct devlink_port_ops *ops;
unsigned int index;
@@ -1611,6 +1612,9 @@ struct devlink_ops {
void *devlink_priv(struct devlink *devlink);
struct devlink *priv_to_devlink(void *priv);
struct device *devlink_to_dev(const struct devlink *devlink);
+const char *devlink_bus_name(const struct devlink *devlink);
+const char *devlink_dev_name(const struct devlink *devlink);
+const char *devlink_dev_driver_name(const struct devlink *devlink);
/* Devlink instance explicit locking */
void devl_lock(struct devlink *devlink);
@@ -1644,6 +1648,13 @@ void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
+struct devlink *devlink_shd_get(const char *id,
+ const struct devlink_ops *ops,
+ size_t priv_size,
+ const struct device_driver *driver);
+void devlink_shd_put(struct devlink *devlink);
+void *devlink_shd_get_priv(struct devlink *devlink);
+
/**
* struct devlink_port_ops - Port operations
* @port_split: Callback used to split the port into multiple ones.
@@ -1875,12 +1886,19 @@ int devl_resource_register(struct devlink *devlink,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params);
+ const struct devlink_resource_size_params *params);
void devl_resources_unregister(struct devlink *devlink);
void devlink_resources_unregister(struct devlink *devlink);
int devl_resource_size_get(struct devlink *devlink,
u64 resource_id,
u64 *p_resource_size);
+int
+devl_port_resource_register(struct devlink_port *devlink_port,
+ const char *resource_name,
+ u64 resource_size, u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *params);
+void devl_port_resources_unregister(struct devlink_port *devlink_port);
int devl_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units);
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index a7b7abd66e21..e0ca3904ff8e 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -68,18 +68,15 @@
FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
FN(QDISC_BURST_DROP) \
- FN(QDISC_OVERLIMIT) \
- FN(QDISC_CONGESTED) \
- FN(CAKE_FLOOD) \
- FN(FQ_BAND_LIMIT) \
- FN(FQ_HORIZON_LIMIT) \
- FN(FQ_FLOW_LIMIT) \
FN(CPU_BACKLOG) \
+ FN(MACVLAN_BROADCAST_BACKLOG) \
+ FN(IPVLAN_MULTICAST_BACKLOG) \
FN(XDP) \
FN(TC_INGRESS) \
FN(UNHANDLED_PROTO) \
FN(SKB_CSUM) \
FN(SKB_GSO_SEG) \
+ FN(SKB_BAD_GSO) \
FN(SKB_UCOPY_FAULT) \
FN(DEV_HDR) \
FN(DEV_READY) \
@@ -127,9 +124,9 @@
FN(CANFD_RX_INVALID_FRAME) \
FN(CANXL_RX_INVALID_FRAME) \
FN(PFMEMALLOC) \
- FN(DUALPI2_STEP_DROP) \
FN(PSP_INPUT) \
FN(PSP_OUTPUT) \
+ FN(RECURSION_LIMIT) \
FNe(MAX)
/**
@@ -371,8 +368,10 @@ enum skb_drop_reason {
/** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
SKB_DROP_REASON_SECURITY_HOOK,
/**
- * @SKB_DROP_REASON_QDISC_DROP: dropped by qdisc when packet outputting (
- * failed to enqueue to current qdisc)
+ * @SKB_DROP_REASON_QDISC_DROP: dropped by qdisc during enqueue or
+ * dequeue. More specific drop reasons are available via the
+ * qdisc:qdisc_drop tracepoint, which also provides qdisc handle
+ * and name for identifying the source.
*/
SKB_DROP_REASON_QDISC_DROP,
/**
@@ -381,41 +380,21 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_QDISC_BURST_DROP,
/**
- * @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc
- * instance exceeds its total buffer size limit.
- */
- SKB_DROP_REASON_QDISC_OVERLIMIT,
- /**
- * @SKB_DROP_REASON_QDISC_CONGESTED: dropped by a qdisc AQM algorithm
- * due to congestion.
- */
- SKB_DROP_REASON_QDISC_CONGESTED,
- /**
- * @SKB_DROP_REASON_CAKE_FLOOD: dropped by the flood protection part of
- * CAKE qdisc AQM algorithm (BLUE).
- */
- SKB_DROP_REASON_CAKE_FLOOD,
- /**
- * @SKB_DROP_REASON_FQ_BAND_LIMIT: dropped by fq qdisc when per band
- * limit is reached.
- */
- SKB_DROP_REASON_FQ_BAND_LIMIT,
- /**
- * @SKB_DROP_REASON_FQ_HORIZON_LIMIT: dropped by fq qdisc when packet
- * timestamp is too far in the future.
- */
- SKB_DROP_REASON_FQ_HORIZON_LIMIT,
- /**
- * @SKB_DROP_REASON_FQ_FLOW_LIMIT: dropped by fq qdisc when a flow
- * exceeds its limits.
- */
- SKB_DROP_REASON_FQ_FLOW_LIMIT,
- /**
* @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU
* backlog queue. This can be caused by backlog queue full (see
* netdev_max_backlog in net.rst) or RPS flow limit
*/
SKB_DROP_REASON_CPU_BACKLOG,
+ /**
+ * @SKB_DROP_REASON_MACVLAN_BROADCAST_BACKLOG: failed to enqueue the skb
+ * to macvlan broadcast queue.
+ */
+ SKB_DROP_REASON_MACVLAN_BROADCAST_BACKLOG,
+ /**
+ * @SKB_DROP_REASON_IPVLAN_MULTICAST_BACKLOG: failed to enqueue the skb
+ * to ipvlan multicast queue.
+ */
+ SKB_DROP_REASON_IPVLAN_MULTICAST_BACKLOG,
/** @SKB_DROP_REASON_XDP: dropped by XDP in input path */
SKB_DROP_REASON_XDP,
/** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */
@@ -426,6 +405,8 @@ enum skb_drop_reason {
SKB_DROP_REASON_SKB_CSUM,
/** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */
SKB_DROP_REASON_SKB_GSO_SEG,
+ /** @SKB_DROP_REASON_SKB_BAD_GSO: malicious gso packet. */
+ SKB_DROP_REASON_SKB_BAD_GSO,
/**
* @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space,
* e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx()
@@ -613,15 +594,12 @@ enum skb_drop_reason {
* reached a path or socket not eligible for use of memory reserves
*/
SKB_DROP_REASON_PFMEMALLOC,
- /**
- * @SKB_DROP_REASON_DUALPI2_STEP_DROP: dropped by the step drop
- * threshold of DualPI2 qdisc.
- */
- SKB_DROP_REASON_DUALPI2_STEP_DROP,
/** @SKB_DROP_REASON_PSP_INPUT: PSP input checks failed */
SKB_DROP_REASON_PSP_INPUT,
/** @SKB_DROP_REASON_PSP_OUTPUT: PSP output checks failed */
SKB_DROP_REASON_PSP_OUTPUT,
+ /** @SKB_DROP_REASON_RECURSION_LIMIT: Dead loop on virtual device. */
+ SKB_DROP_REASON_RECURSION_LIMIT,
/**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
diff --git a/include/net/dropreason-qdisc.h b/include/net/dropreason-qdisc.h
new file mode 100644
index 000000000000..fb151cd31751
--- /dev/null
+++ b/include/net/dropreason-qdisc.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_DROPREASON_QDISC_H
+#define _LINUX_DROPREASON_QDISC_H
+#include <net/dropreason.h>
+
+#define DEFINE_QDISC_DROP_REASON(FN, FNe) \
+ FN(UNSPEC) \
+ FN(GENERIC) \
+ FN(OVERLIMIT) \
+ FN(CONGESTED) \
+ FN(MAXFLOWS) \
+ FN(FLOOD_PROTECTION) \
+ FN(BAND_LIMIT) \
+ FN(HORIZON_LIMIT) \
+ FN(FLOW_LIMIT) \
+ FN(L4S_STEP_NON_ECN) \
+ FNe(MAX)
+
+#undef FN
+#undef FNe
+#define FN(reason) QDISC_DROP_##reason,
+#define FNe(reason) QDISC_DROP_##reason
+
+/**
+ * enum qdisc_drop_reason - reason why a qdisc dropped a packet
+ *
+ * Qdisc-specific drop reasons for packet drops that occur within the
+ * traffic control (TC) queueing discipline layer. These reasons provide
+ * detailed diagnostics about why packets were dropped by various qdisc
+ * algorithms, enabling fine-grained monitoring and troubleshooting of
+ * queue behavior.
+ */
+enum qdisc_drop_reason {
+ /**
+ * @QDISC_DROP_UNSPEC: unspecified/invalid qdisc drop reason.
+ * Value 0 is analogous to SKB_NOT_DROPPED_YET for enum skb_drop_reason.
+ * Used for catching zero-initialized drop_reason fields.
+ */
+ QDISC_DROP_UNSPEC = 0,
+ /**
+ * @__QDISC_DROP_REASON: subsystem base value for qdisc drop reasons
+ */
+ __QDISC_DROP_REASON = SKB_DROP_REASON_SUBSYS_QDISC <<
+ SKB_DROP_REASON_SUBSYS_SHIFT,
+ /**
+ * @QDISC_DROP_GENERIC: generic/default qdisc drop, used when no
+ * more specific reason applies
+ */
+ QDISC_DROP_GENERIC,
+ /**
+ * @QDISC_DROP_OVERLIMIT: packet dropped because the qdisc queue
+ * length exceeded its configured limit (sch->limit). This typically
+ * indicates the queue is full and cannot accept more packets.
+ */
+ QDISC_DROP_OVERLIMIT,
+ /**
+ * @QDISC_DROP_CONGESTED: packet dropped due to active congestion
+ * control algorithms (e.g., CoDel, PIE, RED) detecting network
+ * congestion. The qdisc proactively dropped the packet to signal
+ * congestion to the sender and prevent bufferbloat.
+ */
+ QDISC_DROP_CONGESTED,
+ /**
+ * @QDISC_DROP_MAXFLOWS: packet dropped because the qdisc's flow
+ * tracking table is full and no free slots are available to allocate
+ * for a new flow. This indicates flow table exhaustion in flow-based
+ * qdiscs that maintain per-flow state (e.g., SFQ).
+ */
+ QDISC_DROP_MAXFLOWS,
+ /**
+ * @QDISC_DROP_FLOOD_PROTECTION: packet dropped by flood protection
+ * mechanism detecting unresponsive flows (potential DoS/flood).
+ * Used by qdiscs implementing probabilistic drop algorithms like
+ * BLUE (e.g., CAKE's Cobalt AQM).
+ */
+ QDISC_DROP_FLOOD_PROTECTION,
+ /**
+ * @QDISC_DROP_BAND_LIMIT: packet dropped because the priority band's
+ * limit was reached. Used by qdiscs with priority bands that have
+ * per-band packet limits (e.g., FQ).
+ */
+ QDISC_DROP_BAND_LIMIT,
+ /**
+ * @QDISC_DROP_HORIZON_LIMIT: packet dropped because its timestamp
+ * is too far in the future (beyond the configured horizon).
+ * Used by qdiscs with time-based scheduling (e.g., FQ).
+ */
+ QDISC_DROP_HORIZON_LIMIT,
+ /**
+ * @QDISC_DROP_FLOW_LIMIT: packet dropped because an individual flow
+ * exceeded its per-flow packet/depth limit. Used by FQ and SFQ qdiscs
+ * to enforce per-flow fairness and prevent a single flow from
+ * monopolizing queue resources.
+ */
+ QDISC_DROP_FLOW_LIMIT,
+ /**
+ * @QDISC_DROP_L4S_STEP_NON_ECN: DualPI2 qdisc dropped a non-ECN-capable
+ * packet because the L4S queue delay exceeded the step threshold.
+ * Since the packet cannot be ECN-marked, it must be dropped to signal
+ * congestion. See RFC 9332 for the DualQ Coupled AQM step mechanism.
+ */
+ QDISC_DROP_L4S_STEP_NON_ECN,
+ /**
+ * @QDISC_DROP_MAX: the maximum of qdisc drop reasons, which
+ * shouldn't be used as a real 'reason' - only for tracing code gen
+ */
+ QDISC_DROP_MAX,
+};
+
+#undef FN
+#undef FNe
+
+#endif
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 7d3b1a2a6fec..1df60645fb27 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -23,6 +23,12 @@ enum skb_drop_reason_subsys {
*/
SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
+ /**
+ * @SKB_DROP_REASON_SUBSYS_QDISC: TC qdisc drop reasons,
+ * see include/net/dropreason-qdisc.h
+ */
+ SKB_DROP_REASON_SUBSYS_QDISC,
+
/** @SKB_DROP_REASON_SUBSYS_NUM: number of subsystems defined */
SKB_DROP_REASON_SUBSYS_NUM
};
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6c17446f3dcc..8b6d34e8a6f0 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -831,6 +831,22 @@ dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
return false;
}
+#define dsa_switch_for_each_bridge_member(_dp, _ds, _bdev) \
+ dsa_switch_for_each_user_port(_dp, _ds) \
+ if (dsa_port_offloads_bridge_dev(_dp, _bdev))
+
+static inline u32
+dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev)
+{
+ struct dsa_port *dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_bridge_member(dp, ds, bdev)
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
static inline bool dsa_port_tree_same(const struct dsa_port *a,
const struct dsa_port *b)
{
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index 6632b1aa7584..62534d1f3c70 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -6,6 +6,9 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
+#ifdef CONFIG_RPS
+#include <net/rps-types.h>
+#endif
struct skb_defer_node {
struct llist_head defer_list;
@@ -33,7 +36,7 @@ struct net_hotdata {
struct kmem_cache *skbuff_fclone_cache;
struct kmem_cache *skb_small_head_cache;
#ifdef CONFIG_RPS
- struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+ rps_tag_ptr rps_sock_flow_table;
u32 rps_cpu_mask;
#endif
struct skb_defer_node __percpu *skb_defer_nodes;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index ece8dabd209a..b814e1acc512 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -18,6 +18,9 @@ struct sk_buff;
struct sock;
struct sockaddr;
+struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ struct flowi6 *fl6);
+
struct dst_entry *inet6_csk_route_req(const struct sock *sk,
struct dst_entry *dst,
struct flowi6 *fl6,
@@ -25,5 +28,4 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
-struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index c16de5b7963f..2cc5d416bbb5 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -24,6 +24,8 @@
struct inet_hashinfo;
+void inet6_init_ehash_secret(void);
+
static inline unsigned int __inet6_ehashfn(const u32 lhash,
const u16 lport,
const u32 fhash,
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 5dd2bf24449e..3d747896be30 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -59,8 +59,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
unsigned short type, unsigned char protocol,
struct net *net);
-int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
- int *addr_len);
+int inet_recv_error(struct sock *sk, struct msghdr *msg, int len);
struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
int inet_gro_complete(struct sk_buff *skb, int nhoff);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5cb3056d6ddc..433c2df23076 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -34,7 +34,7 @@ struct tcp_congestion_ops;
*/
struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
- void (*send_check)(struct sock *sk, struct sk_buff *skb);
+ u16 net_header_len;
int (*rebuild_header)(struct sock *sk);
void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
@@ -45,7 +45,6 @@ struct inet_connection_sock_af_ops {
bool *own_req,
void (*opt_child_init)(struct sock *newsk,
const struct sock *sk));
- u16 net_header_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 6d936e9f2fd3..6e2fe186d0dc 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -300,7 +300,6 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
-int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
diff --git a/include/net/ip.h b/include/net/ip.h
index 7f9abd457e01..7f2fe1a8401b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -692,14 +692,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
-#if IS_MODULE(CONFIG_IPV6)
-#define EXPORT_IPV6_MOD(X) EXPORT_SYMBOL(X)
-#define EXPORT_IPV6_MOD_GPL(X) EXPORT_SYMBOL_GPL(X)
-#else
-#define EXPORT_IPV6_MOD(X)
-#define EXPORT_IPV6_MOD_GPL(X)
-#endif
-
static inline unsigned int ipv4_addr_hash(__be32 ip)
{
return (__force unsigned int) ip;
@@ -812,7 +804,7 @@ int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *));
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index c8a96b888277..6677b3cc3972 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -82,6 +82,4 @@ static inline __sum16 udp_v6_check(int len,
void udp6_set_csum(bool nocheck, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr, int len);
-
-int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9f8b6814a96a..9cd27e1b9b69 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -486,11 +486,30 @@ void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
rcu_read_unlock();
}
+#if IS_ENABLED(CONFIG_IPV6)
int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
+#else
+static inline int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
+ return -EAFNOSUPPORT;
+}
+
+static inline void fib6_nh_release(struct fib6_nh *fib6_nh)
+{
+}
+
+static inline void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
+{
+}
+#endif
+
int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
@@ -502,8 +521,15 @@ int call_fib6_multipath_entry_notifiers(struct net *net,
unsigned int nsiblings,
struct netlink_ext_ack *extack);
int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt);
+#if IS_ENABLED(CONFIG_IPV6)
void fib6_rt_update(struct net *net, struct fib6_info *rt,
struct nl_info *info);
+#else
+static inline void fib6_rt_update(struct net *net, struct fib6_info *rt,
+ struct nl_info *info)
+{
+}
+#endif
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int flags);
@@ -588,8 +614,13 @@ int fib6_tables_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
void fib6_update_sernum(struct net *net, struct fib6_info *rt);
+#if IS_ENABLED(CONFIG_IPV6)
void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
-void fib6_update_sernum_stub(struct net *net, struct fib6_info *f6i);
+#else
+static inline void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt)
+{
+}
+#endif
void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val);
static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
@@ -599,7 +630,7 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
bool offload, bool trap, bool offload_failed);
-#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
+#if IS_ENABLED(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
struct bpf_iter__ipv6_route {
__bpf_md_ptr(struct bpf_iter_meta *, meta);
__bpf_md_ptr(struct fib6_info *, rt);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index a55f9bf95fe3..09ffe0f13ce7 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -77,7 +77,14 @@ static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
f6i->fib6_nh->fib_nh_gw_family;
}
+#if IS_ENABLED(CONFIG_IPV6)
void ip6_route_input(struct sk_buff *skb);
+#else
+static inline void ip6_route_input(struct sk_buff *skb)
+{
+}
+#endif
+
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
struct flowi6 *fl6,
@@ -119,7 +126,15 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd,
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
+#if IS_ENABLED(CONFIG_IPV6)
int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify);
+#else
+static inline int ip6_del_rt(struct net *net, struct fib6_info *f6i,
+ bool skip_notify)
+{
+ return -EAFNOSUPPORT;
+}
+#endif
void rt6_flush_exceptions(struct fib6_info *f6i);
void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
@@ -252,19 +267,37 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
return rt->rt6i_flags & RTF_LOCAL;
}
+static inline bool __ipv6_anycast_destination(const struct rt6key *rt6i_dst,
+ u32 rt6i_flags,
+ const struct in6_addr *daddr)
+{
+ return rt6i_flags & RTF_ANYCAST ||
+ (rt6i_dst->plen < 127 &&
+ !(rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
+ ipv6_addr_equal(&rt6i_dst->addr, daddr));
+}
+
static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
const struct in6_addr *daddr)
{
const struct rt6_info *rt = dst_rt6_info(dst);
- return rt->rt6i_flags & RTF_ANYCAST ||
- (rt->rt6i_dst.plen < 127 &&
- !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
- ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
+ return __ipv6_anycast_destination(&rt->rt6i_dst, rt->rt6i_flags, daddr);
}
+#if IS_ENABLED(CONFIG_IPV6)
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
+#else
+static inline int ip6_fragment(struct net *net, struct sock *sk,
+ struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *,
+ struct sk_buff *))
+{
+ kfree_skb(skb);
+ return -EAFNOSUPPORT;
+}
+#endif
/* Variant of dst_mtu() for IPv6 users */
static inline u32 dst6_mtu(const struct dst_entry *dst)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 359b595f1df9..b99805ee2fd1 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -162,7 +162,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
dev->name);
DEV_STATS_INC(dev, tx_errors);
}
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_RECURSION_LIMIT);
return;
}
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 1f577a4f8ce9..d708b66e55cd 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -32,7 +32,7 @@
* recursion involves route lookups and full IP output, consuming much
* more stack per level, so a lower limit is needed.
*/
-#define IP_TUNNEL_RECURSION_LIMIT 4
+#define IP_TUNNEL_RECURSION_LIMIT 5
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 29a36709e7f3..72d325c81313 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -11,6 +11,7 @@
#include <asm/types.h> /* for __uXX types */
#include <linux/list.h> /* for struct list_head */
+#include <linux/rculist_bl.h> /* for struct hlist_bl_head */
#include <linux/spinlock.h> /* for struct rwlock_t */
#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/refcount.h> /* for struct refcount_t */
@@ -30,10 +31,23 @@
#endif
#include <net/net_namespace.h> /* Netw namespace */
#include <linux/sched/isolation.h>
+#include <linux/siphash.h>
#define IP_VS_HDR_INVERSE 1
#define IP_VS_HDR_ICMP 2
+/* conn_tab limits (as per Kconfig) */
+#define IP_VS_CONN_TAB_MIN_BITS 8
+#if BITS_PER_LONG > 32
+#define IP_VS_CONN_TAB_MAX_BITS 27
+#else
+#define IP_VS_CONN_TAB_MAX_BITS 20
+#endif
+
+/* svc_table limits */
+#define IP_VS_SVC_TAB_MIN_BITS 4
+#define IP_VS_SVC_TAB_MAX_BITS 20
+
/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net* net)
{
@@ -43,8 +57,6 @@ static inline struct netns_ipvs *net_ipvs(struct net* net)
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
-extern struct mutex __ip_vs_mutex;
-
struct ip_vs_iphdr {
int hdr_flags; /* ipvs flags */
__u32 off; /* Where IP or IPv4 header starts */
@@ -265,6 +277,29 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
pr_err(msg, ##__VA_ARGS__); \
} while (0)
+struct ip_vs_aligned_lock {
+ spinlock_t l; /* Protect buckets */
+} ____cacheline_aligned_in_smp;
+
+/* For arrays per family */
+enum {
+ IP_VS_AF_INET,
+ IP_VS_AF_INET6,
+ IP_VS_AF_MAX
+};
+
+static inline int ip_vs_af_index(int af)
+{
+ return af == AF_INET6 ? IP_VS_AF_INET6 : IP_VS_AF_INET;
+}
+
+/* work_flags */
+enum {
+ IP_VS_WORK_SVC_RESIZE, /* Schedule svc_resize_work */
+ IP_VS_WORK_SVC_NORESIZE, /* Stopping svc_resize_work */
+ IP_VS_WORK_CONN_RESIZE, /* Schedule conn_resize_work */
+};
+
/* The port number of FTP service (in network order). */
#define FTPPORT cpu_to_be16(21)
#define FTPDATA cpu_to_be16(20)
@@ -466,6 +501,198 @@ struct ip_vs_est_kt_data {
int est_row; /* estimated row */
};
+/* IPVS resizable hash tables */
+struct ip_vs_rht {
+ struct hlist_bl_head *buckets;
+ struct ip_vs_rht __rcu *new_tbl; /* New/Same table */
+ seqcount_t *seqc; /* Protects moves */
+ struct ip_vs_aligned_lock *lock; /* Protect seqc */
+ int mask; /* Buckets mask */
+ int size; /* Buckets */
+ int seqc_mask; /* seqc mask */
+ int lock_mask; /* lock mask */
+ u32 table_id;
+ int u_thresh; /* upper threshold */
+ int l_thresh; /* lower threshold */
+ int lfactor; /* Load Factor (shift) */
+ int bits; /* size = 1 << bits */
+ siphash_key_t hash_key;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * ip_vs_rht_for_each_table() - Walk the hash tables
+ * @table: struct ip_vs_rht __rcu *table
+ * @t: current table, used as cursor, struct ip_vs_rht *var
+ * @p: previous table, temp struct ip_vs_rht *var
+ *
+ * Walk tables assuming others can not change the installed tables
+ */
+#define ip_vs_rht_for_each_table(table, t, p) \
+ for (p = NULL, t = rcu_dereference_protected(table, 1); \
+ t != p; \
+ p = t, t = rcu_dereference_protected(t->new_tbl, 1))
+
+/**
+ * ip_vs_rht_for_each_table_rcu() - Walk the hash tables under RCU reader lock
+ * @table: struct ip_vs_rht __rcu *table
+ * @t: current table, used as cursor, struct ip_vs_rht *var
+ * @p: previous table, temp struct ip_vs_rht *var
+ *
+ * We usually search in one table and also in second table on resizing
+ */
+#define ip_vs_rht_for_each_table_rcu(table, t, p) \
+ for (p = NULL, t = rcu_dereference(table); \
+ t != p; \
+ p = t, t = rcu_dereference(t->new_tbl))
+
+/**
+ * ip_vs_rht_for_each_bucket() - Walk all table buckets
+ * @t: current table, used as cursor, struct ip_vs_rht *var
+ * @bucket: bucket index, used as cursor, u32 var
+ * @head: bucket address, used as cursor, struct hlist_bl_head *var
+ */
+#define ip_vs_rht_for_each_bucket(t, bucket, head) \
+ for (bucket = 0, head = (t)->buckets; \
+ bucket < t->size; bucket++, head++)
+
+/**
+ * ip_vs_rht_for_bucket_retry() - Retry bucket if entries are moved
+ * @t: current table, used as cursor, struct ip_vs_rht *var
+ * @bucket: index of current bucket or hash key
+ * @sc: temp seqcount_t *var
+ * @seq: temp unsigned int var for sequence count
+ * @retry: temp int var
+ */
+#define ip_vs_rht_for_bucket_retry(t, bucket, sc, seq, retry) \
+ for (retry = 1, sc = &(t)->seqc[(bucket) & (t)->seqc_mask]; \
+ retry && ({ seq = read_seqcount_begin(sc); 1; }); \
+ retry = read_seqcount_retry(sc, seq))
+
+/**
+ * DECLARE_IP_VS_RHT_WALK_BUCKETS_RCU() - Declare variables
+ *
+ * Variables for ip_vs_rht_walk_buckets_rcu
+ */
+#define DECLARE_IP_VS_RHT_WALK_BUCKETS_RCU() \
+ struct ip_vs_rht *_t, *_p; \
+ unsigned int _seq; \
+ seqcount_t *_sc; \
+ u32 _bucket; \
+ int _retry
+/**
+ * ip_vs_rht_walk_buckets_rcu() - Walk all buckets under RCU read lock
+ * @table: struct ip_vs_rht __rcu *table
+ * @head: bucket address, used as cursor, struct hlist_bl_head *var
+ *
+ * Can be used while others add/delete/move entries
+ * Not suitable if duplicates are not desired
+ * Possible cases for reader that uses cond_resched_rcu() in the loop:
+ * - new table can not be installed, no need to repeat
+ * - new table can be installed => check and repeat if new table is
+ * installed, needed for !PREEMPT_RCU
+ */
+#define ip_vs_rht_walk_buckets_rcu(table, head) \
+ ip_vs_rht_for_each_table_rcu(table, _t, _p) \
+ ip_vs_rht_for_each_bucket(_t, _bucket, head) \
+ ip_vs_rht_for_bucket_retry(_t, _bucket, _sc, \
+ _seq, _retry)
+
+/**
+ * DECLARE_IP_VS_RHT_WALK_BUCKET_RCU() - Declare variables
+ *
+ * Variables for ip_vs_rht_walk_bucket_rcu
+ */
+#define DECLARE_IP_VS_RHT_WALK_BUCKET_RCU() \
+ unsigned int _seq; \
+ seqcount_t *_sc; \
+ int _retry
+/**
+ * ip_vs_rht_walk_bucket_rcu() - Walk bucket under RCU read lock
+ * @t: current table, struct ip_vs_rht *var
+ * @bucket: index of current bucket or hash key
+ * @head: bucket address, used as cursor, struct hlist_bl_head *var
+ *
+ * Can be used while others add/delete/move entries
+ * Not suitable if duplicates are not desired
+ * Possible cases for reader that uses cond_resched_rcu() in the loop:
+ * - new table can not be installed, no need to repeat
+ * - new table can be installed => check and repeat if new table is
+ * installed, needed for !PREEMPT_RCU
+ */
+#define ip_vs_rht_walk_bucket_rcu(t, bucket, head) \
+ if (({ head = (t)->buckets + ((bucket) & (t)->mask); 0; })) \
+ {} \
+ else \
+ ip_vs_rht_for_bucket_retry(t, (bucket), _sc, _seq, _retry)
+
+/**
+ * DECLARE_IP_VS_RHT_WALK_BUCKETS_SAFE_RCU() - Declare variables
+ *
+ * Variables for ip_vs_rht_walk_buckets_safe_rcu
+ */
+#define DECLARE_IP_VS_RHT_WALK_BUCKETS_SAFE_RCU() \
+ struct ip_vs_rht *_t, *_p; \
+ u32 _bucket
+/**
+ * ip_vs_rht_walk_buckets_safe_rcu() - Walk all buckets under RCU read lock
+ * @table: struct ip_vs_rht __rcu *table
+ * @head: bucket address, used as cursor, struct hlist_bl_head *var
+ *
+ * Can be used while others add/delete entries but moving is disabled
+ * Using cond_resched_rcu() should be safe if tables do not change
+ */
+#define ip_vs_rht_walk_buckets_safe_rcu(table, head) \
+ ip_vs_rht_for_each_table_rcu(table, _t, _p) \
+ ip_vs_rht_for_each_bucket(_t, _bucket, head)
+
+/**
+ * DECLARE_IP_VS_RHT_WALK_BUCKETS() - Declare variables
+ *
+ * Variables for ip_vs_rht_walk_buckets
+ */
+#define DECLARE_IP_VS_RHT_WALK_BUCKETS() \
+ struct ip_vs_rht *_t, *_p; \
+ u32 _bucket
+
+/**
+ * ip_vs_rht_walk_buckets() - Walk all buckets
+ * @table: struct ip_vs_rht __rcu *table
+ * @head: bucket address, used as cursor, struct hlist_bl_head *var
+ *
+ * Use if others can not add/delete/move entries
+ */
+#define ip_vs_rht_walk_buckets(table, head) \
+ ip_vs_rht_for_each_table(table, _t, _p) \
+ ip_vs_rht_for_each_bucket(_t, _bucket, head)
+
+/* Entries can be in one of two tables, so we flip bit when new table is
+ * created and store it as highest bit in hash keys
+ */
+#define IP_VS_RHT_TABLE_ID_MASK BIT(31)
+
+/* Check if hash key is from this table */
+static inline bool ip_vs_rht_same_table(struct ip_vs_rht *t, u32 hash_key)
+{
+ return !((t->table_id ^ hash_key) & IP_VS_RHT_TABLE_ID_MASK);
+}
+
+/* Build per-table hash key from hash value */
+static inline u32 ip_vs_rht_build_hash_key(struct ip_vs_rht *t, u32 hash)
+{
+ return t->table_id | (hash & ~IP_VS_RHT_TABLE_ID_MASK);
+}
+
+void ip_vs_rht_free(struct ip_vs_rht *t);
+void ip_vs_rht_rcu_free(struct rcu_head *head);
+struct ip_vs_rht *ip_vs_rht_alloc(int buckets, int scounts, int locks);
+int ip_vs_rht_desired_size(struct netns_ipvs *ipvs, struct ip_vs_rht *t, int n,
+ int lfactor, int min_bits, int max_bits);
+void ip_vs_rht_set_thresholds(struct ip_vs_rht *t, int size, int lfactor,
+ int min_bits, int max_bits);
+u32 ip_vs_rht_hash_linfo(struct ip_vs_rht *t, int af,
+ const union nf_inet_addr *addr, u32 v1, u32 v2);
+
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
@@ -559,50 +786,48 @@ struct ip_vs_conn_param {
__u8 pe_data_len;
};
+/* Hash node in conn_tab */
+struct ip_vs_conn_hnode {
+ struct hlist_bl_node node; /* node in conn_tab */
+ u32 hash_key; /* Key for the hash table */
+ u8 dir; /* 0=out->in, 1=in->out */
+} __packed;
+
/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
- struct hlist_node c_list; /* hashed list heads */
- /* Protocol, addresses and port numbers */
+ /* Cacheline for hash table nodes - rarely modified */
+
+ struct ip_vs_conn_hnode hn0; /* Original direction */
+ u8 af; /* address family */
__be16 cport;
+ struct ip_vs_conn_hnode hn1; /* Reply direction */
+ u8 daf; /* Address family of the dest */
__be16 dport;
- __be16 vport;
- u16 af; /* address family */
- union nf_inet_addr caddr; /* client address */
- union nf_inet_addr vaddr; /* virtual address */
- union nf_inet_addr daddr; /* destination address */
+ struct ip_vs_dest *dest; /* real server */
+ atomic_t n_control; /* Number of controlled ones */
volatile __u32 flags; /* status flags */
- __u16 protocol; /* Which protocol (TCP/UDP) */
- __u16 daf; /* Address family of the dest */
- struct netns_ipvs *ipvs;
-
- /* counter and timer */
- refcount_t refcnt; /* reference count */
- struct timer_list timer; /* Expiration timer */
- volatile unsigned long timeout; /* timeout */
+ /* 44/64 */
- /* Flags and state transition */
- spinlock_t lock; /* lock for state transition */
+ struct ip_vs_conn *control; /* Master control connection */
+ const struct ip_vs_pe *pe;
+ char *pe_data;
+ __u8 pe_data_len;
volatile __u16 state; /* state info */
volatile __u16 old_state; /* old state, to be used for
* state transition triggered
* synchronization
*/
- __u32 fwmark; /* Fire wall mark from skb */
- unsigned long sync_endtime; /* jiffies + sent_retries */
+ /* 2-byte hole */
+ /* 64/96 */
- /* Control members */
- struct ip_vs_conn *control; /* Master control connection */
- atomic_t n_control; /* Number of controlled ones */
- struct ip_vs_dest *dest; /* real server */
- atomic_t in_pkts; /* incoming packet counter */
+ union nf_inet_addr caddr; /* client address */
+ union nf_inet_addr vaddr; /* virtual address */
+ /* 96/128 */
- /* Packet transmitter for different forwarding methods. If it
- * mangles the packet, it must return NF_DROP or better NF_STOLEN,
- * otherwise this must be changed to a sk_buff **.
- * NF_ACCEPT can be returned when destination is local.
- */
- int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
- struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+ union nf_inet_addr daddr; /* destination address */
+ __u32 fwmark; /* Fire wall mark from skb */
+ __be16 vport;
+ __u16 protocol; /* Which protocol (TCP/UDP) */
/* Note: we can group the following members into a structure,
* in order to save more space, and the following members are
@@ -610,14 +835,31 @@ struct ip_vs_conn {
*/
struct ip_vs_app *app; /* bound ip_vs_app object */
void *app_data; /* Application private data */
+ /* 128/168 */
struct_group(sync_conn_opt,
struct ip_vs_seq in_seq; /* incoming seq. struct */
struct ip_vs_seq out_seq; /* outgoing seq. struct */
);
+ /* 152/192 */
- const struct ip_vs_pe *pe;
- char *pe_data;
- __u8 pe_data_len;
+ struct timer_list timer; /* Expiration timer */
+ volatile unsigned long timeout; /* timeout */
+ spinlock_t lock; /* lock for state transition */
+ refcount_t refcnt; /* reference count */
+ atomic_t in_pkts; /* incoming packet counter */
+ /* 64-bit: 4-byte gap */
+
+ /* 188/256 */
+ unsigned long sync_endtime; /* jiffies + sent_retries */
+ struct netns_ipvs *ipvs;
+
+ /* Packet transmitter for different forwarding methods. If it
+ * mangles the packet, it must return NF_DROP or better NF_STOLEN,
+ * otherwise this must be changed to a sk_buff **.
+ * NF_ACCEPT can be returned when destination is local.
+ */
+ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
struct rcu_head rcu_head;
};
@@ -673,15 +915,15 @@ struct ip_vs_dest_user_kern {
* forwarding entries.
*/
struct ip_vs_service {
- struct hlist_node s_list; /* for normal service table */
- struct hlist_node f_list; /* for fwmark-based service table */
- atomic_t refcnt; /* reference counter */
-
+ struct hlist_bl_node s_list; /* node in service table */
+ u32 hash_key; /* Key for the hash table */
u16 af; /* address family */
__u16 protocol; /* which protocol (TCP/UDP) */
+
union nf_inet_addr addr; /* IP address for virtual service */
- __be16 port; /* port number for the service */
__u32 fwmark; /* firewall mark of the service */
+ atomic_t refcnt; /* reference counter */
+ __be16 port; /* port number for the service */
unsigned int flags; /* service status flags */
unsigned int timeout; /* persistent timeout in ticks */
__be32 netmask; /* grouping granularity, mask/plen */
@@ -791,8 +1033,8 @@ struct ip_vs_pe {
int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
bool (*ct_match)(const struct ip_vs_conn_param *p,
struct ip_vs_conn *ct);
- u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
- bool inverse);
+ u32 (*hashkey_raw)(const struct ip_vs_conn_param *p,
+ struct ip_vs_rht *t, bool inverse);
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
/* create connections for real-server outgoing packets */
struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
@@ -931,21 +1173,27 @@ struct netns_ipvs {
#endif
/* ip_vs_conn */
atomic_t conn_count; /* connection counter */
+ atomic_t no_cport_conns[IP_VS_AF_MAX];
+ struct delayed_work conn_resize_work;/* resize conn_tab */
/* ip_vs_ctl */
struct ip_vs_stats_rcu *tot_stats; /* Statistics & est. */
- int num_services; /* no of virtual services */
- int num_services6; /* IPv6 virtual services */
-
/* Trash for destinations */
struct list_head dest_trash;
spinlock_t dest_trash_lock;
struct timer_list dest_trash_timer; /* expiration timer */
+ struct mutex service_mutex; /* service reconfig */
+ struct rw_semaphore svc_resize_sem; /* svc_table resizing */
+ struct delayed_work svc_resize_work; /* resize svc_table */
+ atomic_t svc_table_changes;/* ++ on new table */
/* Service counters */
- atomic_t ftpsvc_counter;
- atomic_t nullsvc_counter;
- atomic_t conn_out_counter;
+ atomic_t num_services[IP_VS_AF_MAX]; /* Services */
+ atomic_t fwm_services[IP_VS_AF_MAX]; /* Services */
+ atomic_t nonfwm_services[IP_VS_AF_MAX];/* Services */
+ atomic_t ftpsvc_counter[IP_VS_AF_MAX]; /* FTPPORT */
+ atomic_t nullsvc_counter[IP_VS_AF_MAX];/* Zero port */
+ atomic_t conn_out_counter[IP_VS_AF_MAX];/* out conn */
#ifdef CONFIG_SYSCTL
/* delayed work for expiring no dest connections */
@@ -956,6 +1204,7 @@ struct netns_ipvs {
int drop_counter;
int old_secure_tcp;
atomic_t dropentry;
+ s8 dropentry_counters[8];
/* locks in ctl.c */
spinlock_t dropentry_lock; /* drop entry handling */
spinlock_t droppacket_lock; /* drop packet handling */
@@ -1002,6 +1251,8 @@ struct netns_ipvs {
int sysctl_est_nice; /* kthread nice */
int est_stopped; /* stop tasks */
#endif
+ int sysctl_conn_lfactor;
+ int sysctl_svc_lfactor;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1011,6 +1262,7 @@ struct netns_ipvs {
int sysctl_lblcr_expiration;
struct ctl_table_header *lblcr_ctl_header;
struct ctl_table *lblcr_ctl_table;
+ unsigned long work_flags; /* IP_VS_WORK_* flags */
/* ip_vs_est */
struct delayed_work est_reload_work;/* Reload kthread tasks */
struct mutex est_mutex; /* protect kthread tasks */
@@ -1041,6 +1293,10 @@ struct netns_ipvs {
*/
unsigned int mixed_address_family_dests;
unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */
+
+ struct ip_vs_rht __rcu *svc_table; /* Services */
+ struct ip_vs_rht __rcu *conn_tab; /* Connections */
+ atomic_t conn_tab_changes;/* ++ on new table */
};
#define DEFAULT_SYNC_THRESHOLD 3
@@ -1290,6 +1546,24 @@ static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
#endif
+/* Get load factor to map conn_count/u_thresh to t->size */
+static inline int sysctl_conn_lfactor(struct netns_ipvs *ipvs)
+{
+ return READ_ONCE(ipvs->sysctl_conn_lfactor);
+}
+
+/* Get load factor to map num_services/u_thresh to t->size
+ * Smaller value decreases u_thresh to reduce collisions but increases
+ * the table size
+ * Returns factor where:
+ * - <0: u_thresh = size >> -factor, eg. lfactor -2 = 25% load
+ * - >=0: u_thresh = size << factor, eg. lfactor 1 = 200% load
+ */
+static inline int sysctl_svc_lfactor(struct netns_ipvs *ipvs)
+{
+ return READ_ONCE(ipvs->sysctl_svc_lfactor);
+}
+
/* IPVS core functions
* (from ip_vs_core.c)
*/
@@ -1363,6 +1637,23 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
}
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+int ip_vs_conn_desired_size(struct netns_ipvs *ipvs, struct ip_vs_rht *t,
+ int lfactor);
+struct ip_vs_rht *ip_vs_conn_tab_alloc(struct netns_ipvs *ipvs, int buckets,
+ int lfactor);
+
+static inline struct ip_vs_conn *
+ip_vs_hn0_to_conn(struct ip_vs_conn_hnode *hn)
+{
+ return container_of(hn, struct ip_vs_conn, hn0);
+}
+
+static inline struct ip_vs_conn *
+ip_vs_hn_to_conn(struct ip_vs_conn_hnode *hn)
+{
+ return hn->dir ? container_of(hn, struct ip_vs_conn, hn1) :
+ container_of(hn, struct ip_vs_conn, hn0);
+}
struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr,
@@ -1716,6 +2007,13 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
return fwd;
}
+/* Check if connection uses double hashing */
+static inline bool ip_vs_conn_use_hash2(struct ip_vs_conn *cp)
+{
+ return IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ &&
+ !(cp->flags & IP_VS_CONN_F_TEMPLATE);
+}
+
void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, int dir);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 53c5056508be..d042afe7a245 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1044,8 +1044,18 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
+#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
+#else
+static inline struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk,
+ struct flowi6 *fl6,
+ const struct in6_addr *final_dst)
+{
+ return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
@@ -1129,10 +1139,8 @@ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr_unsized *addr,
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
void ip6_datagram_release_cb(struct sock *sk);
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
- int *addr_len);
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
- int *addr_len);
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
@@ -1141,6 +1149,8 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
void inet6_cleanup_sock(struct sock *sk);
void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
+int __inet6_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
+ u32 flags);
int inet6_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len);
int inet6_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
@@ -1181,8 +1191,6 @@ int tcp6_proc_init(struct net *net);
void tcp6_proc_exit(struct net *net);
int udp6_proc_init(struct net *net);
void udp6_proc_exit(struct net *net);
-int udplite6_proc_init(void);
-void udplite6_proc_exit(void);
int ipv6_misc_proc_init(void);
void ipv6_misc_proc_exit(void);
int snmp6_register_dev(struct inet6_dev *idev);
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
deleted file mode 100644
index d3013e721b14..000000000000
--- a/include/net/ipv6_stubs.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IPV6_STUBS_H
-#define _IPV6_STUBS_H
-
-#include <linux/in6.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/neighbour.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-
-/* structs from net/ip6_fib.h */
-struct fib6_info;
-struct fib6_nh;
-struct fib6_config;
-struct fib6_result;
-
-/* This is ugly, ideally these symbols should be built
- * into the core kernel.
- */
-struct ipv6_stub {
- int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
- int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
- struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
- const struct sock *sk,
- struct flowi6 *fl6,
- const struct in6_addr *final_dst);
- int (*ipv6_route_input)(struct sk_buff *skb);
-
- struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
- int (*fib6_lookup)(struct net *net, int oif, struct flowi6 *fl6,
- struct fib6_result *res, int flags);
- int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
- int oif, struct flowi6 *fl6,
- struct fib6_result *res, int flags);
- void (*fib6_select_path)(const struct net *net, struct fib6_result *res,
- struct flowi6 *fl6, int oif, bool oif_match,
- const struct sk_buff *skb, int strict);
- u32 (*ip6_mtu_from_fib6)(const struct fib6_result *res,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr);
-
- int (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
- struct fib6_config *cfg, gfp_t gfp_flags,
- struct netlink_ext_ack *extack);
- void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
- void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
- void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
- int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
- void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
- struct nl_info *info);
-
- void (*udpv6_encap_enable)(void);
- void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
- const struct in6_addr *solicited_addr,
- bool router, bool solicited, bool override, bool inc_opt);
-#if IS_ENABLED(CONFIG_XFRM)
- void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu);
- int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb);
- struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk,
- struct list_head *head,
- struct sk_buff *skb);
- int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type);
-#endif
- struct neigh_table *nd_tbl;
-
- int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
- int (*output)(struct net *, struct sock *, struct sk_buff *));
- struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
- struct net_device *dev);
- int (*ip6_xmit)(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
-};
-extern const struct ipv6_stub *ipv6_stub __read_mostly;
-
-/* A stub used by bpf helpers. Similarly ugly as ipv6_stub */
-struct ipv6_bpf_stub {
- int (*inet6_bind)(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
- u32 flags);
- struct sock *(*udp6_lib_lookup)(const struct net *net,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif, int sdif, struct udp_table *tbl,
- struct sk_buff *skb);
- int (*ipv6_setsockopt)(struct sock *sk, int level, int optname,
- sockptr_t optval, unsigned int optlen);
- int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
- sockptr_t optval, sockptr_t optlen);
- int (*ipv6_dev_get_saddr)(struct net *net,
- const struct net_device *dst_dev,
- const struct in6_addr *daddr,
- unsigned int prefs,
- struct in6_addr *saddr);
-};
-extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
-
-#endif
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index adce2144a678..40cb20d9309c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -365,6 +365,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
* @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed
* @BSS_CHANGED_TPE: transmit power envelope changed
+ * @BSS_CHANGED_NAN_LOCAL_SCHED: NAN local schedule changed (NAN mode only)
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -402,6 +403,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
BSS_CHANGED_TPE = BIT_ULL(35),
+ BSS_CHANGED_NAN_LOCAL_SCHED = BIT_ULL(36),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -866,6 +868,74 @@ struct ieee80211_bss_conf {
u8 s1g_long_beacon_period;
};
+#define IEEE80211_NAN_MAX_CHANNELS 3
+
+/**
+ * struct ieee80211_nan_channel - NAN channel information
+ *
+ * @chanreq: channel request for this NAN channel. Even though this chanreq::ap
+ * is irrelevant for NAN, still store it for convenience - some functions
+ * require it as an argument.
+ * @needed_rx_chains: number of RX chains needed for this NAN channel
+ * @chanctx_conf: chanctx_conf assigned to this NAN channel.
+ * If a local channel is being ULWed (because we needed this chanctx for
+ * something else), the local NAN channel that used this chanctx,
+ * will have this pointer set to %NULL.
+ * A peer NAN channel should never have this pointer set to %NULL.
+ * @channel_entry: the Channel Entry blob as defined in Wi-Fi Aware
+ * (TM) 4.0 specification Table 100 (Channel Entry format for the NAN
+ * Availability attribute).
+ */
+struct ieee80211_nan_channel {
+ struct ieee80211_chan_req chanreq;
+ u8 needed_rx_chains;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ u8 channel_entry[6];
+};
+
+/**
+ * struct ieee80211_nan_peer_map - NAN peer schedule map
+ *
+ * This stores a single map from a peer's schedule. Each peer can have
+ * multiple maps.
+ *
+ * @map_id: the map ID from the peer schedule, %CFG80211_NAN_INVALID_MAP_ID
+ * if unused
+ * @slots: mapping of time slots to channel configurations in the schedule's
+ * channels array
+ */
+struct ieee80211_nan_peer_map {
+ u8 map_id;
+ struct ieee80211_nan_channel *slots[CFG80211_NAN_SCHED_NUM_TIME_SLOTS];
+};
+
+/**
+ * struct ieee80211_nan_peer_sched - NAN peer schedule
+ *
+ * This stores the complete schedule from a peer. Contains peer-level
+ * parameters and an array of schedule maps.
+ *
+ * @seq_id: the sequence ID from the peer schedule
+ * @committed_dw: committed DW as published by the peer
+ * @max_chan_switch: maximum channel switch time in microseconds
+ * @init_ulw: initial ULWs as published by the peer (copied)
+ * @ulw_size: number of bytes in @init_ulw
+ * @maps: array of peer schedule maps. Invalid slots have map_id set to
+ * %CFG80211_NAN_INVALID_MAP_ID.
+ * @n_channels: number of valid channel entries in @channels
+ * @channels: flexible array of negotiated peer channels for this schedule
+ */
+struct ieee80211_nan_peer_sched {
+ u8 seq_id;
+ u16 committed_dw;
+ u16 max_chan_switch;
+ const u8 *init_ulw;
+ u16 ulw_size;
+ struct ieee80211_nan_peer_map maps[CFG80211_NAN_MAX_PEER_MAPS];
+ u8 n_channels;
+ struct ieee80211_nan_channel channels[] __counted_by(n_channels);
+};
+
/**
* enum mac80211_tx_info_flags - flags to describe transmission information/status
*
@@ -1917,6 +1987,8 @@ enum ieee80211_offload_flags {
IEEE80211_OFFLOAD_DECAP_ENABLED = BIT(2),
};
+#define IEEE80211_NAN_AVAIL_BLOB_MAX_LEN 54
+
/**
* struct ieee80211_eml_params - EHT Operating mode notification parameters
*
@@ -1943,6 +2015,32 @@ struct ieee80211_eml_params {
};
/**
+ * struct ieee80211_nan_sched_cfg - NAN schedule configuration
+ * @channels: array of NAN channels. A channel entry is in use if
+ * channels[i].chanreq.oper.chan is not NULL.
+ * @schedule: NAN local schedule - mapping of each 16TU time slot to
+ * the NAN channel on which the radio will operate. NULL if unscheduled.
+ * @avail_blob: NAN Availability attribute blob.
+ * @avail_blob_len: length of the @avail_blob in bytes.
+ * @deferred: indicates that the driver should notify peers before applying the
+ * new NAN schedule, and apply the new schedule the second NAN Slot
+ * boundary after it notified the peers, as defined in Wi-Fi Aware (TM) 4.0
+ * specification, section 5.2.2.
+ * The driver must call ieee80211_nan_sched_update_done() after the
+ * schedule has been applied.
+ * If a HW restart happened while a deferred schedule update was pending,
+ * mac80211 will reconfigure the deferred schedule (and wait for the driver
+ * to notify that the schedule has been applied).
+ */
+struct ieee80211_nan_sched_cfg {
+ struct ieee80211_nan_channel channels[IEEE80211_NAN_MAX_CHANNELS];
+ struct ieee80211_nan_channel *schedule[CFG80211_NAN_SCHED_NUM_TIME_SLOTS];
+ u8 avail_blob[IEEE80211_NAN_AVAIL_BLOB_MAX_LEN];
+ u16 avail_blob_len;
+ bool deferred;
+};
+
+/**
* struct ieee80211_vif_cfg - interface configuration
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS or not
@@ -1970,6 +2068,7 @@ struct ieee80211_eml_params {
* your driver/device needs to do.
* @ap_addr: AP MLD address, or BSSID for non-MLO connections
* (station mode only)
+ * @nan_sched: NAN schedule parameters. &struct ieee80211_nan_sched_cfg
*/
struct ieee80211_vif_cfg {
/* association related data */
@@ -1988,6 +2087,8 @@ struct ieee80211_vif_cfg {
bool s1g;
bool idle;
u8 ap_addr[ETH_ALEN] __aligned(2);
+ /* Protected by the wiphy mutex */
+ struct ieee80211_nan_sched_cfg nan_sched;
};
#define IEEE80211_TTLM_NUM_TIDS 8
@@ -2074,6 +2175,7 @@ enum ieee80211_neg_ttlm_res {
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void \*).
* @txq: the multicast data TX queue
+ * @txq_mgmt: the mgmt frame TX queue, currently only exists for NAN devices
* @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
* &enum ieee80211_offload_flags.
*/
@@ -2092,6 +2194,7 @@ struct ieee80211_vif {
u8 hw_queue[IEEE80211_NUM_ACS];
struct ieee80211_txq *txq;
+ struct ieee80211_txq *txq_mgmt;
netdev_features_t netdev_features;
u32 driver_flags;
@@ -2477,11 +2580,15 @@ struct ieee80211_sta_aggregates {
* @uhr_cap: UHR capabilities of this STA
* @s1g_cap: S1G capabilities of this STA
* @agg: per-link data for multi-link aggregation
- * @bandwidth: current bandwidth the station can receive with
+ * @bandwidth: current bandwidth the station can receive with.
+ * This is the minimum between the peer's capabilities and our own
+ * operating channel width; Invalid for NAN since that is operating on
+ * multiple channels.
* @rx_nss: in HT/VHT, the maximum number of spatial streams the
* station can receive at the moment, changed by operating mode
* notifications and capabilities. The value is only valid after
- * the station moves to associated state.
+ * the station moves to associated state. Invalid for NAN since it
+ * operates on multiple configurations of rx_nss.
* @txpwr: the station tx power configuration
*
*/
@@ -2563,6 +2670,8 @@ struct ieee80211_link_sta {
* @valid_links: bitmap of valid links, or 0 for non-MLO
* @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
* @epp_peer: indicates that the peer is an EPP peer.
+ * @nmi: For NDI stations, pointer to the NMI station of the peer.
+ * @nan_sched: NAN peer schedule for this station. Valid only for NMI stations.
*/
struct ieee80211_sta {
u8 addr[ETH_ALEN] __aligned(2);
@@ -2591,6 +2700,11 @@ struct ieee80211_sta {
struct ieee80211_link_sta deflink;
struct ieee80211_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ieee80211_sta __rcu *nmi;
+
+ /* should only be accessed with the wiphy mutex held */
+ struct ieee80211_nan_peer_sched *nan_sched;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -2824,6 +2938,8 @@ struct ieee80211_txq {
* station has a unique address, i.e. each station entry can be identified
* by just its MAC address; this prevents, for example, the same station
* from connecting to two virtual AP interfaces at the same time.
+ * Note that this doesn't apply for NAN, in which the peer's NMI address
+ * can be equal to its NDI address.
*
* @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
* reordering buffer internally, guaranteeing mac80211 receives frames in
@@ -2913,6 +3029,9 @@ struct ieee80211_txq {
* HW flag so drivers can opt in according to their own control, e.g. in
* testing.
*
+ * @IEEE80211_HW_SUPPORTS_NDP_BLOCKACK: HW can transmit/receive S1G NDP
+ * BlockAck frames.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2973,6 +3092,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DISALLOW_PUNCTURING,
IEEE80211_HW_HANDLES_QUIET_CSA,
IEEE80211_HW_STRICT,
+ IEEE80211_HW_SUPPORTS_NDP_BLOCKACK,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -4486,6 +4606,12 @@ struct ieee80211_prep_tx_info {
* @del_nan_func: Remove a NAN function. The driver must call
* ieee80211_nan_func_terminated() with
* NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
+ * @nan_peer_sched_changed: Notifies the driver that the peer NAN schedule
+ * has changed. The new schedule is available via sta->nan_sched.
+ * Note that the channel_entry blob might not match the actual chandef
+ * since the bandwidth of the chandef is the minimum of the local and peer
+ * bandwidth. It is the driver responsibility to remove the peer schedule
+ * when the NMI station is removed.
* @can_aggregate_in_amsdu: Called in order to determine if HW supports
* aggregating two specific frames in the same A-MSDU. The relation
* between the skbs should be symmetric and transitive. Note that while
@@ -4891,6 +5017,8 @@ struct ieee80211_ops {
void (*del_nan_func)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 instance_id);
+ int (*nan_peer_sched_changed)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta);
bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw,
struct sk_buff *head,
struct sk_buff *skb);
@@ -7388,6 +7516,24 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id);
/**
+ * ieee80211_calculate_rx_timestamp - calculate timestamp in frame
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @status: RX status
+ * @mpdu_len: total MPDU length (including FCS)
+ * @mpdu_offset: offset into MPDU to calculate timestamp at
+ *
+ * This function calculates the RX timestamp at the given MPDU offset, taking
+ * into account what the RX timestamp was. An offset of 0 will just normalize
+ * the timestamp to TSF at beginning of MPDU reception.
+ *
+ * Returns: the calculated timestamp
+ */
+u64 ieee80211_calculate_rx_timestamp(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ unsigned int mpdu_len,
+ unsigned int mpdu_offset);
+
+/**
* ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
* @vif: virtual interface
* @wakeup: wakeup reason(s)
@@ -7733,6 +7879,17 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif,
gfp_t gfp);
/**
+ * ieee80211_nan_sched_update_done - notify that NAN schedule update is done
+ *
+ * This function is called by the driver to notify mac80211 that the NAN
+ * schedule update has been applied.
+ * Must be called with wiphy mutex held. May sleep.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ */
+void ieee80211_nan_sched_update_done(struct ieee80211_vif *vif);
+
+/**
* ieee80211_calc_rx_airtime - calculate estimated transmission airtime for RX.
*
* This function calculates the estimated airtime usage of a frame based on the
@@ -7768,19 +7925,22 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
* ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO.
*
* The driver is responsible for freeing the returned skb.
*
* Return: FILS discovery template. %NULL on error.
*/
struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
* probe response template.
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO.
*
* The driver is responsible for freeing the returned skb.
*
@@ -7788,7 +7948,8 @@ struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
*/
struct sk_buff *
ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_obss_color_collision_notify - notify userland about a BSS color
@@ -7964,4 +8125,11 @@ int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
* Return: %true iff the vif is a NAN interface and NAN is started
*/
bool ieee80211_vif_nan_started(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_encrypt_tx_skb - Encrypt the transmit skb
+ * @skb: the skb
+ * Return: 0 if success and non-zero on error
+ */
+int ieee80211_encrypt_tx_skb(struct sk_buff *skb);
#endif /* MAC80211_H */
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 766f4fb25e26..7fe3a1b61b2d 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -215,6 +215,12 @@ enum gdma_page_type {
#define GDMA_INVALID_DMA_REGION 0
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
struct gdma_mem_info {
struct device *dev;
@@ -386,6 +392,7 @@ struct gdma_irq_context {
enum gdma_context_flags {
GC_PROBE_SUCCEEDED = 0,
+ GC_IN_SERVICE = 1,
};
struct gdma_context {
@@ -411,14 +418,15 @@ struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
- bool in_service;
phys_addr_t bar0_pa;
void __iomem *bar0_va;
+ resource_size_t bar0_size;
void __iomem *shm_base;
void __iomem *db_page_base;
phys_addr_t phys_db_page_base;
- u32 db_page_size;
+ u64 db_page_off;
+ u64 db_page_size;
int numa_node;
/* Shared memory chanenl (used to bootstrap HWC) */
@@ -473,6 +481,8 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type);
+
struct gdma_wqe {
u32 reserved :24;
u32 last_vbytes :8;
@@ -615,6 +625,9 @@ enum {
/* Driver can handle hardware recovery events during probe */
#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)
+/* Driver supports self recovery on Hardware Channel timeouts */
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY BIT(25)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
@@ -628,7 +641,8 @@ enum {
GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
- GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY)
+ GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)
#define GDMA_DRV_CAP_FLAGS2 0
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index a078af283bdd..96d21cbbdee2 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -61,8 +61,11 @@ enum TRI_STATE {
#define MAX_PORTS_IN_MANA_DEV 256
+/* Maximum number of packets per coalesced CQE */
+#define MANA_RXCOMP_OOB_NUM_PPI 4
+
/* Update this count whenever the respective structures are changed */
-#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_RX_COUNT (6 + MANA_RXCOMP_OOB_NUM_PPI - 1)
#define MANA_STATS_TX_COUNT 11
#define MANA_RX_FRAG_ALIGNMENT 64
@@ -73,6 +76,8 @@ struct mana_stats_rx {
u64 xdp_drop;
u64 xdp_tx;
u64 xdp_redirect;
+ u64 pkt_len0_err;
+ u64 coalesced_cqe[MANA_RXCOMP_OOB_NUM_PPI - 1];
struct u64_stats_sync syncp;
};
@@ -227,8 +232,6 @@ struct mana_rxcomp_perpkt_info {
u32 pkt_hash;
}; /* HW DATA */
-#define MANA_RXCOMP_OOB_NUM_PPI 4
-
/* Receive completion OOB */
struct mana_rxcomp_oob {
struct mana_cqe_header cqe_hdr;
@@ -378,7 +381,6 @@ struct mana_ethtool_stats {
u64 tx_cqe_err;
u64 tx_cqe_unknown_type;
u64 tx_linear_pkt_cnt;
- u64 rx_coalesced_err;
u64 rx_cqe_unknown_type;
};
@@ -557,6 +559,9 @@ struct mana_port_context {
bool port_is_up;
bool port_st_save; /* Saved port state */
+ u8 cqe_coalescing_enable;
+ u32 cqe_coalescing_timeout_ns;
+
struct mana_ethtool_stats eth_stats;
struct mana_ethtool_phy_stats phy_stats;
@@ -902,6 +907,10 @@ struct mana_cfg_rx_steer_req_v2 {
struct mana_cfg_rx_steer_resp {
struct gdma_resp_hdr hdr;
+
+ /* V2 */
+ u32 cqe_coalescing_timeout_ns;
+ u32 reserved1;
}; /* HW DATA */
/* Register HW vPort */
@@ -998,6 +1007,7 @@ struct mana_deregister_filter_resp {
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
#define MANA_MAX_NUM_QUEUES 64
+#define MANA_DEF_NUM_QUEUES 16
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
diff --git a/include/net/mctp.h b/include/net/mctp.h
index c3207ce98f07..e1e0a69afdce 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -270,6 +270,7 @@ struct mctp_dst {
struct mctp_dev *dev;
unsigned int mtu;
mctp_eid_t nexthop;
+ mctp_eid_t saddr;
/* set for direct addressing */
unsigned char halen;
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index d38783a2ce57..3da1a6f8d3f9 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -2,8 +2,6 @@
#ifndef _NDISC_H
#define _NDISC_H
-#include <net/ipv6_stubs.h>
-
/*
* ICMP codes for neighbour discovery messages
*/
@@ -359,14 +357,6 @@ static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev
return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
}
-static inline
-struct neighbour *__ipv6_neigh_lookup_noref_stub(struct net_device *dev,
- const void *pkey)
-{
- return ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
- ndisc_hashfn, pkey, dev);
-}
-
static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
{
struct neighbour *n;
@@ -391,28 +381,20 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
rcu_read_unlock();
}
-static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
- const void *pkey)
-{
- struct neighbour *n;
-
- rcu_read_lock();
- n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
- neigh_confirm(n);
- rcu_read_unlock();
-}
-
-/* uses ipv6_stub and is meant for use outside of IPv6 core */
static inline struct neighbour *ip_neigh_gw6(struct net_device *dev,
const void *addr)
{
+#if IS_ENABLED(CONFIG_IPV6)
struct neighbour *neigh;
- neigh = __ipv6_neigh_lookup_noref_stub(dev, addr);
+ neigh = __ipv6_neigh_lookup_noref(dev, addr);
if (unlikely(!neigh))
- neigh = __neigh_create(ipv6_stub->nd_tbl, addr, dev, false);
+ neigh = __neigh_create(&nd_tbl, addr, dev, false);
return neigh;
+#else
+ return ERR_PTR(-EAFNOSUPPORT);
+#endif
}
int ndisc_init(void);
@@ -434,6 +416,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
+
void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
const struct in6_addr *solicited_addr,
bool router, bool solicited, bool override, bool inc_opt);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index d7bec49ee9ea..80de5e98a66d 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -264,14 +264,14 @@ void ipx_unregister_sysctl(void);
#define ipx_unregister_sysctl()
#endif
-#ifdef CONFIG_NET_NS
-void __put_net(struct net *net);
-
static inline struct net *to_net_ns(struct ns_common *ns)
{
return container_of(ns, struct net, ns);
}
+#ifdef CONFIG_NET_NS
+void __put_net(struct net *net);
+
/* Try using get_net_track() instead */
static inline struct net *get_net(struct net *net)
{
@@ -309,7 +309,7 @@ static inline int check_net(const struct net *net)
return ns_ref_read(net) != 0;
}
-void net_drop_ns(void *);
+void net_drop_ns(struct ns_common *);
void net_passive_dec(struct net *net);
#else
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 95ed28212f4e..70c9fe9e83cc 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -150,6 +150,11 @@ enum {
* When NIC-wide config is changed the callback will
* be invoked for all queues.
*
+ * @ndo_queue_create: Create a new RX queue on a virtual device that will
+ * be paired with a physical device's queue via leasing.
+ * Return the new queue id on success, negative error
+ * on failure.
+ *
* @supported_params: Bitmask of supported parameters, see QCFG_*.
*
* Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
@@ -178,6 +183,8 @@ struct netdev_queue_mgmt_ops {
struct netlink_ext_ack *extack);
struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
int idx);
+ int (*ndo_queue_create)(struct net_device *dev,
+ struct netlink_ext_ack *extack);
unsigned int supported_params;
};
@@ -185,7 +192,7 @@ struct netdev_queue_mgmt_ops {
void netdev_queue_config(struct net_device *dev, int rxq,
struct netdev_queue_config *qcfg);
-bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx);
/**
* DOC: Lockless queue stopping / waking helpers.
@@ -373,6 +380,14 @@ static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq)
get_desc, start_thrs); \
})
-struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
-
-#endif
+struct device *netdev_queue_get_dma_dev(struct net_device *dev,
+ unsigned int idx,
+ enum netdev_queue_type type);
+bool netdev_can_create_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+bool netdev_can_lease_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+bool netdev_queue_busy(struct net_device *dev, unsigned int idx,
+ enum netdev_queue_type type,
+ struct netlink_ext_ack *extack);
+#endif /* _LINUX_NET_QUEUES_H */
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index cfa72c485387..9415a94d333d 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -8,13 +8,14 @@
#include <net/xdp.h>
#include <net/page_pool/types.h>
#include <net/netdev_queues.h>
+#include <net/rps-types.h>
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_RPS
struct rps_map __rcu *rps_map;
- struct rps_dev_flow_table __rcu *rps_flow_table;
+ rps_tag_ptr rps_flow_table;
#endif
struct kobject kobj;
const struct attribute_group **groups;
@@ -30,6 +31,14 @@ struct netdev_rx_queue {
struct napi_struct *napi;
struct netdev_queue_config qcfg;
struct pp_memory_provider_params mp_params;
+
+ /* If a queue is leased, then the lease pointer is always
+ * valid. From the physical device it points to the virtual
+ * queue, and from the virtual device it points to the
+ * physical queue.
+ */
+ struct netdev_rx_queue *lease;
+ netdevice_tracker lease_tracker;
} ____cacheline_aligned_in_smp;
/*
@@ -58,6 +67,18 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
return index;
}
-int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+enum netif_lease_dir {
+ NETIF_VIRT_TO_PHYS,
+ NETIF_PHYS_TO_VIRT,
+};
-#endif
+struct netdev_rx_queue *
+__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq,
+ enum netif_lease_dir dir);
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src);
+void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src);
+#endif /* _LINUX_NETDEV_RX_QUEUE_H */
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 8d65ffbf57de..b39417ad955e 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -16,9 +16,6 @@ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_SCTP
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
-#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre;
#endif
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index cd5020835a6d..fde2427ceb8f 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -107,11 +107,6 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state);
-int nf_conntrack_udplite_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state);
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
@@ -139,8 +134,6 @@ void nf_conntrack_icmpv6_init_net(struct net *net);
/* Existing built-in generic protocol */
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
-#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
-
const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto);
/* Generic netlink helpers */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 9fdaba911de6..3a66d4abb6d6 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -14,6 +14,7 @@
struct nf_ct_timeout {
__u16 l3num;
const struct nf_conntrack_l4proto *l4proto;
+ struct rcu_head rcu;
char data[];
};
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 45eb26b2e95b..d17035d14d96 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -23,7 +23,6 @@ struct nf_queue_entry {
struct nf_hook_state state;
bool nf_ct_is_unconfirmed;
u16 size; /* sizeof(entry) + saved route keys */
- u16 queue_num;
/* extra space to store route keys */
};
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index ec8a8ec9c0aa..2c0173d9309c 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -31,7 +31,9 @@ struct nft_pktinfo {
const struct nf_hook_state *state;
u8 flags;
u8 tprot;
+ __be16 ethertype;
u16 fragoff;
+ u16 nhoff;
u16 thoff;
u16 inneroff;
};
@@ -83,6 +85,8 @@ static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
{
pkt->flags = 0;
pkt->tprot = 0;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = 0;
pkt->thoff = 0;
pkt->fragoff = 0;
}
@@ -122,17 +126,6 @@ struct nft_regs {
};
};
-struct nft_regs_track {
- struct {
- const struct nft_expr *selector;
- const struct nft_expr *bitwise;
- u8 num_reg;
- } regs[NFT_REG32_NUM];
-
- const struct nft_expr *cur;
- const struct nft_expr *last;
-};
-
/* Store/load an u8, u16 or u64 integer to/from the u32 data register.
*
* Note, when using concatenations, register allocation happens at 32-bit
@@ -425,8 +418,6 @@ int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src, gfp_t gfp);
void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
const struct nft_expr *expr, bool reset);
-bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
- const struct nft_expr *expr);
struct nft_set_ext;
@@ -941,7 +932,6 @@ struct nft_offload_ctx;
* @destroy_clone: destruction clone function
* @dump: function to dump parameters
* @validate: validate expression, called during loop detection
- * @reduce: reduce expression
* @gc: garbage collection expression
* @offload: hardware offload expression
* @offload_action: function to report true/false to allocate one slot or not in the flow
@@ -975,8 +965,6 @@ struct nft_expr_ops {
bool reset);
int (*validate)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
- bool (*reduce)(struct nft_regs_track *track,
- const struct nft_expr *expr);
bool (*gc)(struct net *net,
const struct nft_expr *expr);
int (*offload)(struct nft_offload_ctx *ctx,
@@ -1959,20 +1947,4 @@ static inline u64 nft_net_tstamp(const struct net *net)
return nft_pernet(net)->tstamp;
}
-#define __NFT_REDUCE_READONLY 1UL
-#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
-
-void nft_reg_track_update(struct nft_regs_track *track,
- const struct nft_expr *expr, u8 dreg, u8 len);
-void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
-void __nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg);
-
-static inline bool nft_reg_track_cmp(struct nft_regs_track *track,
- const struct nft_expr *expr, u8 dreg)
-{
- return track->regs[dreg].selector &&
- track->regs[dreg].selector->ops == expr->ops &&
- track->regs[dreg].num_reg == 0;
-}
-
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index fcf967286e37..e715405a73cb 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -12,16 +12,19 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
ip = ip_hdr(pkt->skb);
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = ip->protocol;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = 0;
pkt->thoff = ip_hdrlen(pkt->skb);
pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
-static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ int nhoff)
{
struct iphdr *iph, _iph;
u32 len, thoff, skb_len;
- iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb) + nhoff,
sizeof(*iph), &_iph);
if (!iph)
return -1;
@@ -31,7 +34,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
len = iph_totlen(pkt->skb, iph);
thoff = iph->ihl * 4;
- skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb) - nhoff;
if (skb_len < len)
return -1;
@@ -42,7 +45,9 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
- pkt->thoff = skb_network_offset(pkt->skb) + thoff;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = nhoff;
+ pkt->thoff = skb_network_offset(pkt->skb) + nhoff + thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
@@ -50,7 +55,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv4_validate(pkt) < 0)
+ if (__nft_set_pktinfo_ipv4_validate(pkt, 0) < 0)
nft_set_pktinfo_unspec(pkt);
}
@@ -78,6 +83,8 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
}
pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = 0;
pkt->tprot = iph->protocol;
pkt->thoff = thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index c53ac00bb974..d7b8c559b795 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -20,21 +20,23 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = 0;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
}
-static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, int nhoff)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
struct ipv6hdr *ip6h, _ip6h;
- unsigned int thoff = 0;
+ unsigned int thoff = nhoff;
unsigned short frag_off;
u32 pkt_len, skb_len;
int protohdr;
- ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb) + nhoff,
sizeof(*ip6h), &_ip6h);
if (!ip6h)
return -1;
@@ -43,7 +45,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
return -1;
pkt_len = ipv6_payload_len(pkt->skb, ip6h);
- skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb) - nhoff;
if (pkt_len + sizeof(*ip6h) > skb_len)
return -1;
@@ -53,6 +55,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = nhoff;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
@@ -64,7 +68,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
{
- if (__nft_set_pktinfo_ipv6_validate(pkt) < 0)
+ if (__nft_set_pktinfo_ipv6_validate(pkt, 0) < 0)
nft_set_pktinfo_unspec(pkt);
}
@@ -99,6 +103,8 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
+ pkt->ethertype = pkt->skb->protocol;
+ pkt->nhoff = 0;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 3568b6a2f5f0..14c427891ee6 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -67,6 +67,16 @@ struct nft_flow_rule {
struct flow_rule *rule;
};
+static inline struct flow_action_entry *
+nft_flow_action_entry_next(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow)
+{
+ if (unlikely(ctx->num_actions >= flow->rule->action.num_entries))
+ return NULL;
+
+ return &flow->rule->action.entries[ctx->num_actions++];
+}
+
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
enum flow_dissector_key_id addr_type);
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 7370fba844ef..e0422456f27b 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -66,6 +66,4 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
void nft_fib_store_result(void *reg, const struct nft_fib *priv,
const struct net_device *dev);
-bool nft_fib_reduce(struct nft_regs_track *track,
- const struct nft_expr *expr);
#endif
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index d602263590fe..f74e63290603 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -43,9 +43,6 @@ void nft_meta_set_destroy(const struct nft_ctx *ctx,
int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr);
-bool nft_meta_get_reduce(struct nft_regs_track *track,
- const struct nft_expr *expr);
-
struct nft_inner_tun_ctx;
void nft_meta_inner_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt,
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 1a8356ca4b78..546d10586576 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -2265,6 +2265,25 @@ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
}
/**
+ * nla_nest_end_safe - Validate and finalize nesting of attributes
+ * @skb: socket buffer the attributes are stored in
+ * @start: container attribute
+ *
+ * Corrects the container attribute header to include all appended
+ * attributes.
+ *
+ * Returns: the total data length of the skb, or -EMSGSIZE if the
+ * nested attribute length exceeds U16_MAX.
+ */
+static inline int nla_nest_end_safe(struct sk_buff *skb, struct nlattr *start)
+{
+ if (skb_tail_pointer(skb) - (unsigned char *)start > U16_MAX)
+ return -EMSGSIZE;
+
+ return nla_nest_end(skb, start);
+}
+
+/**
* nla_nest_cancel - Cancel nesting of attributes
* @skb: socket buffer the message is stored in
* @start: container attribute
diff --git a/include/net/netmem.h b/include/net/netmem.h
index a96b3e5e5574..507b74c9f52d 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -93,27 +93,21 @@ enum net_iov_type {
* supported.
*/
struct net_iov {
- union {
- struct netmem_desc desc;
-
- /* XXX: The following part should be removed once all
- * the references to them are converted so as to be
- * accessed via netmem_desc e.g. niov->desc.pp instead
- * of niov->pp.
- */
- struct {
- unsigned long _flags;
- unsigned long pp_magic;
- struct page_pool *pp;
- unsigned long _pp_mapping_pad;
- unsigned long dma_addr;
- atomic_long_t pp_ref_count;
- };
- };
- struct net_iov_area *owner;
+ struct netmem_desc desc;
+ unsigned int page_type;
enum net_iov_type type;
+ struct net_iov_area *owner;
};
+/* Make sure 'the offset of page_type in struct page == the offset of
+ * page_type in struct net_iov', as asserted just below.
+ */
+#define NET_IOV_ASSERT_OFFSET(pg, iov) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(page_type, page_type);
+#undef NET_IOV_ASSERT_OFFSET
+
struct net_iov_area {
/* Array of net_iovs for this area. */
struct net_iov *niovs;
@@ -123,26 +117,6 @@ struct net_iov_area {
unsigned long base_virtual;
};
-/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
- * the page_pool can access these fields without worrying whether the
- * underlying fields are accessed via netmem_desc or directly via
- * net_iov, until all the references to them are converted so as to be
- * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
- *
- * The non-net stack fields of struct page are private to the mm stack
- * and must never be mirrored to net_iov.
- */
-#define NET_IOV_ASSERT_OFFSET(desc, iov) \
- static_assert(offsetof(struct netmem_desc, desc) == \
- offsetof(struct net_iov, iov))
-NET_IOV_ASSERT_OFFSET(_flags, _flags);
-NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
-NET_IOV_ASSERT_OFFSET(pp, pp);
-NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
-NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
-NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
-#undef NET_IOV_ASSERT_OFFSET
-
static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
{
return niov->owner;
@@ -256,7 +230,7 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
*/
#define pp_page_to_nmdesc(p) \
({ \
- DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \
+ DEBUG_NET_WARN_ON_ONCE(!PageNetpp(p)); \
__pp_page_to_nmdesc(p); \
})
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 8e971c7bf164..80ccd4dda8e0 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -74,6 +74,7 @@ struct netns_ipv4 {
/* TXRX readonly hotpath cache lines */
__cacheline_group_begin(netns_ipv4_read_txrx);
+ u8 sysctl_tcp_shrink_window;
__cacheline_group_end(netns_ipv4_read_txrx);
/* RX readonly hotpath cache line */
@@ -122,7 +123,6 @@ struct netns_ipv4 {
#endif
bool fib_has_custom_local_routes;
bool fib_offload_disabled;
- u8 sysctl_tcp_shrink_window;
#ifdef CONFIG_IP_ROUTE_CLASSID
atomic_t fib_num_tclassid_users;
#endif
@@ -166,6 +166,7 @@ struct netns_ipv4 {
u8 sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
u8 sysctl_ip_dynaddr;
+ u32 sysctl_ip_local_port_step_width;
#ifdef CONFIG_NET_L3_MASTER_DEV
u8 sysctl_raw_l3mdev_accept;
#endif
@@ -279,6 +280,9 @@ struct netns_ipv4 {
struct list_head mr_tables;
struct fib_rules_ops *mr_rules_ops;
#endif
+ struct fib_notifier_ops *ipmr_notifier_ops;
+ atomic_t ipmr_seq;
+ struct mutex mfc_mutex;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct sysctl_fib_multipath_hash_seed sysctl_fib_multipath_hash_seed;
@@ -290,9 +294,6 @@ struct netns_ipv4 {
struct fib_notifier_ops *notifier_ops;
unsigned int fib_seq; /* writes protected by rtnl_mutex */
- struct fib_notifier_ops *ipmr_notifier_ops;
- unsigned int ipmr_seq; /* protected by rtnl_mutex */
-
atomic_t rt_genid;
siphash_key_t ip_id_key;
struct hlist_head *inet_addr_lst;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 34bdb1308e8f..499e4288170f 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -118,7 +118,7 @@ struct netns_ipv6 {
struct seg6_pernet_data *seg6_data;
struct fib_notifier_ops *notifier_ops;
struct fib_notifier_ops *ip6mr_notifier_ops;
- unsigned int ipmr_seq; /* protected by rtnl_mutex */
+ atomic_t ipmr_seq;
struct {
struct hlist_head head;
spinlock_t lock;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 7e373664b1e7..dce05f8e6a33 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -28,11 +28,6 @@ struct netns_mib {
DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics);
#endif
- DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
-#if IS_ENABLED(CONFIG_IPV6)
- DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
-#endif
-
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index 6682e51513ef..2073cbac2afb 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -17,6 +17,7 @@ struct netns_mpls {
size_t platform_labels;
struct mpls_route __rcu * __rcu *platform_label;
struct mutex platform_mutex;
+ seqcount_mutex_t platform_label_seq;
struct ctl_table_header *ctl;
};
diff --git a/include/net/netns/vsock.h b/include/net/netns/vsock.h
index dc8cbe45f406..7f84aad92f57 100644
--- a/include/net/netns/vsock.h
+++ b/include/net/netns/vsock.h
@@ -20,5 +20,7 @@ struct netns_vsock {
/* 0 = unlocked, 1 = locked to global, 2 = locked to local */
int child_ns_mode_locked;
+
+ int g2h_fallback;
};
#endif /* __NET_NET_NAMESPACE_VSOCK_H */
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
index ada4f968960a..255ce4cfd975 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -23,14 +23,10 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *p);
-int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack);
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *old_p);
-void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
+void netif_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *old_p);
/**
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index cdd95477af7a..03da138722f5 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -44,6 +44,8 @@
* use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
* ring is usually refilled and the max consumed elements will be 64,
* thus a natural max size of objects needed in the cache.
+ * The refill watermark is set to 64 for 4KB pages,
+ * and scales to balance its size in bytes across page sizes.
*
* Keeping room for more objects, is due to XDP_DROP use-case. As
* XDP_DROP allows the opportunity to recycle objects directly into
@@ -51,8 +53,15 @@
* cache is already full (or partly full) then the XDP_DROP recycles
* would have to take a slower code path.
*/
-#define PP_ALLOC_CACHE_SIZE 128
+#if PAGE_SIZE >= SZ_64K
+#define PP_ALLOC_CACHE_REFILL 4
+#elif PAGE_SIZE >= SZ_16K
+#define PP_ALLOC_CACHE_REFILL 16
+#else
#define PP_ALLOC_CACHE_REFILL 64
+#endif
+
+#define PP_ALLOC_CACHE_SIZE (PP_ALLOC_CACHE_REFILL * 2)
struct pp_alloc_cache {
u32 count;
netmem_ref cache[PP_ALLOC_CACHE_SIZE];
diff --git a/include/net/ping.h b/include/net/ping.h
index 05bfd594a64c..bcbdb5a136e3 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -20,8 +20,7 @@
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
struct pingv6_ops {
- int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
- int *addr_len);
+ int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
void (*ip6_datagram_recv_common_ctl)(struct sock *sk,
struct msghdr *msg,
struct sk_buff *skb);
@@ -64,7 +63,7 @@ int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int flags, int *addr_len);
+ int flags);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/rps-types.h b/include/net/rps-types.h
new file mode 100644
index 000000000000..6b90a66866c1
--- /dev/null
+++ b/include/net/rps-types.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_RPS_TYPES_H
+#define _NET_RPS_TYPES_H
+
+/* An rps_tag_ptr packs an RPS table pointer together with its size:
+ * the low order 5 bits store ilog2(size) of the table, and the
+ * remaining high bits hold the (at least 32-byte aligned) address.
+ */
+typedef unsigned long rps_tag_ptr;
+
+static inline u8 rps_tag_to_log(rps_tag_ptr tag_ptr)
+{
+ return tag_ptr & 31U;
+}
+
+static inline u32 rps_tag_to_mask(rps_tag_ptr tag_ptr)
+{
+ return (1U << rps_tag_to_log(tag_ptr)) - 1;
+}
+
+static inline void *rps_tag_to_table(rps_tag_ptr tag_ptr)
+{
+ return (void *)(tag_ptr & ~31UL);
+}
+#endif /* _NET_RPS_TYPES_H */
diff --git a/include/net/rps.h b/include/net/rps.h
index f1794cd2e7fb..e33c6a2fa8bb 100644
--- a/include/net/rps.h
+++ b/include/net/rps.h
@@ -8,6 +8,7 @@
#include <net/hotdata.h>
#ifdef CONFIG_RPS
+#include <net/rps-types.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
@@ -39,17 +40,6 @@ struct rps_dev_flow {
#define RPS_NO_FILTER 0xffff
/*
- * The rps_dev_flow_table structure contains a table of flow mappings.
- */
-struct rps_dev_flow_table {
- u8 log;
- struct rcu_head rcu;
- struct rps_dev_flow flows[];
-};
-#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- ((_num) * sizeof(struct rps_dev_flow)))
-
-/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
* Each entry is a 32bit value. Upper part is the high-order bits
@@ -60,41 +50,38 @@ struct rps_dev_flow_table {
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
- struct rcu_head rcu;
- u32 mask;
-
- u32 ents[] ____cacheline_aligned_in_smp;
+ u32 ent;
};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
#define RPS_NO_CPU 0xffff
-static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
+static inline void rps_record_sock_flow(rps_tag_ptr tag_ptr, u32 hash)
{
- unsigned int index = hash & table->mask;
+ unsigned int index = hash & rps_tag_to_mask(tag_ptr);
u32 val = hash & ~net_hotdata.rps_cpu_mask;
+ struct rps_sock_flow_table *table;
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
+ table = rps_tag_to_table(tag_ptr);
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in get_rps_cpu().
*/
- if (READ_ONCE(table->ents[index]) != val)
- WRITE_ONCE(table->ents[index], val);
+ if (READ_ONCE(table[index].ent) != val)
+ WRITE_ONCE(table[index].ent, val);
}
static inline void _sock_rps_record_flow_hash(__u32 hash)
{
- struct rps_sock_flow_table *sock_flow_table;
+ rps_tag_ptr tag_ptr;
if (!hash)
return;
rcu_read_lock();
- sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
- if (sock_flow_table)
- rps_record_sock_flow(sock_flow_table, hash);
+ tag_ptr = READ_ONCE(net_hotdata.rps_sock_flow_table);
+ if (tag_ptr)
+ rps_record_sock_flow(tag_ptr, hash);
rcu_read_unlock();
}
@@ -121,6 +108,7 @@ static inline void _sock_rps_record_flow(const struct sock *sk)
static inline void _sock_rps_delete_flow(const struct sock *sk)
{
struct rps_sock_flow_table *table;
+ rps_tag_ptr tag_ptr;
u32 hash, index;
hash = READ_ONCE(sk->sk_rxhash);
@@ -128,11 +116,12 @@ static inline void _sock_rps_delete_flow(const struct sock *sk)
return;
rcu_read_lock();
- table = rcu_dereference(net_hotdata.rps_sock_flow_table);
- if (table) {
- index = hash & table->mask;
- if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
- WRITE_ONCE(table->ents[index], RPS_NO_CPU);
+ tag_ptr = READ_ONCE(net_hotdata.rps_sock_flow_table);
+ if (tag_ptr) {
+ index = hash & rps_tag_to_mask(tag_ptr);
+ table = rps_tag_to_table(tag_ptr);
+ if (READ_ONCE(table[index].ent) != RPS_NO_CPU)
+ WRITE_ONCE(table[index].ent, RPS_NO_CPU);
}
rcu_read_unlock();
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c3d657359a3d..11159a50d6a1 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -20,12 +20,15 @@
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
#include <linux/xarray.h>
+#include <net/dropreason-qdisc.h>
struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;
+struct Qdisc;
+struct netdev_queue;
struct qdisc_rate_table {
struct tc_ratespec rate;
@@ -707,8 +710,8 @@ void dev_qdisc_change_real_num_tx(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
-void dev_deactivate(struct net_device *dev);
-void dev_deactivate_many(struct list_head *head);
+void dev_deactivate(struct net_device *dev, bool reset_needed);
+void dev_deactivate_many(struct list_head *head, bool reset_needed);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
@@ -1144,38 +1147,62 @@ static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
return cb;
}
+/* TC classifier accessors - use enum skb_drop_reason */
static inline enum skb_drop_reason
tcf_get_drop_reason(const struct sk_buff *skb)
{
- return tc_skb_cb(skb)->drop_reason;
+ return (enum skb_drop_reason)tc_skb_cb(skb)->drop_reason;
}
static inline void tcf_set_drop_reason(const struct sk_buff *skb,
enum skb_drop_reason reason)
{
- tc_skb_cb(skb)->drop_reason = reason;
+ tc_skb_cb(skb)->drop_reason = (enum qdisc_drop_reason)reason;
}
-static inline void tcf_kfree_skb_list(struct sk_buff *skb)
+/* Qdisc accessors - use enum qdisc_drop_reason */
+static inline enum qdisc_drop_reason
+tcf_get_qdisc_drop_reason(const struct sk_buff *skb)
{
- while (unlikely(skb)) {
- struct sk_buff *next = skb->next;
+ return tc_skb_cb(skb)->drop_reason;
+}
- prefetch(next);
- kfree_skb_reason(skb, tcf_get_drop_reason(skb));
- skb = next;
- }
+static inline void tcf_set_qdisc_drop_reason(const struct sk_buff *skb,
+ enum qdisc_drop_reason reason)
+{
+ tc_skb_cb(skb)->drop_reason = reason;
+}
+
+void __tcf_kfree_skb_list(struct sk_buff *skb, struct Qdisc *q,
+ struct netdev_queue *txq, struct net_device *dev);
+
+static inline void tcf_kfree_skb_list(struct sk_buff *skb, struct Qdisc *q,
+ struct netdev_queue *txq,
+ struct net_device *dev)
+{
+ if (unlikely(skb))
+ __tcf_kfree_skb_list(skb, q, txq, dev);
}
static inline void qdisc_dequeue_drop(struct Qdisc *q, struct sk_buff *skb,
- enum skb_drop_reason reason)
+ enum qdisc_drop_reason reason)
{
+ struct Qdisc *root;
+
DEBUG_NET_WARN_ON_ONCE(!(q->flags & TCQ_F_DEQUEUE_DROPS));
DEBUG_NET_WARN_ON_ONCE(q->flags & TCQ_F_NOLOCK);
- tcf_set_drop_reason(skb, reason);
- skb->next = q->to_free;
- q->to_free = skb;
+ rcu_read_lock();
+ root = qdisc_root_sleeping(q);
+
+ if (root->flags & TCQ_F_DEQUEUE_DROPS) {
+ tcf_set_qdisc_drop_reason(skb, reason);
+ skb->next = root->to_free;
+ root->to_free = skb;
+ } else {
+ kfree_skb_reason(skb, (enum skb_drop_reason)reason);
+ }
+ rcu_read_unlock();
}
/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -1350,9 +1377,9 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
static inline int qdisc_drop_reason(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free,
- enum skb_drop_reason reason)
+ enum qdisc_drop_reason reason)
{
- tcf_set_drop_reason(skb, reason);
+ tcf_set_qdisc_drop_reason(skb, reason);
return qdisc_drop(skb, sch, to_free);
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 6c9a83016e95..dccd3738c368 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -81,8 +81,13 @@
* mini-semaphore synchronizes multiple users amongst themselves.
*/
typedef struct {
- spinlock_t slock;
- int owned;
+ union {
+ struct slock_owned {
+ int owned;
+ spinlock_t slock;
+ };
+ long combined;
+ };
wait_queue_head_t wq;
/*
* We express the mutex-alike socket_lock semantics
@@ -121,14 +126,14 @@ typedef __u64 __bitwise __addrpair;
* @skc_bypass_prot_mem: bypass the per-protocol memory accounting for skb
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
- * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
+ * @skc_portaddr_node: second hash linkage for UDP
* @skc_prot: protocol handlers inside a network family
* @skc_net: reference to the network namespace of this socket
* @skc_v6_daddr: IPV6 destination address
* @skc_v6_rcv_saddr: IPV6 source address
* @skc_cookie: socket's cookie value
* @skc_node: main hash linkage for various protocol lookup tables
- * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
+ * @skc_nulls_node: main hash linkage for TCP
* @skc_tx_queue_mapping: tx queue number for this connection
* @skc_rx_queue_mapping: rx queue number for this connection
* @skc_flags: place holder for sk_flags
@@ -537,7 +542,7 @@ struct sock {
rwlock_t sk_callback_lock;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
- unsigned long sk_ino;
+ u64 sk_ino;
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
@@ -1316,7 +1321,7 @@ struct proto {
int (*sendmsg)(struct sock *sk, struct msghdr *msg,
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
- size_t len, int flags, int *addr_len);
+ size_t len, int flags);
void (*splice_eof)(struct socket *sock);
int (*bind)(struct sock *sk,
struct sockaddr_unsized *addr, int addr_len);
@@ -1387,7 +1392,6 @@ struct proto {
union {
struct inet_hashinfo *hashinfo;
- struct udp_table *udp_table;
struct raw_hashinfo *raw_hash;
struct smc_hashinfo *smc_hash;
} h;
@@ -1709,7 +1713,6 @@ static inline void lock_sock(struct sock *sk)
lock_sock_nested(sk, 0);
}
-void __lock_sock(struct sock *sk);
void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);
@@ -2140,7 +2143,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
-static inline unsigned long sock_i_ino(const struct sock *sk)
+static inline u64 sock_i_ino(const struct sock *sk)
{
/* Paired with WRITE_ONCE() in sock_graft() and sock_orphan() */
return READ_ONCE(sk->sk_ino);
@@ -2499,12 +2502,23 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
- enum skb_drop_reason *reason);
+enum skb_drop_reason
+sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb);
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- return sock_queue_rcv_skb_reason(sk, skb, NULL);
+ enum skb_drop_reason drop_reason = sock_queue_rcv_skb_reason(sk, skb);
+
+ switch (drop_reason) {
+ case SKB_DROP_REASON_SOCKET_RCVBUFF:
+ return -ENOMEM;
+ case SKB_DROP_REASON_PROTO_MEM:
+ return -ENOBUFS;
+ case 0:
+ return 0;
+ default:
+ return -EPERM;
+ }
}
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 8346b0d29542..ee500706496b 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -15,6 +15,7 @@
#define SWITCHDEV_F_NO_RECURSE BIT(0)
#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1)
#define SWITCHDEV_F_DEFER BIT(2)
+#define SWITCHDEV_F_NO_FOREIGN BIT(3)
enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_UNDEFINED,
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
index ffe58a02537c..4ebb053bb0dd 100644
--- a/include/net/tc_wrapper.h
+++ b/include/net/tc_wrapper.h
@@ -12,7 +12,8 @@
#define TC_INDIRECT_SCOPE
-extern struct static_key_false tc_skip_wrapper;
+extern struct static_key_false tc_skip_wrapper_act;
+extern struct static_key_false tc_skip_wrapper_cls;
/* TC Actions */
#ifdef CONFIG_NET_CLS_ACT
@@ -46,7 +47,7 @@ TC_INDIRECT_ACTION_DECLARE(tunnel_key_act);
static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
- if (static_branch_likely(&tc_skip_wrapper))
+ if (static_branch_likely(&tc_skip_wrapper_act))
goto skip;
#if IS_BUILTIN(CONFIG_NET_ACT_GACT)
@@ -153,7 +154,7 @@ TC_INDIRECT_FILTER_DECLARE(u32_classify);
static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- if (static_branch_likely(&tc_skip_wrapper))
+ if (static_branch_likely(&tc_skip_wrapper_cls))
goto skip;
#if IS_BUILTIN(CONFIG_NET_CLS_BPF)
@@ -202,8 +203,44 @@ skip:
static inline void tc_wrapper_init(void)
{
#ifdef CONFIG_X86
- if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE))
- static_branch_enable(&tc_skip_wrapper);
+ int cnt_cls = IS_BUILTIN(CONFIG_NET_CLS_BPF) +
+ IS_BUILTIN(CONFIG_NET_CLS_U32) +
+ IS_BUILTIN(CONFIG_NET_CLS_FLOWER) +
+ IS_BUILTIN(CONFIG_NET_CLS_FW) +
+ IS_BUILTIN(CONFIG_NET_CLS_MATCHALL) +
+ IS_BUILTIN(CONFIG_NET_CLS_BASIC) +
+ IS_BUILTIN(CONFIG_NET_CLS_CGROUP) +
+ IS_BUILTIN(CONFIG_NET_CLS_FLOW) +
+ IS_BUILTIN(CONFIG_NET_CLS_ROUTE4);
+
+ int cnt_act = IS_BUILTIN(CONFIG_NET_ACT_GACT) +
+ IS_BUILTIN(CONFIG_NET_ACT_MIRRED) +
+ IS_BUILTIN(CONFIG_NET_ACT_PEDIT) +
+ IS_BUILTIN(CONFIG_NET_ACT_SKBEDIT) +
+ IS_BUILTIN(CONFIG_NET_ACT_SKBMOD) +
+ IS_BUILTIN(CONFIG_NET_ACT_POLICE) +
+ IS_BUILTIN(CONFIG_NET_ACT_BPF) +
+ IS_BUILTIN(CONFIG_NET_ACT_CONNMARK) +
+ IS_BUILTIN(CONFIG_NET_ACT_CSUM) +
+ IS_BUILTIN(CONFIG_NET_ACT_CT) +
+ IS_BUILTIN(CONFIG_NET_ACT_CTINFO) +
+ IS_BUILTIN(CONFIG_NET_ACT_GATE) +
+ IS_BUILTIN(CONFIG_NET_ACT_MPLS) +
+ IS_BUILTIN(CONFIG_NET_ACT_NAT) +
+ IS_BUILTIN(CONFIG_NET_ACT_TUNNEL_KEY) +
+ IS_BUILTIN(CONFIG_NET_ACT_VLAN) +
+ IS_BUILTIN(CONFIG_NET_ACT_IFE) +
+ IS_BUILTIN(CONFIG_NET_ACT_SIMP) +
+ IS_BUILTIN(CONFIG_NET_ACT_SAMPLE);
+
+ if (cpu_feature_enabled(X86_FEATURE_RETPOLINE))
+ return;
+
+ if (cnt_cls > 1)
+ static_branch_enable(&tc_skip_wrapper_cls);
+
+ if (cnt_act > 1)
+ static_branch_enable(&tc_skip_wrapper_act);
#endif
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 978eea2d5df0..dfa52ceefd23 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -363,7 +363,6 @@ int tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
-int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
void tcp_remove_empty_skb(struct sock *sk);
@@ -376,7 +375,21 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
int size_goal);
+
void tcp_release_cb(struct sock *sk);
+
+static inline bool tcp_release_cb_cond(struct sock *sk)
+{
+#ifdef CONFIG_INET
+ if (likely(sk->sk_prot->release_cb == tcp_release_cb)) {
+ if (unlikely(smp_load_acquire(&sk->sk_tsq_flags) & TCP_DEFERRED_ALL))
+ tcp_release_cb(sk);
+ return true;
+ }
+#endif
+ return false;
+}
+
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
@@ -501,11 +514,19 @@ void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int flags, int *addr_len);
+ int flags);
int tcp_set_rcvlowat(struct sock *sk, int val);
+void tcp_set_rcvbuf(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
-void tcp_update_recv_tstamps(struct sk_buff *skb,
- struct scm_timestamping_internal *tss);
+
+static inline void
+tcp_update_recv_tstamps(struct sk_buff *skb,
+ struct scm_timestamping_internal *tss)
+{
+ tss->ts[0] = skb->tstamp;
+ tss->ts[2] = skb_hwtstamps(skb)->hwtstamp;
+}
+
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
@@ -532,7 +553,6 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
* TCP v4 functions exported for the inet6 API
*/
-void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
@@ -915,6 +935,28 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp)
return (u32) win;
}
+/* Compute the maximum receive window we ever advertised.
+ * Rcv_nxt can be after the window if our peer pushes more data
+ * than the offered window.
+ */
+static inline u32 tcp_max_receive_window(const struct tcp_sock *tp)
+{
+ s32 win = tp->rcv_mwnd_seq - tp->rcv_nxt;
+
+ if (win < 0)
+ win = 0;
+ return (u32) win;
+}
+
+/* Check if we need to update the maximum receive window sequence number */
+static inline void tcp_update_max_rcv_wnd_seq(struct tcp_sock *tp)
+{
+ u32 wre = tp->rcv_wup + tp->rcv_wnd;
+
+ if (after(wre, tp->rcv_mwnd_seq))
+ tp->rcv_mwnd_seq = wre;
+}
+
/* Choose a new window, without checks for shrinking, and without
* scaling applied to the result. The caller does these things
* if necessary. This is a "raw" window selection.
@@ -1135,9 +1177,7 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
extern const struct inet_connection_sock_af_ops ipv6_specific;
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
-void tcp_v6_early_demux(struct sk_buff *skb);
#endif
@@ -1302,6 +1342,9 @@ struct tcp_congestion_ops {
/* call when cwnd event occurs (optional) */
void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+ /* call when CA_EVENT_TX_START cwnd event occurs (optional) */
+ void (*cwnd_event_tx_start)(struct sock *sk);
+
/* call when ack arrives (optional) */
void (*in_ack_event)(struct sock *sk, u32 flags);
@@ -1401,6 +1444,11 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ if (event == CA_EVENT_TX_START) {
+ if (icsk->icsk_ca_ops->cwnd_event_tx_start)
+ icsk->icsk_ca_ops->cwnd_event_tx_start(sk);
+ return;
+ }
if (icsk->icsk_ca_ops->cwnd_event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
@@ -1633,15 +1681,14 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
__skb_checksum_complete(skb);
}
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
- enum skb_drop_reason *reason);
+enum skb_drop_reason tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
-static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
- enum skb_drop_reason *reason)
+static inline enum skb_drop_reason
+tcp_filter(struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = (const struct tcphdr *)skb->data;
- return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
+ return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th));
}
void tcp_set_state(struct sock *sk, int state);
@@ -2156,7 +2203,30 @@ enum tcp_chrono {
__TCP_CHRONO_MAX,
};
-void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
+static inline void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
+{
+ const u32 now = tcp_jiffies32;
+ enum tcp_chrono old = tp->chrono_type;
+
+ if (old > TCP_CHRONO_UNSPEC)
+ tp->chrono_stat[old - 1] += now - tp->chrono_start;
+ tp->chrono_start = now;
+ tp->chrono_type = new;
+}
+
+static inline void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* If there are multiple conditions worthy of tracking in a
+ * chronograph then the highest priority enum takes precedence
+ * over the other conditions. So that if something "more interesting"
+ * starts happening, stop the previous chrono and start a new one.
+ */
+ if (type > tp->chrono_type)
+ tcp_chrono_set(tp, type);
+}
+
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
/* This helper is needed, because skb->tcp_tsorted_anchor uses
@@ -2385,7 +2455,15 @@ void tcp_gro_complete(struct sk_buff *skb);
static inline void tcp_gro_complete(struct sk_buff *skb) { }
#endif
-void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
+static inline void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
+ __be32 daddr)
+{
+ struct tcphdr *th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+}
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
@@ -2999,4 +3077,18 @@ enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
const void *saddr, const void *daddr,
int family, int dif, int sdif);
+static inline int tcp_recv_should_stop(struct sock *sk)
+{
+ return sk->sk_err ||
+ sk->sk_state == TCP_CLOSE ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ signal_pending(current);
+}
+
+INDIRECT_CALLABLE_DECLARE(union tcp_seq_and_ts_off
+ tcp_v4_init_seq_and_ts_off(const struct net *net,
+ const struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(union tcp_seq_and_ts_off
+ tcp_v6_init_seq_and_ts_off(const struct net *net,
+ const struct sk_buff *skb));
#endif /* _TCP_H */
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 1a97e3f32029..c0a421fe0c2a 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -8,7 +8,6 @@
/* IPv6 transport protocols */
extern struct proto rawv6_prot;
extern struct proto udpv6_prot;
-extern struct proto udplitev6_prot;
extern struct proto tcpv6_prot;
extern struct proto pingv6_prot;
@@ -28,8 +27,6 @@ int rawv6_init(void);
void rawv6_exit(void);
int udpv6_init(void);
void udpv6_exit(void);
-int udplitev6_init(void);
-void udplitev6_exit(void);
int tcpv6_init(void);
void tcpv6_exit(void);
diff --git a/include/net/tso.h b/include/net/tso.h
index e7e157ae0526..da82aabd1d48 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -3,6 +3,7 @@
#define _TSO_H
#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
#include <net/ip.h>
#define TSO_HEADER_SIZE 256
@@ -28,4 +29,103 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
int tso_start(struct sk_buff *skb, struct tso_t *tso);
+/**
+ * struct tso_dma_map - DMA mapping state for GSO payload
+ * @dev: device used for DMA mapping
+ * @skb: the GSO skb being mapped
+ * @hdr_len: per-segment header length
+ * @iova_state: DMA IOVA state (when IOMMU available)
+ * @iova_offset: global byte offset into IOVA range (IOVA path only)
+ * @total_len: total payload length
+ * @frag_idx: current region (-1 = linear, 0..nr_frags-1 = frag)
+ * @offset: byte offset within current region
+ * @linear_dma: DMA address of the linear payload
+ * @linear_len: length of the linear payload
+ * @nr_frags: number of frags successfully DMA-mapped
+ * @frags: per-frag DMA address and length
+ *
+ * DMA-maps the payload regions of a GSO skb (linear data + frags).
+ * Prefers the DMA IOVA API for a single contiguous mapping with one
+ * IOTLB sync; falls back to per-region dma_map_phys() otherwise.
+ */
+struct tso_dma_map {
+ struct device *dev;
+ const struct sk_buff *skb;
+ unsigned int hdr_len;
+ /* IOVA path */
+ struct dma_iova_state iova_state;
+ size_t iova_offset;
+ size_t total_len;
+ /* Fallback path if IOVA path fails */
+ int frag_idx;
+ unsigned int offset;
+ dma_addr_t linear_dma;
+ unsigned int linear_len;
+ unsigned int nr_frags;
+ struct {
+ dma_addr_t dma;
+ unsigned int len;
+ } frags[MAX_SKB_FRAGS];
+};
+
+/**
+ * struct tso_dma_map_completion_state - Completion-time cleanup state
+ * @iova_state: DMA IOVA state (when IOMMU available)
+ * @total_len: total payload length of the IOVA mapping
+ *
+ * Drivers store this on their SW ring at xmit time via
+ * tso_dma_map_completion_save(), then call tso_dma_map_complete() at
+ * completion time.
+ */
+struct tso_dma_map_completion_state {
+ struct dma_iova_state iova_state;
+ size_t total_len;
+};
+
+int tso_dma_map_init(struct tso_dma_map *map, struct device *dev,
+ const struct sk_buff *skb, unsigned int hdr_len);
+void tso_dma_map_cleanup(struct tso_dma_map *map);
+unsigned int tso_dma_map_count(struct tso_dma_map *map, unsigned int len);
+bool tso_dma_map_next(struct tso_dma_map *map, dma_addr_t *addr,
+ unsigned int *chunk_len, unsigned int *mapping_len,
+ unsigned int seg_remaining);
+
+/**
+ * tso_dma_map_completion_save - save state needed for completion-time cleanup
+ * @map: the xmit-time DMA map
+ * @cstate: driver-owned storage that persists until completion
+ *
+ * Should be called at xmit time to save the completion state; @cstate
+ * is later passed to tso_dma_map_complete().
+ */
+static inline void
+tso_dma_map_completion_save(const struct tso_dma_map *map,
+ struct tso_dma_map_completion_state *cstate)
+{
+ cstate->iova_state = map->iova_state;
+ cstate->total_len = map->total_len;
+}
+
+/**
+ * tso_dma_map_complete - tear down mapping at completion time
+ * @dev: the device that owns the mapping
+ * @cstate: state saved by tso_dma_map_completion_save()
+ *
+ * Return: true if the IOVA path was used and the mapping has been
+ * destroyed; false if the fallback per-region path was used and the
+ * driver must unmap via its normal completion path.
+ */
+static inline bool
+tso_dma_map_complete(struct device *dev,
+ struct tso_dma_map_completion_state *cstate)
+{
+ if (dma_use_iova(&cstate->iova_state)) {
+ dma_iova_destroy(dev, &cstate->iova_state, cstate->total_len,
+ DMA_TO_DEVICE, 0);
+ return true;
+ }
+
+ return false;
+}
+
#endif /* _TSO_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index da68702ddf6e..8262e2b215b4 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -29,13 +29,12 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>
+#include <linux/math.h>
/**
- * struct udp_skb_cb - UDP(-Lite) private variables
+ * struct udp_skb_cb - UDP private variables
*
* @header: private variables used by IPv4/IPv6
- * @cscov: checksum coverage length (UDP-Lite only)
- * @partial_cov: if set indicates partial csum coverage
*/
struct udp_skb_cb {
union {
@@ -44,8 +43,6 @@ struct udp_skb_cb {
struct inet6_skb_parm h6;
#endif
} header;
- __u16 cscov;
- __u8 partial_cov;
};
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
@@ -104,7 +101,7 @@ struct udp_table {
unsigned int log;
};
extern struct udp_table udp_table;
-void udp_table_init(struct udp_table *, const char *);
+
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
const struct net *net,
unsigned int num)
@@ -215,13 +212,11 @@ extern int sysctl_udp_wmem_min;
struct sk_buff;
/*
- * Generic checksumming routines for UDP(-Lite) v4 and v6
+ * Generic checksumming routines for UDP v4 and v6
*/
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
- return (UDP_SKB_CB(skb)->cscov == skb->len ?
- __skb_checksum_complete(skb) :
- __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
+ return __skb_checksum_complete(skb);
}
static inline int udp_lib_checksum_complete(struct sk_buff *skb)
@@ -272,7 +267,6 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
skb->csum);
skb_pull_rcsum(skb, sizeof(struct udphdr));
- UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}
typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
@@ -281,6 +275,10 @@ typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
void udp_v6_early_demux(struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
+int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+INDIRECT_CALLABLE_DECLARE(int udpv6_recvmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags));
+
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
netdev_features_t features, bool is_ipv6);
@@ -307,7 +305,7 @@ static inline void udp_drops_inc(struct sock *sk)
numa_drop_add(&udp_sk(sk)->drop_counters, 1);
}
-/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
+/* hash routines shared between UDPv4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
BUG();
@@ -376,7 +374,7 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
*/
hash ^= hash << 16;
- return htons((((u64) hash * (max - min)) >> 32) + min);
+ return htons(reciprocal_scale(hash, max - min + 1) + min);
}
static inline int udp_rqueue_get(struct sock *sk)
@@ -415,6 +413,8 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags));
void udp_splice_eof(struct socket *sock);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
@@ -422,7 +422,6 @@ int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, int *karg);
-int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
@@ -438,9 +437,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
- __be16 sport,
- __be32 daddr, __be16 dport, int dif, int sdif,
- struct udp_table *tbl, struct sk_buff *skb);
+ __be16 sport, __be32 daddr, __be16 dport,
+ int dif, int sdif, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(const struct net *net,
@@ -450,8 +448,7 @@ struct sock *udp6_lib_lookup(const struct net *net,
struct sock *__udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
- int dif, int sdif, struct udp_table *tbl,
- struct sk_buff *skb);
+ int dif, int sdif, struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
@@ -523,38 +520,28 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
}
/*
- * SNMP statistics for UDP and UDP-Lite
+ * SNMP statistics for UDP
*/
-#define UDP_INC_STATS(net, field, is_udplite) do { \
- if (unlikely(is_udplite)) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
- else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
-#define __UDP_INC_STATS(net, field, is_udplite) do { \
- if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
- else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
-
-#define __UDP6_INC_STATS(net, field, is_udplite) do { \
- if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
- else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
-} while(0)
-#define UDP6_INC_STATS(net, field, __lite) do { \
- if (unlikely(__lite)) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
- else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
-} while(0)
+#define __UDP_INC_STATS(net, field) \
+ __SNMP_INC_STATS((net)->mib.udp_statistics, field)
+#define UDP_INC_STATS(net, field) \
+ SNMP_INC_STATS((net)->mib.udp_statistics, field)
+#define __UDP6_INC_STATS(net, field) \
+ __SNMP_INC_STATS((net)->mib.udp_stats_in6, field)
+#define UDP6_INC_STATS(net, field) \
+ SNMP_INC_STATS((net)->mib.udp_stats_in6, field)
#if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_MIB(sk, ipv4) \
-({ \
- ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
- sock_net(sk)->mib.udp_statistics) : \
- (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
- sock_net(sk)->mib.udp_stats_in6); \
-})
+#define __UDPX_MIB(sk, ipv4) \
+ ({ \
+ ipv4 ? sock_net(sk)->mib.udp_statistics : \
+ sock_net(sk)->mib.udp_stats_in6; \
+ })
#else
-#define __UDPX_MIB(sk, ipv4) \
-({ \
- IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
- sock_net(sk)->mib.udp_statistics; \
-})
+#define __UDPX_MIB(sk, ipv4) \
+ ({ \
+ sock_net(sk)->mib.udp_statistics; \
+ })
#endif
#define __UDPX_INC_STATS(sk, field) \
@@ -563,7 +550,6 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
sa_family_t family;
- struct udp_table *udp_table;
};
struct udp_iter_state {
@@ -575,9 +561,6 @@ void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);
-extern const struct seq_operations udp_seq_ops;
-extern const struct seq_operations udp6_seq_ops;
-
int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */
@@ -648,9 +631,6 @@ drop:
static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
- /* UDP-lite can't land here - no GRO */
- WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);
-
/* UDP packets generated with UDP_SEGMENT and traversing:
*
* UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
@@ -664,7 +644,6 @@ static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
* a valid csum after the segmentation.
* Additionally fixup the UDP CB.
*/
- UDP_SKB_CB(skb)->cscov = skb->len;
if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
skb->csum_valid = 1;
}
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index fc1fc43345b5..47c23d4a1740 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -7,7 +7,6 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
-#include <net/ipv6_stubs.h>
#endif
#define UDP_TUNNEL_PARTIAL_FEATURES NETIF_F_GSO_ENCAP_ALL
@@ -230,7 +229,7 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
#if IS_ENABLED(CONFIG_IPV6)
if (READ_ONCE(sk->sk_family) == PF_INET6)
- ipv6_stub->udpv6_encap_enable();
+ udpv6_encap_enable();
#endif
udp_encap_enable();
}
diff --git a/include/net/udplite.h b/include/net/udplite.h
deleted file mode 100644
index 786919d29f8d..000000000000
--- a/include/net/udplite.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Definitions for the UDP-Lite (RFC 3828) code.
- */
-#ifndef _UDPLITE_H
-#define _UDPLITE_H
-
-#include <net/ip6_checksum.h>
-#include <net/udp.h>
-
-/* UDP-Lite socket options */
-#define UDPLITE_SEND_CSCOV 10 /* sender partial coverage (as sent) */
-#define UDPLITE_RECV_CSCOV 11 /* receiver partial coverage (threshold ) */
-
-extern struct proto udplite_prot;
-extern struct udp_table udplite_table;
-
-/*
- * Checksum computation is all in software, hence simpler getfrag.
- */
-static __inline__ int udplite_getfrag(void *from, char *to, int offset,
- int len, int odd, struct sk_buff *skb)
-{
- struct msghdr *msg = from;
- return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
-}
-
-/*
- * Checksumming routines
- */
-static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
-{
- u16 cscov;
-
- /* In UDPv4 a zero checksum means that the transmitter generated no
- * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
- * with a zero checksum field are illegal. */
- if (uh->check == 0) {
- net_dbg_ratelimited("UDPLite: zeroed checksum field\n");
- return 1;
- }
-
- cscov = ntohs(uh->len);
-
- if (cscov == 0) /* Indicates that full coverage is required. */
- ;
- else if (cscov < 8 || cscov > skb->len) {
- /*
- * Coverage length violates RFC 3828: log and discard silently.
- */
- net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n",
- cscov, skb->len);
- return 1;
-
- } else if (cscov < skb->len) {
- UDP_SKB_CB(skb)->partial_cov = 1;
- UDP_SKB_CB(skb)->cscov = cscov;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
- skb->csum_valid = 0;
- }
-
- return 0;
-}
-
-/* Fast-path computation of checksum. Socket may not be locked. */
-static inline __wsum udplite_csum(struct sk_buff *skb)
-{
- const int off = skb_transport_offset(skb);
- const struct sock *sk = skb->sk;
- int len = skb->len - off;
-
- if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
- u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
-
- if (pcslen < len) {
- if (pcslen > 0)
- len = pcslen;
- udp_hdr(skb)->len = htons(pcslen);
- }
- }
- skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
-
- return skb_checksum(skb, off, len, 0);
-}
-
-void udplite4_register(void);
-#endif /* _UDPLITE_H */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 23e8861e8b25..ebac60a3d8a1 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -14,7 +14,7 @@
#include <linux/mm.h>
#include <net/sock.h>
-#define XDP_UMEM_SG_FLAG (1 << 1)
+#define XDP_UMEM_SG_FLAG BIT(3)
struct net_device;
struct xsk_queue;
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 6b9ebae2dc95..46797645a0c2 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -41,16 +41,37 @@ static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
return XDP_PACKET_HEADROOM + pool->headroom;
}
+static inline u32 xsk_pool_get_tailroom(bool mbuf)
+{
+ return mbuf ? SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 0;
+}
+
static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
return pool->chunk_size;
}
-static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+static inline u32 __xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+{
+ u32 frame_size = __xsk_pool_get_rx_frame_size(pool);
+ struct xdp_umem *umem = pool->umem;
+ bool mbuf;
+
+ /* Reserve tailroom only for zero-copy pools that opted into
+ * multi-buffer. The reserved area is used for skb_shared_info,
+ * matching the XDP core's xdp_data_hard_end() layout.
+ */
+ mbuf = pool->dev && (umem->flags & XDP_UMEM_SG_FLAG);
+ frame_size -= xsk_pool_get_tailroom(mbuf);
+
+ return ALIGN_DOWN(frame_size, 128);
+}
+
static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
{
return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 0b1abdb99c9e..ccb3b350001f 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -174,13 +174,6 @@ static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}
-/* Masks for xdp_umem_page flags.
- * The low 12-bits of the addr will be 0 since this is the page address, so we
- * can use them for flags.
- */
-#define XSK_NEXT_PG_CONTIG_SHIFT 0
-#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
-
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
u64 addr, u32 len)
{
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 7511f5464c48..39765ff6f098 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -3,9 +3,9 @@
* Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
*
* Deterministic automata (DA) monitor functions, to be used together
- * with automata models in C generated by the dot2k tool.
+ * with automata models in C generated by the rvgen tool.
*
- * The dot2k tool is available at tools/verification/dot2k/
+ * The rvgen tool is available at tools/verification/rvgen/
*
* For further information, see:
* Documentation/trace/rv/monitor_synthesis.rst
@@ -19,6 +19,8 @@
#include <linux/stringify.h>
#include <linux/bug.h>
#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/hashtable.h>
/*
* Per-cpu variables require a unique name although static in some
@@ -28,6 +30,43 @@
static struct rv_monitor rv_this;
+/*
+ * Hook to allow the implementation of hybrid automata: define it with a
+ * function that takes curr_state, event and next_state and returns true if the
+ * environment constraints (e.g. timing) are satisfied, false otherwise.
+ */
+#ifndef da_monitor_event_hook
+#define da_monitor_event_hook(...) true
+#endif
+
+/*
+ * Hook to allow the implementation of hybrid automata: define it with a
+ * function that takes the da_monitor and performs further initialisation
+ * (e.g. reset set up timers).
+ */
+#ifndef da_monitor_init_hook
+#define da_monitor_init_hook(da_mon)
+#endif
+
+/*
+ * Hook to allow the implementation of hybrid automata: define it with a
+ * function that takes the da_monitor and performs further reset (e.g. reset
+ * all clocks).
+ */
+#ifndef da_monitor_reset_hook
+#define da_monitor_reset_hook(da_mon)
+#endif
+
+/*
+ * Type for the target id, default to int but can be overridden.
+ * A long type can work as hash table key (PER_OBJ) but will be downgraded to
+ * int in the event tracepoint.
+ * Unused for implicit monitors.
+ */
+#ifndef da_id_type
+#define da_id_type int
+#endif
+
static void react(enum states curr_state, enum events event)
{
rv_react(&rv_this,
@@ -42,6 +81,7 @@ static void react(enum states curr_state, enum events event)
*/
static inline void da_monitor_reset(struct da_monitor *da_mon)
{
+ da_monitor_reset_hook(da_mon);
da_mon->monitoring = 0;
da_mon->curr_state = model_get_initial_state();
}
@@ -56,6 +96,7 @@ static inline void da_monitor_start(struct da_monitor *da_mon)
{
da_mon->curr_state = model_get_initial_state();
da_mon->monitoring = 1;
+ da_monitor_init_hook(da_mon);
}
/*
@@ -97,90 +138,6 @@ static inline bool da_monitor_handling_event(struct da_monitor *da_mon)
return 1;
}
-#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
-/*
- * Event handler for implicit monitors. Implicit monitor is the one which the
- * handler does not need to specify which da_monitor to manipulate. Examples
- * of implicit monitor are the per_cpu or the global ones.
- *
- * Retry in case there is a race between getting and setting the next state,
- * warn and reset the monitor if it runs out of retries. The monitor should be
- * able to handle various orders.
- */
-
-static inline bool da_event(struct da_monitor *da_mon, enum events event)
-{
- enum states curr_state, next_state;
-
- curr_state = READ_ONCE(da_mon->curr_state);
- for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
- next_state = model_get_next_state(curr_state, event);
- if (next_state == INVALID_STATE) {
- react(curr_state, event);
- CONCATENATE(trace_error_, MONITOR_NAME)(
- model_get_state_name(curr_state),
- model_get_event_name(event));
- return false;
- }
- if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
- CONCATENATE(trace_event_, MONITOR_NAME)(
- model_get_state_name(curr_state),
- model_get_event_name(event),
- model_get_state_name(next_state),
- model_is_final_state(next_state));
- return true;
- }
- }
-
- trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
- pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
- " retries reached for event %s, resetting monitor %s",
- model_get_event_name(event), __stringify(MONITOR_NAME));
- return false;
-}
-
-#elif RV_MON_TYPE == RV_MON_PER_TASK
-/*
- * Event handler for per_task monitors.
- *
- * Retry in case there is a race between getting and setting the next state,
- * warn and reset the monitor if it runs out of retries. The monitor should be
- * able to handle various orders.
- */
-
-static inline bool da_event(struct da_monitor *da_mon, struct task_struct *tsk,
- enum events event)
-{
- enum states curr_state, next_state;
-
- curr_state = READ_ONCE(da_mon->curr_state);
- for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
- next_state = model_get_next_state(curr_state, event);
- if (next_state == INVALID_STATE) {
- react(curr_state, event);
- CONCATENATE(trace_error_, MONITOR_NAME)(tsk->pid,
- model_get_state_name(curr_state),
- model_get_event_name(event));
- return false;
- }
- if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
- CONCATENATE(trace_event_, MONITOR_NAME)(tsk->pid,
- model_get_state_name(curr_state),
- model_get_event_name(event),
- model_get_state_name(next_state),
- model_is_final_state(next_state));
- return true;
- }
- }
-
- trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
- pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
- " retries reached for event %s, resetting monitor %s",
- model_get_event_name(event), __stringify(MONITOR_NAME));
- return false;
-}
-#endif /* RV_MON_TYPE */
-
#if RV_MON_TYPE == RV_MON_GLOBAL
/*
* Functions to define, init and get a global monitor.
@@ -219,7 +176,10 @@ static inline int da_monitor_init(void)
/*
* da_monitor_destroy - destroy the monitor
*/
-static inline void da_monitor_destroy(void) { }
+static inline void da_monitor_destroy(void)
+{
+ da_monitor_reset_all();
+}
#elif RV_MON_TYPE == RV_MON_PER_CPU
/*
@@ -265,7 +225,10 @@ static inline int da_monitor_init(void)
/*
* da_monitor_destroy - destroy the monitor
*/
-static inline void da_monitor_destroy(void) { }
+static inline void da_monitor_destroy(void)
+{
+ da_monitor_reset_all();
+}
#elif RV_MON_TYPE == RV_MON_PER_TASK
/*
@@ -286,6 +249,24 @@ static inline struct da_monitor *da_get_monitor(struct task_struct *tsk)
return &tsk->rv[task_mon_slot].da_mon;
}
+/*
+ * da_get_target - return the task associated to the monitor
+ */
+static inline struct task_struct *da_get_target(struct da_monitor *da_mon)
+{
+ return container_of(da_mon, struct task_struct, rv[task_mon_slot].da_mon);
+}
+
+/*
+ * da_get_id - return the id associated to the monitor
+ *
+ * For per-task monitors, the id is the task's PID.
+ */
+static inline da_id_type da_get_id(struct da_monitor *da_mon)
+{
+ return da_get_target(da_mon)->pid;
+}
+
static void da_monitor_reset_all(void)
{
struct task_struct *g, *p;
@@ -330,120 +311,411 @@ static inline void da_monitor_destroy(void)
}
rv_put_task_monitor_slot(task_mon_slot);
task_mon_slot = RV_PER_TASK_MONITOR_INIT;
+
+ da_monitor_reset_all();
}
-#endif /* RV_MON_TYPE */
-#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+#elif RV_MON_TYPE == RV_MON_PER_OBJ
/*
- * Handle event for implicit monitor: da_get_monitor() will figure out
- * the monitor.
+ * Functions to define, init and get a per-object monitor.
*/
-static inline void __da_handle_event(struct da_monitor *da_mon,
- enum events event)
+struct da_monitor_storage {
+ da_id_type id;
+ monitor_target target;
+ union rv_task_monitor rv;
+ struct hlist_node node;
+ struct rcu_head rcu;
+};
+
+#ifndef DA_MONITOR_HT_BITS
+#define DA_MONITOR_HT_BITS 10
+#endif
+static DEFINE_HASHTABLE(da_monitor_ht, DA_MONITOR_HT_BITS);
+
+/*
+ * da_create_empty_storage - pre-allocate an empty storage
+ */
+static inline struct da_monitor_storage *da_create_empty_storage(da_id_type id)
{
- bool retval;
+ struct da_monitor_storage *mon_storage;
- retval = da_event(da_mon, event);
- if (!retval)
- da_monitor_reset(da_mon);
+ mon_storage = kmalloc_nolock(sizeof(struct da_monitor_storage),
+ __GFP_ZERO, NUMA_NO_NODE);
+ if (!mon_storage)
+ return NULL;
+
+ hash_add_rcu(da_monitor_ht, &mon_storage->node, id);
+ mon_storage->id = id;
+ return mon_storage;
}
/*
- * da_handle_event - handle an event
+ * da_create_storage - create the per-object storage
+ *
+ * The caller is responsible for synchronising writers, either with locks or
+ * implicitly. For instance, if da_create_storage is only called from a single
+ * event for target (e.g. sched_switch), it's safe to call this without locks.
*/
-static inline void da_handle_event(enum events event)
+static inline struct da_monitor *da_create_storage(da_id_type id,
+ monitor_target target,
+ struct da_monitor *da_mon)
{
- struct da_monitor *da_mon = da_get_monitor();
- bool retval;
+ struct da_monitor_storage *mon_storage;
- retval = da_monitor_handling_event(da_mon);
- if (!retval)
- return;
+ if (da_mon)
+ return da_mon;
- __da_handle_event(da_mon, event);
+ mon_storage = da_create_empty_storage(id);
+ if (!mon_storage)
+ return NULL;
+
+ mon_storage->target = target;
+ return &mon_storage->rv.da_mon;
}
/*
- * da_handle_start_event - start monitoring or handle event
+ * __da_get_mon_storage - get the monitor storage from the hash table
+ */
+static inline struct da_monitor_storage *__da_get_mon_storage(da_id_type id)
+{
+ struct da_monitor_storage *mon_storage;
+
+ lockdep_assert_in_rcu_read_lock();
+ hash_for_each_possible_rcu(da_monitor_ht, mon_storage, node, id) {
+ if (mon_storage->id == id)
+ return mon_storage;
+ }
+
+ return NULL;
+}
+
+/*
+ * da_get_monitor - return the monitor for target
+ */
+static struct da_monitor *da_get_monitor(da_id_type id, monitor_target target)
+{
+ struct da_monitor_storage *mon_storage;
+
+ mon_storage = __da_get_mon_storage(id);
+ return mon_storage ? &mon_storage->rv.da_mon : NULL;
+}
+
+/*
+ * da_get_target - return the object associated to the monitor
+ */
+static inline monitor_target da_get_target(struct da_monitor *da_mon)
+{
+ return container_of(da_mon, struct da_monitor_storage, rv.da_mon)->target;
+}
+
+/*
+ * da_get_id - return the id associated to the monitor
+ */
+static inline da_id_type da_get_id(struct da_monitor *da_mon)
+{
+ return container_of(da_mon, struct da_monitor_storage, rv.da_mon)->id;
+}
+
+/*
+ * da_create_or_get - create the per-object storage if not already there
*
- * This function is used to notify the monitor that the system is returning
- * to the initial state, so the monitor can start monitoring in the next event.
- * Thus:
+ * This needs a lookup, so it should be guarded by RCU; the condition is checked
+ * directly in da_create_storage()
+ */
+static inline void da_create_or_get(da_id_type id, monitor_target target)
+{
+ guard(rcu)();
+ da_create_storage(id, target, da_get_monitor(id, target));
+}
+
+/*
+ * da_fill_empty_storage - store the target in a pre-allocated storage
*
- * If the monitor already started, handle the event.
- * If the monitor did not start yet, start the monitor but skip the event.
+ * Can be used as a substitute of da_create_storage when starting a monitor in
+ * an environment where allocation is unsafe.
*/
-static inline bool da_handle_start_event(enum events event)
+static inline struct da_monitor *da_fill_empty_storage(da_id_type id,
+ monitor_target target,
+ struct da_monitor *da_mon)
{
- struct da_monitor *da_mon;
+ if (unlikely(da_mon && !da_get_target(da_mon)))
+ container_of(da_mon, struct da_monitor_storage, rv.da_mon)->target = target;
+ return da_mon;
+}
- if (!da_monitor_enabled())
- return 0;
+/*
+ * da_get_target_by_id - return the object associated to the id
+ */
+static inline monitor_target da_get_target_by_id(da_id_type id)
+{
+ struct da_monitor_storage *mon_storage;
- da_mon = da_get_monitor();
+ guard(rcu)();
+ mon_storage = __da_get_mon_storage(id);
- if (unlikely(!da_monitoring(da_mon))) {
- da_monitor_start(da_mon);
- return 0;
+ if (unlikely(!mon_storage))
+ return NULL;
+ return mon_storage->target;
+}
+
+/*
+ * da_destroy_storage - destroy the per-object storage
+ *
+ * The caller is responsible for synchronising writers, either with locks or
+ * implicitly. For instance, if da_destroy_storage is called at sched_exit and
+ * da_create_storage can never occur after that, it's safe to call this without
+ * locks.
+ * This function includes an RCU read-side critical section to synchronise
+ * against da_monitor_destroy().
+ */
+static inline void da_destroy_storage(da_id_type id)
+{
+ struct da_monitor_storage *mon_storage;
+
+ guard(rcu)();
+ mon_storage = __da_get_mon_storage(id);
+
+ if (!mon_storage)
+ return;
+ da_monitor_reset_hook(&mon_storage->rv.da_mon);
+ hash_del_rcu(&mon_storage->node);
+ kfree_rcu(mon_storage, rcu);
+}
+
+static void da_monitor_reset_all(void)
+{
+ struct da_monitor_storage *mon_storage;
+ int bkt;
+
+ rcu_read_lock();
+ hash_for_each_rcu(da_monitor_ht, bkt, mon_storage, node)
+ da_monitor_reset(&mon_storage->rv.da_mon);
+ rcu_read_unlock();
+}
+
+static inline int da_monitor_init(void)
+{
+ hash_init(da_monitor_ht);
+ return 0;
+}
+
+static inline void da_monitor_destroy(void)
+{
+ struct da_monitor_storage *mon_storage;
+ struct hlist_node *tmp;
+ int bkt;
+
+ /*
+ * This function is called after all probes are disabled, we need only
+ * worry about concurrency against old events.
+ */
+ synchronize_rcu();
+ hash_for_each_safe(da_monitor_ht, bkt, tmp, mon_storage, node) {
+ da_monitor_reset_hook(&mon_storage->rv.da_mon);
+ hash_del_rcu(&mon_storage->node);
+ kfree(mon_storage);
}
+}
- __da_handle_event(da_mon, event);
+/*
+ * Allow the per-object monitors to run allocation manually, necessary if the
+ * start condition is in a context problematic for allocation (e.g. scheduling).
+ * In such case, if the storage was pre-allocated without a target, set it now.
+ */
+#ifdef DA_SKIP_AUTO_ALLOC
+#define da_prepare_storage da_fill_empty_storage
+#else
+#define da_prepare_storage da_create_storage
+#endif /* DA_SKIP_AUTO_ALLOC */
- return 1;
+#endif /* RV_MON_TYPE */
+
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+/*
+ * Trace events for implicit monitors. Implicit monitor is the one which the
+ * handler does not need to specify which da_monitor to manipulate. Examples
+ * of implicit monitor are the per_cpu or the global ones.
+ */
+
+static inline void da_trace_event(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ char *next_state, bool is_final,
+ da_id_type id)
+{
+ CONCATENATE(trace_event_, MONITOR_NAME)(curr_state, event, next_state,
+ is_final);
+}
+
+static inline void da_trace_error(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(curr_state, event);
}
/*
- * da_handle_start_run_event - start monitoring and handle event
+ * da_get_id - unused for implicit monitors
+ */
+static inline da_id_type da_get_id(struct da_monitor *da_mon)
+{
+ return 0;
+}
+
+#elif RV_MON_TYPE == RV_MON_PER_TASK || RV_MON_TYPE == RV_MON_PER_OBJ
+/*
+ * Trace events for per_task/per_object monitors, report the target id.
+ */
+
+static inline void da_trace_event(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ char *next_state, bool is_final,
+ da_id_type id)
+{
+ CONCATENATE(trace_event_, MONITOR_NAME)(id, curr_state, event,
+ next_state, is_final);
+}
+
+static inline void da_trace_error(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(id, curr_state, event);
+}
+#endif /* RV_MON_TYPE */
+
+/*
+ * da_event - handle an event for the da_mon
*
- * This function is used to notify the monitor that the system is in the
- * initial state, so the monitor can start monitoring and handling event.
+ * This function is valid for both implicit and id monitors.
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
-static inline bool da_handle_start_run_event(enum events event)
+static inline bool da_event(struct da_monitor *da_mon, enum events event, da_id_type id)
{
- struct da_monitor *da_mon;
+ enum states curr_state, next_state;
+ curr_state = READ_ONCE(da_mon->curr_state);
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
+ next_state = model_get_next_state(curr_state, event);
+ if (next_state == INVALID_STATE) {
+ react(curr_state, event);
+ da_trace_error(da_mon, model_get_state_name(curr_state),
+ model_get_event_name(event), id);
+ return false;
+ }
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
+ if (!da_monitor_event_hook(da_mon, curr_state, event, next_state, id))
+ return false;
+ da_trace_event(da_mon, model_get_state_name(curr_state),
+ model_get_event_name(event),
+ model_get_state_name(next_state),
+ model_is_final_state(next_state), id);
+ return true;
+ }
+ }
+
+ trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
+ " retries reached for event %s, resetting monitor %s",
+ model_get_event_name(event), __stringify(MONITOR_NAME));
+ return false;
+}
+
+static inline void __da_handle_event_common(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
+ if (!da_event(da_mon, event, id))
+ da_monitor_reset(da_mon);
+}
+
+static inline void __da_handle_event(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
+ if (da_monitor_handling_event(da_mon))
+ __da_handle_event_common(da_mon, event, id);
+}
+
+static inline bool __da_handle_start_event(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
if (!da_monitor_enabled())
return 0;
+ if (unlikely(!da_monitoring(da_mon))) {
+ da_monitor_start(da_mon);
+ return 0;
+ }
+
+ __da_handle_event_common(da_mon, event, id);
- da_mon = da_get_monitor();
+ return 1;
+}
+static inline bool __da_handle_start_run_event(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
+ if (!da_monitor_enabled())
+ return 0;
if (unlikely(!da_monitoring(da_mon)))
da_monitor_start(da_mon);
- __da_handle_event(da_mon, event);
+ __da_handle_event_common(da_mon, event, id);
return 1;
}
-#elif RV_MON_TYPE == RV_MON_PER_TASK
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
/*
- * Handle event for per task.
+ * Handle event for implicit monitor: da_get_monitor() will figure out
+ * the monitor.
*/
-static inline void __da_handle_event(struct da_monitor *da_mon,
- struct task_struct *tsk, enum events event)
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(enum events event)
{
- bool retval;
+ __da_handle_event(da_get_monitor(), event, 0);
+}
- retval = da_event(da_mon, tsk, event);
- if (!retval)
- da_monitor_reset(da_mon);
+/*
+ * da_handle_start_event - start monitoring or handle event
+ *
+ * This function is used to notify the monitor that the system is returning
+ * to the initial state, so the monitor can start monitoring in the next event.
+ * Thus:
+ *
+ * If the monitor already started, handle the event.
+ * If the monitor did not start yet, start the monitor but skip the event.
+ */
+static inline bool da_handle_start_event(enum events event)
+{
+ return __da_handle_start_event(da_get_monitor(), event, 0);
}
/*
- * da_handle_event - handle an event
+ * da_handle_start_run_event - start monitoring and handle event
+ *
+ * This function is used to notify the monitor that the system is in the
+ * initial state, so the monitor can start monitoring and handling event.
*/
-static inline void da_handle_event(struct task_struct *tsk, enum events event)
+static inline bool da_handle_start_run_event(enum events event)
{
- struct da_monitor *da_mon = da_get_monitor(tsk);
- bool retval;
+ return __da_handle_start_run_event(da_get_monitor(), event, 0);
+}
- retval = da_monitor_handling_event(da_mon);
- if (!retval)
- return;
+#elif RV_MON_TYPE == RV_MON_PER_TASK
+/*
+ * Handle event for per task.
+ */
- __da_handle_event(da_mon, tsk, event);
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(struct task_struct *tsk, enum events event)
+{
+ __da_handle_event(da_get_monitor(tsk), event, tsk->pid);
}
/*
@@ -459,21 +731,60 @@ static inline void da_handle_event(struct task_struct *tsk, enum events event)
static inline bool da_handle_start_event(struct task_struct *tsk,
enum events event)
{
- struct da_monitor *da_mon;
+ return __da_handle_start_event(da_get_monitor(tsk), event, tsk->pid);
+}
- if (!da_monitor_enabled())
- return 0;
+/*
+ * da_handle_start_run_event - start monitoring and handle event
+ *
+ * This function is used to notify the monitor that the system is in the
+ * initial state, so the monitor can start monitoring and handling event.
+ */
+static inline bool da_handle_start_run_event(struct task_struct *tsk,
+ enum events event)
+{
+ return __da_handle_start_run_event(da_get_monitor(tsk), event, tsk->pid);
+}
- da_mon = da_get_monitor(tsk);
+#elif RV_MON_TYPE == RV_MON_PER_OBJ
+/*
+ * Handle event for per object.
+ */
- if (unlikely(!da_monitoring(da_mon))) {
- da_monitor_start(da_mon);
- return 0;
- }
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(da_id_type id, monitor_target target, enum events event)
+{
+ struct da_monitor *da_mon;
- __da_handle_event(da_mon, tsk, event);
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ if (likely(da_mon))
+ __da_handle_event(da_mon, event, id);
+}
- return 1;
+/*
+ * da_handle_start_event - start monitoring or handle event
+ *
+ * This function is used to notify the monitor that the system is returning
+ * to the initial state, so the monitor can start monitoring in the next event.
+ * Thus:
+ *
+ * If the monitor already started, handle the event.
+ * If the monitor did not start yet, start the monitor but skip the event.
+ */
+static inline bool da_handle_start_event(da_id_type id, monitor_target target,
+ enum events event)
+{
+ struct da_monitor *da_mon;
+
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ da_mon = da_prepare_storage(id, target, da_mon);
+ if (unlikely(!da_mon))
+ return 0;
+ return __da_handle_start_event(da_mon, event, id);
}
/*
@@ -482,22 +793,27 @@ static inline bool da_handle_start_event(struct task_struct *tsk,
* This function is used to notify the monitor that the system is in the
* initial state, so the monitor can start monitoring and handling event.
*/
-static inline bool da_handle_start_run_event(struct task_struct *tsk,
+static inline bool da_handle_start_run_event(da_id_type id, monitor_target target,
enum events event)
{
struct da_monitor *da_mon;
- if (!da_monitor_enabled())
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ da_mon = da_prepare_storage(id, target, da_mon);
+ if (unlikely(!da_mon))
return 0;
+ return __da_handle_start_run_event(da_mon, event, id);
+}
- da_mon = da_get_monitor(tsk);
-
- if (unlikely(!da_monitoring(da_mon)))
- da_monitor_start(da_mon);
-
- __da_handle_event(da_mon, tsk, event);
+static inline void da_reset(da_id_type id, monitor_target target)
+{
+ struct da_monitor *da_mon;
- return 1;
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ if (likely(da_mon))
+ da_monitor_reset(da_mon);
}
#endif /* RV_MON_TYPE */
diff --git a/include/rv/ha_monitor.h b/include/rv/ha_monitor.h
new file mode 100644
index 000000000000..d59507e8cb30
--- /dev/null
+++ b/include/rv/ha_monitor.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025-2028 Red Hat, Inc. Gabriele Monaco <gmonaco@redhat.com>
+ *
+ * Hybrid automata (HA) monitor functions, to be used together
+ * with automata models in C generated by the rvgen tool.
+ *
+ * This type of monitors extends the Deterministic automata (DA) class by
+ * adding a set of environment variables (e.g. clocks) that can be used to
+ * constrain the valid transitions.
+ *
+ * The rvgen tool is available at tools/verification/rvgen/
+ *
+ * For further information, see:
+ * Documentation/trace/rv/monitor_synthesis.rst
+ */
+
+#ifndef _RV_HA_MONITOR_H
+#define _RV_HA_MONITOR_H
+
+#include <rv/automata.h>
+
+#ifndef da_id_type
+#define da_id_type int
+#endif
+
+static inline void ha_monitor_init_env(struct da_monitor *da_mon);
+static inline void ha_monitor_reset_env(struct da_monitor *da_mon);
+static inline void ha_setup_timer(struct ha_monitor *ha_mon);
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon);
+static bool ha_monitor_handle_constraint(struct da_monitor *da_mon,
+ enum states curr_state,
+ enum events event,
+ enum states next_state,
+ da_id_type id);
+#define da_monitor_event_hook ha_monitor_handle_constraint
+#define da_monitor_init_hook ha_monitor_init_env
+#define da_monitor_reset_hook ha_monitor_reset_env
+
+#include <rv/da_monitor.h>
+#include <linux/seq_buf.h>
+
+/* This simplifies things since da_mon and ha_mon coexist in the same union */
+_Static_assert(offsetof(struct ha_monitor, da_mon) == 0,
+ "da_mon must be the first element in an ha_mon!");
+#define to_ha_monitor(da) container_of(da, struct ha_monitor, da_mon)
+
+#define ENV_MAX CONCATENATE(env_max_, MONITOR_NAME)
+#define ENV_MAX_STORED CONCATENATE(env_max_stored_, MONITOR_NAME)
+#define envs CONCATENATE(envs_, MONITOR_NAME)
+
+/* Environment storage before being reset */
+#define ENV_INVALID_VALUE U64_MAX
+/* Error with no event occurs only on timeouts */
+#define EVENT_NONE EVENT_MAX
+#define EVENT_NONE_LBL "none"
+#define ENV_BUFFER_SIZE 64
+
+#ifdef CONFIG_RV_REACTORS
+
+/*
+ * ha_react - trigger the reaction after a failed environment constraint
+ *
+ * The transition from curr_state with event is otherwise valid, but the
+ * environment constraint is false. This function can be called also with no
+ * event from a timer (state constraints only).
+ */
+static void ha_react(enum states curr_state, enum events event, char *env)
+{
+ rv_react(&rv_this,
+ "rv: monitor %s does not allow event %s on state %s with env %s\n",
+ __stringify(MONITOR_NAME),
+ event == EVENT_NONE ? EVENT_NONE_LBL : model_get_event_name(event),
+ model_get_state_name(curr_state), env);
+}
+
+#else /* CONFIG_RV_REACTORS */
+
+static void ha_react(enum states curr_state, enum events event, char *env) { }
+#endif
+
+/*
+ * model_get_state_name - return the (string) name of the given state
+ */
+static char *model_get_env_name(enum envs env)
+{
+ if ((env < 0) || (env >= ENV_MAX))
+ return "INVALID";
+
+ return RV_AUTOMATON_NAME.env_names[env];
+}
+
+/*
+ * Monitors requiring a timer implementation need to request it explicitly.
+ */
+#ifndef HA_TIMER_TYPE
+#define HA_TIMER_TYPE HA_TIMER_NONE
+#endif
+
+#if HA_TIMER_TYPE == HA_TIMER_WHEEL
+static void ha_monitor_timer_callback(struct timer_list *timer);
+#elif HA_TIMER_TYPE == HA_TIMER_HRTIMER
+static enum hrtimer_restart ha_monitor_timer_callback(struct hrtimer *hrtimer);
+#endif
+
+/*
+ * ktime_get_ns is expensive, since we usually don't require precise accounting
+ * of changes within the same event, cache the current time at the beginning of
+ * the constraint handler and use the cache for subsequent calls.
+ * Monitors without ns clocks automatically skip this.
+ */
+#ifdef HA_CLK_NS
+#define ha_get_ns() ktime_get_ns()
+#else
+#define ha_get_ns() 0
+#endif /* HA_CLK_NS */
+
+/* Should be supplied by the monitor */
+static u64 ha_get_env(struct ha_monitor *ha_mon, enum envs env, u64 time_ns);
+static bool ha_verify_constraint(struct ha_monitor *ha_mon,
+ enum states curr_state,
+ enum events event,
+ enum states next_state,
+ u64 time_ns);
+
+/*
+ * ha_monitor_reset_all_stored - reset all environment variables in the monitor
+ */
+static inline void ha_monitor_reset_all_stored(struct ha_monitor *ha_mon)
+{
+ for (int i = 0; i < ENV_MAX_STORED; i++)
+ WRITE_ONCE(ha_mon->env_store[i], ENV_INVALID_VALUE);
+}
+
+/*
+ * ha_monitor_init_env - setup timer and reset all environment
+ *
+ * Called from a hook in the DA start functions, it supplies the da_mon
+ * corresponding to the current ha_mon.
+ * Not all hybrid automata require the timer, still set it for simplicity.
+ */
+static inline void ha_monitor_init_env(struct da_monitor *da_mon)
+{
+ struct ha_monitor *ha_mon = to_ha_monitor(da_mon);
+
+ ha_monitor_reset_all_stored(ha_mon);
+ ha_setup_timer(ha_mon);
+}
+
+/*
+ * ha_monitor_reset_env - stop timer and reset all environment
+ *
+ * Called from a hook in the DA reset functions, it supplies the da_mon
+ * corresponding to the current ha_mon.
+ * Not all hybrid automata require the timer, still clear it for simplicity.
+ */
+static inline void ha_monitor_reset_env(struct da_monitor *da_mon)
+{
+ struct ha_monitor *ha_mon = to_ha_monitor(da_mon);
+
+ /* Initialisation resets the monitor before initialising the timer */
+ if (likely(da_monitoring(da_mon)))
+ ha_cancel_timer(ha_mon);
+}
+
+/*
+ * ha_monitor_env_invalid - return true if env has not been initialised
+ */
+static inline bool ha_monitor_env_invalid(struct ha_monitor *ha_mon, enum envs env)
+{
+ return READ_ONCE(ha_mon->env_store[env]) == ENV_INVALID_VALUE;
+}
+
+static inline void ha_get_env_string(struct seq_buf *s,
+ struct ha_monitor *ha_mon, u64 time_ns)
+{
+ const char *format_str = "%s=%llu";
+
+ for (int i = 0; i < ENV_MAX; i++) {
+ seq_buf_printf(s, format_str, model_get_env_name(i),
+ ha_get_env(ha_mon, i, time_ns));
+ format_str = ",%s=%llu";
+ }
+}
+
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+static inline void ha_trace_error_env(struct ha_monitor *ha_mon,
+ char *curr_state, char *event, char *env,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_env_, MONITOR_NAME)(curr_state, event, env);
+}
+#elif RV_MON_TYPE == RV_MON_PER_TASK || RV_MON_TYPE == RV_MON_PER_OBJ
+
+#define ha_get_target(ha_mon) da_get_target(&ha_mon->da_mon)
+
+static inline void ha_trace_error_env(struct ha_monitor *ha_mon,
+ char *curr_state, char *event, char *env,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_env_, MONITOR_NAME)(id, curr_state, event, env);
+}
+#endif /* RV_MON_TYPE */
+
+/*
+ * ha_get_monitor - return the current monitor
+ */
+#define ha_get_monitor(...) to_ha_monitor(da_get_monitor(__VA_ARGS__))
+
+/*
+ * ha_monitor_handle_constraint - handle the constraint on the current transition
+ *
+ * If the monitor implementation defines a constraint in the transition from
+ * curr_state to event, react and trace appropriately as well as return false.
+ * This function is called from the hook in the DA event handle function and
+ * triggers a failure in the monitor.
+ */
+static bool ha_monitor_handle_constraint(struct da_monitor *da_mon,
+ enum states curr_state,
+ enum events event,
+ enum states next_state,
+ da_id_type id)
+{
+ struct ha_monitor *ha_mon = to_ha_monitor(da_mon);
+ u64 time_ns = ha_get_ns();
+ DECLARE_SEQ_BUF(env_string, ENV_BUFFER_SIZE);
+
+ if (ha_verify_constraint(ha_mon, curr_state, event, next_state, time_ns))
+ return true;
+
+ ha_get_env_string(&env_string, ha_mon, time_ns);
+ ha_react(curr_state, event, env_string.buffer);
+ ha_trace_error_env(ha_mon,
+ model_get_state_name(curr_state),
+ model_get_event_name(event),
+ env_string.buffer, id);
+ return false;
+}
+
+static inline void __ha_monitor_timer_callback(struct ha_monitor *ha_mon)
+{
+ enum states curr_state = READ_ONCE(ha_mon->da_mon.curr_state);
+ DECLARE_SEQ_BUF(env_string, ENV_BUFFER_SIZE);
+ u64 time_ns = ha_get_ns();
+
+ ha_get_env_string(&env_string, ha_mon, time_ns);
+ ha_react(curr_state, EVENT_NONE, env_string.buffer);
+ ha_trace_error_env(ha_mon, model_get_state_name(curr_state),
+ EVENT_NONE_LBL, env_string.buffer,
+ da_get_id(&ha_mon->da_mon));
+
+ da_monitor_reset(&ha_mon->da_mon);
+}
+
+/*
+ * The clock variables have 2 different representations in the env_store:
+ * - The guard representation is the timestamp of the last reset
+ * - The invariant representation is the timestamp when the invariant expires
+ * As the representations are incompatible, care must be taken when switching
+ * between them: the invariant representation can only be used when starting a
+ * timer when the previous representation was guard (e.g. no other invariant
+ * started since the last reset operation).
+ * Likewise, switching from invariant to guard representation without a reset
+ * can be done only by subtracting the exact value used to start the invariant.
+ *
+ * Reading the environment variable (ha_get_clk) also reflects this difference
+ * any reads in states that have an invariant return the (possibly negative)
+ * time since expiration, other reads return the time since last reset.
+ */
+
+/*
+ * Helper functions for env variables describing clocks with ns granularity
+ */
+static inline u64 ha_get_clk_ns(struct ha_monitor *ha_mon, enum envs env, u64 time_ns)
+{
+ return time_ns - READ_ONCE(ha_mon->env_store[env]);
+}
+static inline void ha_reset_clk_ns(struct ha_monitor *ha_mon, enum envs env, u64 time_ns)
+{
+ WRITE_ONCE(ha_mon->env_store[env], time_ns);
+}
+static inline void ha_set_invariant_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 value, u64 time_ns)
+{
+ WRITE_ONCE(ha_mon->env_store[env], time_ns + value);
+}
+static inline bool ha_check_invariant_ns(struct ha_monitor *ha_mon,
+ enum envs env, u64 time_ns)
+{
+ return READ_ONCE(ha_mon->env_store[env]) >= time_ns;
+}
+/*
+ * ha_invariant_passed_ns - prepare the invariant and return the time since reset
+ */
+static inline u64 ha_invariant_passed_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = 0;
+
+ if (env < 0 || env >= ENV_MAX_STORED)
+ return 0;
+ if (ha_monitor_env_invalid(ha_mon, env))
+ return 0;
+ passed = ha_get_env(ha_mon, env, time_ns);
+ ha_set_invariant_ns(ha_mon, env, expire - passed, time_ns);
+ return passed;
+}
+
+/*
+ * Helper functions for env variables describing clocks with jiffy granularity
+ */
+static inline u64 ha_get_clk_jiffy(struct ha_monitor *ha_mon, enum envs env)
+{
+ return get_jiffies_64() - READ_ONCE(ha_mon->env_store[env]);
+}
+static inline void ha_reset_clk_jiffy(struct ha_monitor *ha_mon, enum envs env)
+{
+ WRITE_ONCE(ha_mon->env_store[env], get_jiffies_64());
+}
+static inline void ha_set_invariant_jiffy(struct ha_monitor *ha_mon,
+ enum envs env, u64 value)
+{
+ WRITE_ONCE(ha_mon->env_store[env], get_jiffies_64() + value);
+}
+static inline bool ha_check_invariant_jiffy(struct ha_monitor *ha_mon,
+ enum envs env, u64 time_ns)
+{
+ return time_after64(READ_ONCE(ha_mon->env_store[env]), get_jiffies_64());
+
+}
+/*
+ * ha_invariant_passed_jiffy - prepare the invariant and return the time since reset
+ */
+static inline u64 ha_invariant_passed_jiffy(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = 0;
+
+ if (env < 0 || env >= ENV_MAX_STORED)
+ return 0;
+ if (ha_monitor_env_invalid(ha_mon, env))
+ return 0;
+ passed = ha_get_env(ha_mon, env, time_ns);
+ ha_set_invariant_jiffy(ha_mon, env, expire - passed);
+ return passed;
+}
+
+/*
+ * Retrieve the last reset time (guard representation) from the invariant
+ * representation (expiration).
+ * It is the caller's responsibility to make sure the storage was actually in the
+ * invariant representation (e.g. the current state has an invariant).
+ * The provided value must be the same used when starting the invariant.
+ *
+ * This function's access to the storage is NOT atomic, given how rarely it
+ * is used. If a monitor allows writes concurrent to this, other things are
+ * likely broken and the model needs rethinking or additional locking.
+ */
+static inline void ha_inv_to_guard(struct ha_monitor *ha_mon, enum envs env,
+ u64 value, u64 time_ns)
+{
+ WRITE_ONCE(ha_mon->env_store[env], READ_ONCE(ha_mon->env_store[env]) - value);
+}
+
+#if HA_TIMER_TYPE == HA_TIMER_WHEEL
+/*
+ * Helper functions to handle the monitor timer.
+ * Not all monitors require a timer, in such case the timer will be set up but
+ * never armed.
+ * Timers start since the last reset of the supplied env or from now if env is
+ * not an environment variable. If env was not initialised no timer starts.
+ * Timers can expire on any CPU unless the monitor is per-cpu,
+ * where we assume every event occurs on the local CPU.
+ */
+static void ha_monitor_timer_callback(struct timer_list *timer)
+{
+ struct ha_monitor *ha_mon = container_of(timer, struct ha_monitor, timer);
+
+ __ha_monitor_timer_callback(ha_mon);
+}
+static inline void ha_setup_timer(struct ha_monitor *ha_mon)
+{
+ int mode = 0;
+
+ if (RV_MON_TYPE == RV_MON_PER_CPU)
+ mode |= TIMER_PINNED;
+ timer_setup(&ha_mon->timer, ha_monitor_timer_callback, mode);
+}
+static inline void ha_start_timer_jiffy(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = ha_invariant_passed_jiffy(ha_mon, env, expire, time_ns);
+
+ mod_timer(&ha_mon->timer, get_jiffies_64() + expire - passed);
+}
+static inline void ha_start_timer_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = ha_invariant_passed_ns(ha_mon, env, expire, time_ns);
+
+ ha_start_timer_jiffy(ha_mon, ENV_MAX_STORED,
+ nsecs_to_jiffies(expire - passed + TICK_NSEC - 1), time_ns);
+}
+/*
+ * ha_cancel_timer - Cancel the timer
+ *
+ * Returns:
+ * * 1 when the timer was active
+ * * 0 when the timer was not active or running a callback
+ */
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon)
+{
+ return timer_delete(&ha_mon->timer);
+}
+#elif HA_TIMER_TYPE == HA_TIMER_HRTIMER
+/*
+ * Helper functions to handle the monitor timer.
+ * Not all monitors require a timer, in such case the timer will be set up but
+ * never armed.
+ * Timers start since the last reset of the supplied env or from now if env is
+ * not an environment variable. If env was not initialised no timer starts.
+ * Timers can expire on any CPU unless the monitor is per-cpu,
+ * where we assume every event occurs on the local CPU.
+ */
+static enum hrtimer_restart ha_monitor_timer_callback(struct hrtimer *hrtimer)
+{
+ struct ha_monitor *ha_mon = container_of(hrtimer, struct ha_monitor, hrtimer);
+
+ __ha_monitor_timer_callback(ha_mon);
+ return HRTIMER_NORESTART;
+}
+static inline void ha_setup_timer(struct ha_monitor *ha_mon)
+{
+ hrtimer_setup(&ha_mon->hrtimer, ha_monitor_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+}
+static inline void ha_start_timer_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ int mode = HRTIMER_MODE_REL_HARD;
+ u64 passed = ha_invariant_passed_ns(ha_mon, env, expire, time_ns);
+
+ if (RV_MON_TYPE == RV_MON_PER_CPU)
+ mode |= HRTIMER_MODE_PINNED;
+ hrtimer_start(&ha_mon->hrtimer, ns_to_ktime(expire - passed), mode);
+}
+static inline void ha_start_timer_jiffy(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = ha_invariant_passed_jiffy(ha_mon, env, expire, time_ns);
+
+ ha_start_timer_ns(ha_mon, ENV_MAX_STORED,
+ jiffies_to_nsecs(expire - passed), time_ns);
+}
+/*
+ * ha_cancel_timer - Cancel the timer
+ *
+ * Returns:
+ * * 1 when the timer was active
+ * * 0 when the timer was not active or running a callback
+ */
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon)
+{
+ return hrtimer_try_to_cancel(&ha_mon->hrtimer) == 1;
+}
+#else /* HA_TIMER_NONE */
+/*
+ * Start function is intentionally not defined, monitors using timers must
+ * set HA_TIMER_TYPE to either HA_TIMER_WHEEL or HA_TIMER_HRTIMER.
+ */
+static inline void ha_setup_timer(struct ha_monitor *ha_mon) { }
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index dc0789c20333..5c64b3e02211 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
- * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2025, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef ABI_BPMP_ABI_H
@@ -74,6 +74,7 @@
/**
* @ingroup MRQ_Format
+ *
* Request an answer from the peer.
* This should be set in mrq_request::flags for all requests targetted
* at BPMP. For requests originating in BPMP, this flag is optional except
@@ -85,6 +86,7 @@
/**
* @ingroup MRQ_Format
+ *
* Ring the sender's doorbell when responding. This should be set unless
* the sender wants to poll the underlying communications layer directly.
*
@@ -94,7 +96,9 @@
/**
* @ingroup MRQ_Format
- * CRC present
+ *
+ * This is set in mrq_request::flags for requests that have CRC present and
+ * correspondingly in mrq_response::flags for responses that have CRC present.
*/
#define BPMP_MAIL_CRC_PRESENT (1U << 2U)
@@ -127,91 +131,319 @@ struct mrq_request {
* crc16, xid and length fields are present when set.
* Some platform configurations, especially when targeted to applications requiring
* functional safety, mandate this option being set or otherwise will respond with
- * -BPMP_EBADMSG and ignore the request.
+ * -#BPMP_EBADMSG and ignore the request.
*
* **xid** is a transaction ID.
*
* Only used when #BPMP_MAIL_CRC_PRESENT is set.
*
* **payload_length** of the message expressed in bytes without the size of this header.
- * See table below for minimum accepted payload lengths for each MRQ.
- * Note: For DMCE communication, this field expresses the length as a multiple of 4 bytes
- * rather than bytes.
+ * See tables below for minimum accepted payload lengths for each MRQ.
*
* Only used when #BPMP_MAIL_CRC_PRESENT is set.
*
- * | MRQ | CMD | minimum payload length
- * | -------------------- | ------------------------------------ | ------------------------------------------ |
- * | MRQ_PING | | 4 |
- * | MRQ_THREADED_PING | | 4 |
- * | MRQ_RESET | any | 8 |
- * | MRQ_I2C | | 12 + cmd_i2c_xfer_request.data_size |
- * | MRQ_CLK | CMD_CLK_GET_RATE | 4 |
- * | MRQ_CLK | CMD_CLK_SET_RATE | 16 |
- * | MRQ_CLK | CMD_CLK_ROUND_RATE | 16 |
- * | MRQ_CLK | CMD_CLK_GET_PARENT | 4 |
- * | MRQ_CLK | CMD_CLK_SET_PARENT | 8 |
- * | MRQ_CLK | CMD_CLK_ENABLE | 4 |
- * | MRQ_CLK | CMD_CLK_DISABLE | 4 |
- * | MRQ_CLK | CMD_CLK_IS_ENABLED | 4 |
- * | MRQ_CLK | CMD_CLK_GET_ALL_INFO | 4 |
- * | MRQ_CLK | CMD_CLK_GET_MAX_CLK_ID | 4 |
- * | MRQ_CLK | CMD_CLK_GET_FMAX_AT_VMIN | 4 |
- * | MRQ_QUERY_ABI | | 4 |
- * | MRQ_PG | CMD_PG_QUERY_ABI | 12 |
- * | MRQ_PG | CMD_PG_SET_STATE | 12 |
- * | MRQ_PG | CMD_PG_GET_STATE | 8 |
- * | MRQ_PG | CMD_PG_GET_NAME | 8 |
- * | MRQ_PG | CMD_PG_GET_MAX_ID | 8 |
- * | MRQ_THERMAL | CMD_THERMAL_QUERY_ABI | 8 |
- * | MRQ_THERMAL | CMD_THERMAL_GET_TEMP | 8 |
- * | MRQ_THERMAL | CMD_THERMAL_SET_TRIP | 20 |
- * | MRQ_THERMAL | CMD_THERMAL_GET_NUM_ZONES | 4 |
- * | MRQ_THERMAL | CMD_THERMAL_GET_THERMTRIP | 8 |
- * | MRQ_CPU_VHINT | | 8 |
- * | MRQ_ABI_RATCHET | | 2 |
- * | MRQ_EMC_DVFS_LATENCY | | 8 |
- * | MRQ_EMC_DVFS_EMCHUB | | 8 |
- * | MRQ_EMC_DISP_RFL | | 4 |
- * | MRQ_BWMGR | CMD_BWMGR_QUERY_ABI | 8 |
- * | MRQ_BWMGR | CMD_BWMGR_CALC_RATE | 8 + 8 * bwmgr_rate_req.num_iso_clients |
- * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_QUERY_ABI | 8 |
- * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_CALCULATE_LA | 16 |
- * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_SET_LA | 16 |
- * | MRQ_ISO_CLIENT | CMD_ISO_CLIENT_GET_MAX_BW | 8 |
- * | MRQ_CPU_NDIV_LIMITS | | 4 |
- * | MRQ_CPU_AUTO_CC3 | | 4 |
- * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
- * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_READ | 5 |
- * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
- * | MRQ_RINGBUF_CONSOLE | CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
- * | MRQ_STRAP | STRAP_SET | 12 |
- * | MRQ_UPHY | CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
- * | MRQ_UPHY | CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
- * | MRQ_UPHY | CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
- * | MRQ_UPHY | CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
- * | MRQ_UPHY | CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
- * | MRQ_FMON | CMD_FMON_GEAR_CLAMP | 16 |
- * | MRQ_FMON | CMD_FMON_GEAR_FREE | 4 |
- * | MRQ_FMON | CMD_FMON_GEAR_GET | 4 |
- * | MRQ_FMON | CMD_FMON_FAULT_STS_GET | 8 |
- * | MRQ_EC | CMD_EC_STATUS_EX_GET | 12 |
- * | MRQ_QUERY_FW_TAG | | 0 |
- * | MRQ_DEBUG | CMD_DEBUG_OPEN_RO | 4 + length of cmd_debug_fopen_request.name |
- * | MRQ_DEBUG | CMD_DEBUG_OPEN_WO | 4 + length of cmd_debug_fopen_request.name |
- * | MRQ_DEBUG | CMD_DEBUG_READ | 8 |
- * | MRQ_DEBUG | CMD_DEBUG_WRITE | 12 + cmd_debug_fwrite_request.datalen |
- * | MRQ_DEBUG | CMD_DEBUG_CLOSE | 8 |
- * | MRQ_TELEMETRY | | 8 |
- * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_QUERY_ABI | 8 |
- * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_SET | 20 |
- * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_GET | 16 |
- * | MRQ_PWR_LIMIT | CMD_PWR_LIMIT_CURR_CAP | 8 |
- * | MRQ_GEARS | | 0 |
- * | MRQ_BWMGR_INT | CMD_BWMGR_INT_QUERY_ABI | 8 |
- * | MRQ_BWMGR_INT | CMD_BWMGR_INT_CALC_AND_SET | 16 |
- * | MRQ_BWMGR_INT | CMD_BWMGR_INT_CAP_SET | 8 |
- * | MRQ_OC_STATUS | | 0 |
+ * | MRQ | Sub-command | Minimum payload length
+ * | --------------------- | ------------------------------------ | ------------------------------------------------------- |
+ * | #MRQ_PING | - | 4 |
+ * | #MRQ_THREADED_PING | - | 4 |
+ * | #MRQ_RESET | any | 8 |
+ * | #MRQ_I2C | - | 12 + cmd_i2c_xfer_request.data_size |
+ * | #MRQ_CLK | #CMD_CLK_GET_RATE | 4 |
+ * | #MRQ_CLK | #CMD_CLK_SET_RATE | 16 |
+ * | #MRQ_CLK | #CMD_CLK_ROUND_RATE | 16 |
+ * | #MRQ_CLK | #CMD_CLK_GET_PARENT | 4 |
+ * | #MRQ_CLK | #CMD_CLK_SET_PARENT | 8 |
+ * | #MRQ_CLK | #CMD_CLK_ENABLE | 4 |
+ * | #MRQ_CLK | #CMD_CLK_DISABLE | 4 |
+ * | #MRQ_CLK | #CMD_CLK_IS_ENABLED | 4 |
+ * | #MRQ_CLK | #CMD_CLK_GET_ALL_INFO | 4 |
+ * | #MRQ_CLK | #CMD_CLK_GET_MAX_CLK_ID | 4 |
+ * | #MRQ_CLK | #CMD_CLK_GET_FMAX_AT_VMIN | 4 |
+ * | #MRQ_QUERY_ABI | - | 4 |
+ * | #MRQ_PG | #CMD_PG_QUERY_ABI | 12 |
+ * | #MRQ_PG | #CMD_PG_SET_STATE | 12 |
+ * | #MRQ_PG | #CMD_PG_GET_STATE | 8 |
+ * | #MRQ_PG | #CMD_PG_GET_NAME | 8 |
+ * | #MRQ_PG | #CMD_PG_GET_MAX_ID | 8 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_QUERY_ABI | 8 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_GET_TEMP | 8 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_GET_NUM_ZONES | 4 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_GET_THERMTRIP | 8 |
+ * | #MRQ_ABI_RATCHET | - | 2 |
+ * | #MRQ_EMC_DVFS_LATENCY | - | 8 |
+ * | #MRQ_QUERY_FW_TAG | - | 0 |
+ * | #MRQ_DEBUG | #CMD_DEBUG_OPEN_RO | 4 + length of cmd_debug_fopen_request.name |
+ * | #MRQ_DEBUG | #CMD_DEBUG_OPEN_WO | 4 + length of cmd_debug_fopen_request.name |
+ * | #MRQ_DEBUG | #CMD_DEBUG_READ | 8 |
+ * | #MRQ_DEBUG | #CMD_DEBUG_WRITE | 12 + cmd_debug_fwrite_request.datalen |
+ * | #MRQ_DEBUG | #CMD_DEBUG_CLOSE | 8 |
+ *
+ * @cond (bpmp_t186)
+ * The following additional MRQ is supported on the T186 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | --------------------- | ------------------------------------- | ------------------------------------- |
+ * | #MRQ_CPU_VHINT | - | 8 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_SET_TRIP | 20 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * @endcond
+ *
+ * @cond (bpmp_t194)
+ * The following additional MRQs are supported on the T194 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | --------------------- | ------------------------------------- | ------------------------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_STRAP | #STRAP_SET | 12 |
+ * | #MRQ_CPU_AUTO_CC3 | - | 4 |
+ * | #MRQ_EC | #CMD_EC_STATUS_EX_GET | 12 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_CLAMP | 16 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_FREE | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_GET | 4 |
+ * | #MRQ_FMON | #CMD_FMON_FAULT_STS_GET | 8 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_SET_TRIP | 20 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * @endcond
+ *
+ * @cond (bpmp_safe && bpmp_t234)
+ * The following additional MRQ is supported on functional-safety
+ * builds for the T234 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | --------------------- | ------------------------------------- | ------------------------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_CLAMP | 16 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_FREE | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_GET | 4 |
+ * | #MRQ_FMON | #CMD_FMON_FAULT_STS_GET | 8 |
+ * | #MRQ_EMC_DVFS_EMCHUB | - | 8 |
+ * | #MRQ_EMC_DISP_RFL | - | 4 |
+ *
+ * @endcond
+ *
+ * @cond (!bpmp_safe && bpmp_t234)
+ *
+ * The following additional MRQs are supported on non-functional-safety
+ * builds for the T234 and T238 -platforms:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | --------------------- | ------------------------------------- | --------------------------------------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_STRAP | #STRAP_SET | 12 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_SET_TRIP | 20 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_CLAMP | 16 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_FREE | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_GET | 4 |
+ * | #MRQ_FMON | #CMD_FMON_FAULT_STS_GET | 8 |
+ * | #MRQ_EMC_DVFS_EMCHUB | - | 8 |
+ * | #MRQ_EMC_DISP_RFL | - | 4 |
+ * | #MRQ_BWMGR | #CMD_BWMGR_QUERY_ABI | 8 |
+ * | #MRQ_BWMGR | #CMD_BWMGR_CALC_RATE | 8 + 8 * cmd_bwmgr_calc_rate_request.num_iso_clients |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_QUERY_ABI | 8 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_CALCULATE_LA | 16 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_SET_LA | 16 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_GET_MAX_BW | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_QUERY_ABI | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CALC_AND_SET | 16 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CAP_SET | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_GET_LAST_REQUEST | 9 |
+ * | #MRQ_OC_STATUS | - | 0 |
+ * @endcond
+ *
+ * @cond bpmp_t238
+ * The following additional MRQs are supported on T238 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | --------------------- | ------------------------------------- | --------------------------------------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_STRAP | #STRAP_SET | 12 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_SET_TRIP | 20 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_QUERY_ABI | 8 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_READ | 5 |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_WRITE | 5 + cmd_ringbuf_console_write_req.len |
+ * | #MRQ_RINGBUF_CONSOLE | #CMD_RINGBUF_CONSOLE_GET_FIFO | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_CLAMP | 16 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_FREE | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_GET | 4 |
+ * | #MRQ_FMON | #CMD_FMON_FAULT_STS_GET | 8 |
+ * | #MRQ_EMC_DVFS_EMCHUB | - | 8 |
+ * | #MRQ_EMC_DISP_RFL | - | 4 |
+ * | #MRQ_BWMGR | #CMD_BWMGR_QUERY_ABI | 8 |
+ * | #MRQ_BWMGR | #CMD_BWMGR_CALC_RATE | 8 + 8 * cmd_bwmgr_calc_rate_request.num_iso_clients |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_QUERY_ABI | 8 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_CALCULATE_LA | 16 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_SET_LA | 16 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_GET_MAX_BW | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_QUERY_ABI | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CALC_AND_SET | 16 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CAP_SET | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_GET_LAST_REQUEST | 9 |
+ * | #MRQ_OC_STATUS | - | 0 |
+ * | #MRQ_THROTTLE | #CMD_THROTTLE_SET_OC_CONFIG | 5 |
+ * @endcond
+ *
+ * @cond (bpmp_th500)
+ * The following additional MRQs are supported on the TH500 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | -------------------- | ------------------------------------- | ---------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_THERMAL | #CMD_THERMAL_SET_TRIP | 20 |
+ * | #MRQ_STRAP | #STRAP_SET | 12 |
+ * | #MRQ_SHUTDOWN | - | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | 24 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_LANE_MARGIN_STATUS | 4 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT | 5 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_CONTROLLER_STATE | 6 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF | 5 |
+ * | #MRQ_UPHY | #CMD_UPHY_PCIE_CONFIG_VDM | 3 |
+ * | #MRQ_TELEMETRY | - | 8 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_QUERY_ABI | 8 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_SET | 20 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_GET | 16 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_CURR_CAP | 8 |
+ * | #MRQ_GEARS | - | 0 |
+ * | #MRQ_C2C | #CMD_C2C_QUERY_ABI | 8 |
+ * | #MRQ_C2C | #CMD_C2C_START_INITIALIZATION | 5 |
+ * | #MRQ_C2C | #CMD_C2C_GET_STATUS | 4 |
+ * | #MRQ_C2C | #CMD_C2C_HOTRESET_PREP | 5 |
+ * | #MRQ_C2C | #CMD_C2C_START_HOTRESET | 5 |
+ * | #MRQ_THROTTLE | #CMD_THROTTLE_QUERY_ABI | 4 |
+ * | #MRQ_THROTTLE | #CMD_THROTTLE_GET_CHIPTHROT_STATUS | 4 |
+ * | #MRQ_PWRMODEL | #CMD_PWRMODEL_QUERY_ABI | 8 |
+ * | #MRQ_PWRMODEL | #CMD_PWRMODEL_PWR_GET | 16 |
+ * | #MRQ_PWR_CNTRL | #CMD_PWR_CNTRL_QUERY_ABI | 8 |
+ * | #MRQ_PWR_CNTRL | #CMD_PWR_CNTRL_BYPASS_SET | 12 |
+ * | #MRQ_PWR_CNTRL | #CMD_PWR_CNTRL_BYPASS_GET | 8 |
+ * @endcond
+ *
+ * @cond (bpmp_tb500)
+ * The following additional MRQs are supported on the TB500 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | -------------------- | ---------------------------------------- | ---------------------- |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_QUERY_ABI | 8 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_SET | 20 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_GET | 16 |
+ * | #MRQ_PWR_LIMIT | #CMD_PWR_LIMIT_CURR_CAP | 8 |
+ * | #MRQ_TELEMETRY_EX | #CMD_TELEMETRY_EX_QUERY_ABI | 8 |
+ * | #MRQ_TELEMETRY_EX | #CMD_TELEMETRY_EX_BASE_SZ_GET | 12 |
+ * | #MRQ_THROTTLE | #CMD_THROTTLE_GET_CHIPTHROT_STATUS | 4 |
+ * | #MRQ_C2C | #CMD_C2C_QUERY_ABI | 8 |
+ * | #MRQ_C2C | #CMD_C2C_START_INITIALIZATION | 5 |
+ * | #MRQ_C2C | #CMD_C2C_GET_STATUS | 4 |
+ * | #MRQ_C2C | #CMD_C2C_HOTRESET_PREP | 5 |
+ * | #MRQ_C2C | #CMD_C2C_START_HOTRESET | 5 |
+ * | MRQ_HWPM | CMD_HWPM_QUERY_ABI | 4 |
+ * | MRQ_HWPM | CMD_HWPM_IPMU_SET_TRIGGERS | 120 |
+ * | MRQ_HWPM | CMD_HWPM_IPMU_SET_PAYLOADS_SHIFTS | 120 |
+ * | MRQ_HWPM | CMD_HWPM_IPMU_GET_MAX_PAYLOADS | 0 |
+ * | MRQ_HWPM | CMD_HWPM_NVTHERM_SET_SAMPLE_RATE | 4 |
+ * | MRQ_HWPM | CMD_HWPM_NVTHERM_SET_BUBBLE_INTERVAL | 4 |
+ * | MRQ_HWPM | CMD_HWPM_NVTHERM_SET_FLEX_CHANNELS | 120 |
+ * | MRQ_HWPM | CMD_HWPM_ISENSE_GET_SENSOR_NAME | 4 |
+ * | MRQ_HWPM | CMD_HWPM_ISENSE_GET_SENSOR_CHANNEL | 4 |
+ * | MRQ_HWPM | CMD_HWPM_ISENSE_GET_SENSOR_SCALE_FACTOR | 4 |
+ * | MRQ_HWPM | CMD_HWPM_ISENSE_GET_SENSOR_OFFSET | 4 |
+ * | MRQ_HWPM | CMD_HWPM_ISENSE_GET_SUM_BLOCK_NAME | 4 |
+ * | MRQ_HWPM | CMD_HWPM_ISENSE_GET_SUM_BLOCK_INPUTS | 4 |
+ * | MRQ_DVFS | CMD_DVFS_QUERY_ABI | 4 |
+ * | MRQ_DVFS | CMD_DVFS_SET_CTRL_STATE | 8 |
+ * | MRQ_DVFS | CMD_DVFS_SET_MGR_STATE | 8 |
+ * | MRQ_PPP_PROFILE | CMD_PPP_PROFILE_QUERY_ABI | 8 |
+ * | MRQ_PPP_PROFILE | CMD_PPP_PROFILE_QUERY_MASKS | 8 |
+ * | MRQ_PPP_PROFILE | CMD_PPP_CORE_QUERY_CPU_MASK | 8 |
+ * | MRQ_PPP_PROFILE | CMD_PPP_AVAILABLE_QUERY | 4 |
+ * @endcond
+ *
+ * @cond (bpmp_safe && bpmp_t264)
+ * The following additional MRQs are supported on functional-safety
+ * builds for the T264 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | -------------------- | --------------------------------- | ---------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_STRAP | #STRAP_SET | 12 |
+ * | #MRQ_SHUTDOWN | - | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_CLAMP | 16 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_FREE | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_GET | 4 |
+ * | #MRQ_FMON | #CMD_FMON_FAULT_STS_GET | 8 |
+ * | #MRQ_PCIE | #CMD_PCIE_EP_CONTROLLER_INIT | 5 |
+ * | #MRQ_PCIE | #CMD_PCIE_EP_CONTROLLER_OFF | 5 |
+ * | #MRQ_CR7 | #CMD_CR7_ENTRY | 12 |
+ * | #MRQ_CR7 | #CMD_CR7_EXIT | 12 |
+ * | #MRQ_SLC | #CMD_SLC_QUERY_ABI | 8 |
+ * | #MRQ_SLC | #CMD_SLC_BYPASS_SET | 8 |
+ * | #MRQ_SLC | #CMD_SLC_BYPASS_GET | 4 |
+ * @endcond
+ *
+ * @cond (!bpmp_safe && bpmp_t264)
+ * The following additional MRQs are supported on non-functional-safety
+ * builds for the T264 platform:
+ *
+ * | MRQ | Sub-command | Minimum payload length |
+ * | -------------------- | --------------------------------- | ---------------------- |
+ * | #MRQ_CPU_NDIV_LIMITS | - | 4 |
+ * | #MRQ_STRAP | #STRAP_SET | 12 |
+ * | #MRQ_SHUTDOWN | - | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_CLAMP | 16 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_FREE | 4 |
+ * | #MRQ_FMON | #CMD_FMON_GEAR_GET | 4 |
+ * | #MRQ_FMON | #CMD_FMON_FAULT_STS_GET | 8 |
+ * | #MRQ_OC_STATUS | - | 0 |
+ * | #MRQ_PCIE | #CMD_PCIE_EP_CONTROLLER_INIT | 5 |
+ * | #MRQ_PCIE | #CMD_PCIE_EP_CONTROLLER_OFF | 5 |
+ * | #MRQ_PCIE | #CMD_PCIE_RP_CONTROLLER_OFF | 5 |
+ * | #MRQ_CR7 | #CMD_CR7_ENTRY | 12 |
+ * | #MRQ_CR7 | #CMD_CR7_EXIT | 12 |
+ * | #MRQ_SLC | #CMD_SLC_QUERY_ABI | 8 |
+ * | #MRQ_SLC | #CMD_SLC_BYPASS_SET | 8 |
+ * | #MRQ_SLC | #CMD_SLC_BYPASS_GET | 4 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_QUERY_ABI | 8 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_CALCULATE_LA | 16 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_SET_LA | 16 |
+ * | #MRQ_ISO_CLIENT | #CMD_ISO_CLIENT_GET_MAX_BW | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_QUERY_ABI | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CALC_AND_SET | 16 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CAP_SET | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_CURR_AVAILABLE_BW | 8 |
+ * | #MRQ_BWMGR_INT | #CMD_BWMGR_INT_GET_LAST_REQUEST | 9 |
+ * @endcond
*
* **crc16**
*
@@ -220,7 +452,7 @@ struct mrq_request {
* including this header. However the crc16 field is considered to be set to 0 when
* calculating the CRC. Only used when #BPMP_MAIL_CRC_PRESENT is set. If
* #BPMP_MAIL_CRC_PRESENT is set and this field does not match the CRC as
- * calculated by BPMP, -BPMP_EBADMSG will be returned and the request will
+ * calculated by BPMP, -#BPMP_EBADMSG will be returned and the request will
* be ignored. See code snippet below on how to calculate the CRC.
*
* @code
@@ -322,6 +554,9 @@ struct mrq_response {
#define MRQ_CPU_VHINT 28U
#define MRQ_ABI_RATCHET 29U
#define MRQ_EMC_DVFS_LATENCY 31U
+//adoc: tag::bpmp_dmce_mrq_shutdown[]
+#define MRQ_SHUTDOWN 49U
+//adoc: end::bpmp_dmce_mrq_shutdown[]
#define MRQ_RINGBUF_CONSOLE 65U
#define MRQ_PG 66U
#define MRQ_CPU_NDIV_LIMITS 67U
@@ -341,48 +576,31 @@ struct mrq_response {
#define MRQ_GEARS 82U
#define MRQ_BWMGR_INT 83U
#define MRQ_OC_STATUS 84U
-
-/** @cond DEPRECATED */
-#define MRQ_RESERVED_2 2U
-#define MRQ_RESERVED_3 3U
-#define MRQ_RESERVED_4 4U
-#define MRQ_RESERVED_5 5U
-#define MRQ_RESERVED_6 6U
-#define MRQ_RESERVED_7 7U
-#define MRQ_RESERVED_8 8U
-#define MRQ_RESERVED_10 10U
-#define MRQ_RESERVED_11 11U
-#define MRQ_RESERVED_12 12U
-#define MRQ_RESERVED_13 13U
-#define MRQ_RESERVED_14 14U
-#define MRQ_RESERVED_15 15U
-#define MRQ_RESERVED_16 16U
-#define MRQ_RESERVED_17 17U
-#define MRQ_RESERVED_18 18U
-#define MRQ_RESERVED_24 24U
-#define MRQ_RESERVED_25 25U
-#define MRQ_RESERVED_26 26U
-#define MRQ_RESERVED_30 30U
-#define MRQ_RESERVED_64 64U
-#define MRQ_RESERVED_74 74U
-/** @endcond DEPRECATED */
-
-/** @} */
+#define MRQ_C2C 85U
+#define MRQ_THROTTLE 86U
+#define MRQ_PWRMODEL 87U
+#define MRQ_PCIE 88U
+#define MRQ_PWR_CNTRL 89U
+#define MRQ_CR7 90U
+#define MRQ_SLC 91U
+#define MRQ_TELEMETRY_EX 92U
+#define MRQ_HWPM 93U
+#define MRQ_DVFS 94U
+#define MRQ_PPP_PROFILE 95U
/**
- * @ingroup MRQ_Codes
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 84U
+#define MAX_CPU_MRQ_ID 95U
+
+/** @} */
/**
* @addtogroup MRQ_Payloads
* @{
* @defgroup Ping Ping
* @defgroup Query_Tag Query Tag
- * @defgroup Module Loadable Modules
- * @defgroup Trace Trace
* @defgroup Debugfs Debug File System
* @defgroup Reset Reset
* @defgroup I2C I2C
@@ -390,6 +608,7 @@ struct mrq_response {
* @defgroup ABI_info ABI Info
* @defgroup Powergating Power Gating
* @defgroup Thermal Thermal
+ * @defgroup Throttle Throttle
* @defgroup OC_status OC status
* @defgroup Vhint CPU Voltage hint
* @defgroup EMC EMC
@@ -405,7 +624,22 @@ struct mrq_response {
* @defgroup Telemetry Telemetry
* @defgroup Pwrlimit PWR_LIMIT
* @defgroup Gears Gears
+ * @defgroup Shutdown Shutdown
* @defgroup BWMGR_INT Bandwidth Manager Integrated
+ * @defgroup C2C C2C
+ * @defgroup Pwrmodel Power Model
+ * @defgroup Pwrcntrl Power Controllers
+ * @cond bpmp_t264
+ * * @defgroup PCIE PCIE
+ * * @defgroup CR7 CR7
+ * * @defgroup Slc Slc
+ * @endcond
+ * @cond bpmp_tb500
+ * * @defgroup Telemetry_ex Telemetry Expanded
+ * * @defgroup HWPM Hardware Performance Monitoring
+ * * @defgroup DVFS Dynamic Voltage and Frequency Scaling
+ * * @defgroup PPP power/performance profiles
+ * @endcond
* @} MRQ_Payloads
*/
@@ -414,7 +648,6 @@ struct mrq_response {
* @def MRQ_PING
* @brief A simple ping
*
- * * Platforms: All
* * Initiators: Any
* * Targets: Any
* * Request Payload: @ref mrq_ping_request
@@ -424,7 +657,6 @@ struct mrq_response {
* @def MRQ_THREADED_PING
* @brief A deeper ping
*
- * * Platforms: All
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_ping_request
@@ -441,8 +673,8 @@ struct mrq_response {
* @brief Request with #MRQ_PING
*
* Used by the sender of an #MRQ_PING message to request a pong from
- * recipient. The response from the recipient is computed based on
- * #challenge.
+ * recipient. The response from the recipient is computed based on the
+ * mrq_ping_request::challenge -value.
*/
struct mrq_ping_request {
/** @brief Arbitrarily chosen value */
@@ -470,7 +702,7 @@ struct mrq_ping_response {
*
* @deprecated Use #MRQ_QUERY_FW_TAG instead.
*
- * * Platforms: All
+ * @details
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_query_tag_request
@@ -483,7 +715,7 @@ struct mrq_ping_response {
* @brief Request with #MRQ_QUERY_TAG
*
* @deprecated This structure will be removed in future version.
- * Use MRQ_QUERY_FW_TAG instead.
+ * Use #MRQ_QUERY_FW_TAG instead.
*/
struct mrq_query_tag_request {
/** @brief Base address to store the firmware tag */
@@ -496,7 +728,6 @@ struct mrq_query_tag_request {
* @def MRQ_QUERY_FW_TAG
* @brief Query BPMP firmware's tag (i.e. unique identifier)
*
- * * Platforms: All
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: N/A
@@ -510,10 +741,9 @@ struct mrq_query_tag_request {
*
* Sent in response to #MRQ_QUERY_FW_TAG message. #tag contains the unique
* identifier for the version of firmware issuing the reply.
- *
*/
struct mrq_query_fw_tag_response {
- /** @brief Array to store tag information */
+ /** @brief Array to store tag information */
uint8_t tag[32];
} BPMP_ABI_PACKED;
@@ -532,9 +762,8 @@ struct mrq_threaded_ping_response {
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
- * @deprecated use MRQ_DEBUG instead.
+ * @deprecated Use #MRQ_DEBUG instead.
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_debugfs_request
@@ -626,9 +855,9 @@ struct cmd_debugfs_dumpdir_response {
/**
* @ingroup Debugfs
- * @brief Request with #MRQ_DEBUGFS.
+ * @brief Request with #MRQ_DEBUG.
*
- * The sender of an MRQ_DEBUGFS message uses #cmd to specify a debugfs
+ * The sender of an MRQ_DEBUG message uses #cmd to specify a debugfs
* command to execute. Legal commands are the values of @ref
* mrq_debugfs_commands. Each command requires a specific additional
* payload of data.
@@ -676,16 +905,15 @@ struct mrq_debugfs_response {
/**
* @ingroup MRQ_Codes
* @def MRQ_DEBUG
- * @brief Interact with BPMP's debugfs file nodes. Use message payload
+ * @brief Interact with BPMP-FW debugfs file nodes. Use message payload
* for exchanging data. This is functionally equivalent to
- * @ref MRQ_DEBUGFS. But the way in which data is exchanged is different.
- * When software running on CPU tries to read a debugfs file,
+ * the deprecated MRQ_DEBUGFS but the way in which data is exchanged is
+ * different. When software running on CPU tries to read a debugfs file,
* the file path and read data will be stored in message payload.
* Since the message payload size is limited, a debugfs file
* transaction might require multiple frames of data exchanged
* between BPMP and CPU until the transaction completes.
*
- * * Platforms: T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_debug_request
@@ -694,17 +922,34 @@ struct mrq_debugfs_response {
/** @ingroup Debugfs */
enum mrq_debug_commands {
- /** @brief Open required file for read operation */
+ /**
+ * @brief Open file represented by the path in
+ * cmd_debug_fopen_request::name for read operation
+ */
CMD_DEBUG_OPEN_RO = 0,
- /** @brief Open required file for write operation */
+ /**
+ * @brief Open file represented by the path in
+ * cmd_debug_fopen_request::name for write operation
+ */
CMD_DEBUG_OPEN_WO = 1,
- /** @brief Perform read */
+ /**
+ * @brief Perform read on a previously opened file handle represented
+ * by the cmd_debug_fread_request::fd -value.
+ */
CMD_DEBUG_READ = 2,
- /** @brief Perform write */
+ /**
+ * @brief Perform write on a previously opened file handle represented
+ * by the cmd_debug_fwrite_request::fd -value.
+ */
CMD_DEBUG_WRITE = 3,
- /** @brief Close file */
+ /**
+ * @brief Close previously opened file handle.
+ */
CMD_DEBUG_CLOSE = 4,
- /** @brief Not a command */
+ /**
+ * @brief Not a command, represents maximum number of supported
+ * sub-commands
+ */
CMD_DEBUG_MAX
};
@@ -727,35 +972,38 @@ enum mrq_debug_commands {
/**
* @ingroup Debugfs
- * @brief Parameters for CMD_DEBUG_OPEN command
+ * @brief Parameters for #CMD_DEBUG_OPEN_RO and #CMD_DEBUG_OPEN_WO -commands
*/
struct cmd_debug_fopen_request {
- /** @brief File name - Null-terminated string with maximum
- * length @ref DEBUG_FNAME_MAX_SZ
+ /**
+ * @brief File name - Null-terminated string with maximum
+ * length including the terminator defined by the
+ * #DEBUG_FNAME_MAX_SZ -preprocessor constant.
*/
char name[DEBUG_FNAME_MAX_SZ];
} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
- * @brief Response data for CMD_DEBUG_OPEN_RO/WO command
+ * @brief Response data for #CMD_DEBUG_OPEN_RO and #CMD_DEBUG_OPEN_WO commands
*/
struct cmd_debug_fopen_response {
/** @brief Identifier for file access */
uint32_t fd;
/** @brief Data length. File data size for READ command.
- * Maximum allowed length for WRITE command
+ * Maximum allowed length for WRITE command
*/
uint32_t datalen;
} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
- * @brief Parameters for CMD_DEBUG_READ command
+ * @brief Parameters for #CMD_DEBUG_READ command
*/
struct cmd_debug_fread_request {
- /** @brief File access identifier received in response
- * to CMD_DEBUG_OPEN_RO request
+ /**
+ * @brief File access identifier received in response
+ * to #CMD_DEBUG_OPEN_RO request
*/
uint32_t fd;
} BPMP_ABI_PACKED;
@@ -770,7 +1018,7 @@ struct cmd_debug_fread_request {
/**
* @ingroup Debugfs
- * @brief Response data for CMD_DEBUG_READ command
+ * @brief Response data for #CMD_DEBUG_READ command
*/
struct cmd_debug_fread_response {
/** @brief Size of data provided in this response in bytes */
@@ -789,11 +1037,11 @@ struct cmd_debug_fread_response {
/**
* @ingroup Debugfs
- * @brief Parameters for CMD_DEBUG_WRITE command
+ * @brief Parameters for #CMD_DEBUG_WRITE command
*/
struct cmd_debug_fwrite_request {
/** @brief File access identifier received in response
- * to CMD_DEBUG_OPEN_RO request
+ * to prior #CMD_DEBUG_OPEN_RO -request
*/
uint32_t fd;
/** @brief Size of write data in bytes */
@@ -804,11 +1052,12 @@ struct cmd_debug_fwrite_request {
/**
* @ingroup Debugfs
- * @brief Parameters for CMD_DEBUG_CLOSE command
+ * @brief Parameters for #CMD_DEBUG_CLOSE command
*/
struct cmd_debug_fclose_request {
- /** @brief File access identifier received in response
- * to CMD_DEBUG_OPEN_RO request
+ /**
+ * @brief File access identifier received in prior response
+ * to #CMD_DEBUG_OPEN_RO or #CMD_DEBUG_OPEN_WO -request.
*/
uint32_t fd;
} BPMP_ABI_PACKED;
@@ -817,30 +1066,34 @@ struct cmd_debug_fclose_request {
* @ingroup Debugfs
* @brief Request with #MRQ_DEBUG.
*
- * The sender of an MRQ_DEBUG message uses #cmd to specify a debugfs
- * command to execute. Legal commands are the values of @ref
- * mrq_debug_commands. Each command requires a specific additional
- * payload of data.
+ * The sender of an #MRQ_DEBUG message uses mrq_debug_request::cmd to specify
+ * which debugfs sub-command to execute. Legal sub-commands are the values
+ * specified in the @ref mrq_debug_commands -enumeration. Each sub-command
+ * requires a specific additional payload of data according to the following
+ * table:
*
- * |command |payload|
- * |-------------------|-------|
- * |CMD_DEBUG_OPEN_RO |fop |
- * |CMD_DEBUG_OPEN_WO |fop |
- * |CMD_DEBUG_READ |frd |
- * |CMD_DEBUG_WRITE |fwr |
- * |CMD_DEBUG_CLOSE |fcl |
+ * |Sub-command |Payload structure |
+ * |--------------------|---------------------------|
+ * |#CMD_DEBUG_OPEN_RO |cmd_debug_fopen_request |
+ * |#CMD_DEBUG_OPEN_WO |cmd_debug_fopen_request |
+ * |#CMD_DEBUG_READ |cmd_debug_fread_request |
+ * |#CMD_DEBUG_WRITE |cmd_debug_fwrite_request |
+ * |#CMD_DEBUG_CLOSE |cmd_debug_fclose_request |
*/
struct mrq_debug_request {
- /** @brief Sub-command (@ref mrq_debug_commands) */
+ /** @brief Sub-command identifier from @ref mrq_debug_commands */
uint32_t cmd;
union {
- /** @brief Request payload for CMD_DEBUG_OPEN_RO/WO command */
+ /**
+ * @brief Request payload for #CMD_DEBUG_OPEN_RO and
+ * #CMD_DEBUG_OPEN_WO sub-commands
+ */
struct cmd_debug_fopen_request fop;
- /** @brief Request payload for CMD_DEBUG_READ command */
+ /** @brief Request payload for #CMD_DEBUG_READ sub-command */
struct cmd_debug_fread_request frd;
- /** @brief Request payload for CMD_DEBUG_WRITE command */
+ /** @brief Request payload for #CMD_DEBUG_WRITE sub-command */
struct cmd_debug_fwrite_request fwr;
- /** @brief Request payload for CMD_DEBUG_CLOSE command */
+ /** @brief Request payload for #CMD_DEBUG_CLOSE sub-command */
struct cmd_debug_fclose_request fcl;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
@@ -850,9 +1103,12 @@ struct mrq_debug_request {
*/
struct mrq_debug_response {
union {
- /** @brief Response data for CMD_DEBUG_OPEN_RO/WO command */
+ /**
+ * @brief Response data for the #CMD_DEBUG_OPEN_RO and
+ * #CMD_DEBUG_OPEN_WO sub-commands
+ */
struct cmd_debug_fopen_response fop;
- /** @brief Response data for CMD_DEBUG_READ command */
+ /** @brief Response data for the #CMD_DEBUG_READ sub-command */
struct cmd_debug_fread_response frd;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
@@ -862,7 +1118,6 @@ struct mrq_debug_response {
* @def MRQ_RESET
* @brief Reset an IP block
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_reset_request
@@ -872,39 +1127,46 @@ struct mrq_debug_response {
* @{
*/
+/**
+ * @brief Sub-command identifiers for #MRQ_RESET
+ */
enum mrq_reset_commands {
/**
* @brief Assert module reset
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
- * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
- * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ * mrq_response::err is
+ * * 0 if the operation was successful
+ * * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid
+ * * -#BPMP_EACCES if mrq master is not an owner of target domain reset
+ * * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
*/
CMD_RESET_ASSERT = 1,
/**
* @brief Deassert module reset
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
- * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
- * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ * mrq_response::err is
+ * * 0 if the operation was successful
+ * * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid
+ * * -#BPMP_EACCES if mrq master is not an owner of target domain reset
+ * * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
*/
CMD_RESET_DEASSERT = 2,
/**
* @brief Assert and deassert the module reset
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
- * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
- * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ * mrq_response::err is
+ * * 0 if the operation was successful
+ * * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid
+ * * -#BPMP_EACCES if mrq master is not an owner of target domain reset
+ * * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
*/
CMD_RESET_MODULE = 3,
/**
* @brief Get the highest reset ID
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_ENODEV if no reset domains are supported (number of IDs is 0)
+ * mrq_response::err is
+ * * 0 if the operation was successful
+ * * -#BPMP_ENODEV if no reset domains are supported (number of IDs is 0)
*/
CMD_RESET_GET_MAX_ID = 4,
@@ -913,15 +1175,15 @@ enum mrq_reset_commands {
};
/**
- * @brief Request with MRQ_RESET
+ * @brief Request with #MRQ_RESET
*
* Used by the sender of an #MRQ_RESET message to request BPMP to
- * assert or or deassert a given reset line.
+ * assert or deassert a given reset line.
*/
struct mrq_reset_request {
- /** @brief Reset action to perform (@ref mrq_reset_commands) */
+ /** @brief Reset action to perform, from @ref mrq_reset_commands */
uint32_t cmd;
- /** @brief Id of the reset to affected */
+	/** @brief ID of the reset to be affected, from @ref bpmp_reset_ids */
uint32_t reset_id;
} BPMP_ABI_PACKED;
@@ -940,7 +1202,7 @@ struct cmd_reset_get_max_id_response {
*
* Each sub-command supported by @ref mrq_reset_request may return
* sub-command-specific data. Some do and some do not as indicated
- * in the following table
+ * in the following table:
*
* | sub-command | payload |
* |----------------------|------------------|
@@ -962,7 +1224,6 @@ struct mrq_reset_response {
* @def MRQ_I2C
* @brief Issue an i2c transaction
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_i2c_request
@@ -971,19 +1232,60 @@ struct mrq_reset_response {
* @addtogroup I2C
* @{
*/
+
+/**
+ * @brief Size of the cmd_i2c_xfer_request::data_buf -member array in bytes.
+ */
#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12U)
+
+/**
+ * @brief Size of the cmd_i2c_xfer_response::data_buf -member array in bytes.
+ */
#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4U)
+/**
+ * @defgroup seriali2c_flags I2C flags
+ *
+ * @brief I2C transaction modifier flags for each transaction segment
+ * in #MRQ_I2C sub-command #CMD_I2C_XFER
+ */
+
+/**
+ * @addtogroup seriali2c_flags
+ * @{
+ */
+
+/** @brief when set, use 10-bit I2C slave address */
#define SERIALI2C_TEN 0x0010U
+/** @brief when set, perform a Read transaction */
#define SERIALI2C_RD 0x0001U
-#define SERIALI2C_STOP 0x8000U
+/**
+ * @brief when set, no repeated START is issued between the segments
+ * of transaction. This flag is ignored for the first segment as any
+ * transaction always starts with a START condition
+ */
#define SERIALI2C_NOSTART 0x4000U
-#define SERIALI2C_REV_DIR_ADDR 0x2000U
+/**
+ * @brief when set, a no-ACK from slave device is ignored and treated
+ * always as success
+ */
#define SERIALI2C_IGNORE_NAK 0x1000U
+/** @} seriali2c_flags */
+
+/** @brief Unused flag. Retained for backwards compatibility. */
+#define SERIALI2C_STOP 0x8000U
+/** @brief Unused flag. Retained for backwards compatibility. */
+#define SERIALI2C_REV_DIR_ADDR 0x2000U
+/** @brief Unused flag. Retained for backwards compatibility. */
+#define SERIALI2C_NO_RD_ACK 0x0800U
+/** @brief Unused flag. Retained for backwards compatibility. */
+#define SERIALI2C_RECV_LEN 0x0400U
-enum {
+/**
+ * @brief Supported I2C sub-command identifiers
+ */
+enum mrq_i2c_commands {
+ /** @brief Perform an I2C transaction */
CMD_I2C_XFER = 1
};
@@ -1005,7 +1307,7 @@ enum {
struct serial_i2c_request {
/** @brief I2C slave address */
uint16_t addr;
- /** @brief Bitmask of SERIALI2C_ flags */
+ /** @brief Bitmask of @ref seriali2c_flags */
uint16_t flags;
/** @brief Length of I2C transaction in bytes */
uint16_t len;
@@ -1020,13 +1322,13 @@ struct cmd_i2c_xfer_request {
/**
* @brief Tegra PWR_I2C bus identifier
*
- * @cond (bpmp_t234 || bpmp_t239 || bpmp_t194)
+ * @cond (bpmp_t186 || bpmp_t194 || bpmp_t234 || bpmp_t238 || bpmp_t264)
* Must be set to 5.
- * @endcond (bpmp_t234 || bpmp_t239 || bpmp_t194)
- * @cond bpmp_th500
- * Must be set to 1.
- * @endcond bpmp_th500
+ * @endcond
*
+ * @cond (bpmp_th500)
+ * Must be set to 1.
+ * @endcond
*/
uint32_t bus_id;
@@ -1047,7 +1349,7 @@ struct cmd_i2c_xfer_request {
struct cmd_i2c_xfer_response {
/** @brief Count of valid bytes in #data_buf*/
uint32_t data_size;
- /** @brief I2c read data */
+ /** @brief I2C read data */
uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
} BPMP_ABI_PACKED;
@@ -1064,16 +1366,19 @@ struct mrq_i2c_request {
/**
* @brief Response to #MRQ_I2C
*
- * mrq_response:err is
- * 0: Success
- * -#BPMP_EBADCMD: if mrq_i2c_request::cmd is other than 1
- * -#BPMP_EINVAL: if cmd_i2c_xfer_request does not contain correctly formatted request
- * -#BPMP_ENODEV: if cmd_i2c_xfer_request::bus_id is not supported by BPMP
- * -#BPMP_EACCES: if i2c transaction is not allowed due to firewall rules
- * -#BPMP_ETIMEDOUT: if i2c transaction times out
- * -#BPMP_ENXIO: if i2c slave device does not reply with ACK to the transaction
- * -#BPMP_EAGAIN: if ARB_LOST condition is detected by the i2c controller
- * -#BPMP_EIO: any other i2c controller error code than NO_ACK or ARB_LOST
+ * mrq_response::err value for this response is defined as:
+ *
+ * | Value | Description |
+ * |--------------------|---------------------------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_EBADCMD | mrq_i2c_request::cmd is other than 1 |
+ * | -#BPMP_EINVAL | cmd_i2c_xfer_request does not contain correctly formatted request |
+ * | -#BPMP_ENODEV | cmd_i2c_xfer_request::bus_id is not supported by BPMP |
+ * | -#BPMP_EACCES | I2C transaction is not allowed due to firewall rules |
+ * | -#BPMP_ETIMEDOUT | I2C transaction times out |
+ * | -#BPMP_ENXIO | I2C slave device does not reply with ACK to the transaction |
+ * | -#BPMP_EAGAIN | ARB_LOST condition is detected by the I2C controller |
+ * | -#BPMP_EIO | Any other I2C controller error code than NO_ACK or ARB_LOST |
*/
struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
@@ -1086,7 +1391,6 @@ struct mrq_i2c_response {
* @def MRQ_CLK
* @brief Perform a clock operation
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_clk_request
@@ -1095,205 +1399,354 @@ struct mrq_i2c_response {
* @addtogroup Clocks
* @{
*/
-enum {
+
+/**
+ * @brief Sub-command identifiers for #MRQ_CLK
+ */
+enum mrq_clk_commands {
+ /** Get clock rate */
CMD_CLK_GET_RATE = 1,
+
+ /** Set clock rate */
CMD_CLK_SET_RATE = 2,
+
+ /** Get attainable clock rate closer to a given rate */
CMD_CLK_ROUND_RATE = 3,
+
+ /** Get parent clock identifier for a given clock */
CMD_CLK_GET_PARENT = 4,
+
+ /** Change clock parent */
CMD_CLK_SET_PARENT = 5,
+
+ /** Get clock enable status */
CMD_CLK_IS_ENABLED = 6,
+
+ /** Enable a clock */
CMD_CLK_ENABLE = 7,
+
+ /** Disable a clock */
CMD_CLK_DISABLE = 8,
-/** @cond DEPRECATED */
- CMD_CLK_PROPERTIES = 9,
- CMD_CLK_POSSIBLE_PARENTS = 10,
- CMD_CLK_NUM_POSSIBLE_PARENTS = 11,
- CMD_CLK_GET_POSSIBLE_PARENT = 12,
- CMD_CLK_RESET_REFCOUNTS = 13,
-/** @endcond DEPRECATED */
+
+ /** Get all information about a clock */
CMD_CLK_GET_ALL_INFO = 14,
+
+ /** Get largest supported clock identifier */
CMD_CLK_GET_MAX_CLK_ID = 15,
+
+ /** Get clock maximum rate at VMIN */
CMD_CLK_GET_FMAX_AT_VMIN = 16,
+
+ /** Largest supported #MRQ_CLK sub-command identifier + 1 */
CMD_CLK_MAX,
};
+/**
+ * Flag bit set in cmd_clk_get_all_info_response::flags -field when clock
+ * supports changing of the parent clock at runtime.
+ */
#define BPMP_CLK_HAS_MUX (1U << 0U)
+
+/**
+ * Flag bit set in cmd_clk_get_all_info_response::flags -field when clock
+ * supports changing the clock rate at runtime.
+ */
#define BPMP_CLK_HAS_SET_RATE (1U << 1U)
+
+/**
+ * Flag bit set in cmd_clk_get_all_info_response::flags -field when clock is a
+ * root clock without visible parents.
+ */
#define BPMP_CLK_IS_ROOT (1U << 2U)
+
#define BPMP_CLK_IS_VAR_ROOT (1U << 3U)
+
/**
* @brief Protection against rate and parent changes
*
- * #MRQ_CLK command #CMD_CLK_SET_RATE or #MRQ_CLK command #CMD_CLK_SET_PARENT will return
- * -#BPMP_EACCES.
+ * #MRQ_CLK command #CMD_CLK_SET_RATE or #MRQ_CLK command #CMD_CLK_SET_PARENT
+ * will return -#BPMP_EACCES.
*/
#define BPMP_CLK_RATE_PARENT_CHANGE_DENIED (1U << 30)
/**
* @brief Protection against state changes
*
- * #MRQ_CLK command #CMD_CLK_ENABLE or #MRQ_CLK command #CMD_CLK_DISABLE will return
- * -#BPMP_EACCES.
+ * #MRQ_CLK command #CMD_CLK_ENABLE or #MRQ_CLK command #CMD_CLK_DISABLE
+ * will return -#BPMP_EACCES.
*/
#define BPMP_CLK_STATE_CHANGE_DENIED (1U << 31)
+/**
+ * Size of the cmd_clk_get_all_info_response::name -array in number
+ * of elements.
+ */
#define MRQ_CLK_NAME_MAXLEN 40U
+
+/**
+ * @brief Maximum number of elements in parent_id arrays of clock info responses.
+ */
#define MRQ_CLK_MAX_PARENTS 16U
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_GET_RATE
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_get_rate_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_GET_RATE
+ */
struct cmd_clk_get_rate_response {
+ /**
+ * Current rate of the given clock in Hz if mrq_response::err is 0 to
+ * indicate successful #CMD_CLK_GET_RATE -request.
+ */
int64_t rate;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_SET_RATE
+ */
struct cmd_clk_set_rate_request {
+ /** Unused / reserved field. */
int32_t unused;
+
+ /** Requested rate of the clock in Hz. */
int64_t rate;
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_SET_RATE
+ */
struct cmd_clk_set_rate_response {
+ /**
+ * If request was successful (mrq_response::err is 0), set to the new
+ * rate of the given clock in Hz.
+ */
int64_t rate;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_ROUND_RATE
+ */
struct cmd_clk_round_rate_request {
+ /** Unused / reserved field. */
int32_t unused;
+
+ /** Target rate for the clock */
int64_t rate;
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_ROUND_RATE
+ */
struct cmd_clk_round_rate_response {
+ /**
+ * The attainable rate if request was successful
+ * (mrq_response::err is 0).
+ */
int64_t rate;
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_GET_PARENT
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_get_parent_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_GET_PARENT
+ */
struct cmd_clk_get_parent_response {
+ /**
+ * The clock identifier of the parent clock if request was successful
+ * (mrq_response::err is 0).
+ */
uint32_t parent_id;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_SET_PARENT
+ */
struct cmd_clk_set_parent_request {
+ /**
+ * The clock identifier of the new parent clock.
+ */
uint32_t parent_id;
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_SET_PARENT
+ */
struct cmd_clk_set_parent_response {
+ /**
+ * The clock identifier of the new parent clock if request was
+ * successful (mrq_response::err is 0).
+ */
uint32_t parent_id;
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_IS_ENABLED
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_is_enabled_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
/**
- * @brief Response data to #MRQ_CLK sub-command CMD_CLK_IS_ENABLED
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_IS_ENABLED
*/
struct cmd_clk_is_enabled_response {
/**
* @brief The state of the clock that has been successfully
- * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the
+ * requested with #CMD_CLK_ENABLE or #CMD_CLK_DISABLE by the
* master invoking the command earlier.
*
* The state may not reflect the physical state of the clock
* if there are some other masters requesting it to be
- * enabled.
+ * enabled. Valid values:
*
- * Value 0 is disabled, all other values indicate enabled.
+ * * Value 0: The clock is disabled,
+ * * Value 1: The clock is enabled.
*/
int32_t state;
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_ENABLE
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_enable_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_ENABLE
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_enable_response {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_DISABLE
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_disable_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_DISABLE
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_disable_response {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
-/** @cond DEPRECATED */
-/** @private */
-struct cmd_clk_properties_request {
- BPMP_ABI_EMPTY
-} BPMP_ABI_PACKED;
-
-/** @todo flags need to be spelled out here */
-struct cmd_clk_properties_response {
- uint32_t flags;
-} BPMP_ABI_PACKED;
-
-/** @private */
-struct cmd_clk_possible_parents_request {
- BPMP_ABI_EMPTY
-} BPMP_ABI_PACKED;
-
-struct cmd_clk_possible_parents_response {
- uint8_t num_parents;
- uint8_t reserved[3];
- uint32_t parent_id[MRQ_CLK_MAX_PARENTS];
-} BPMP_ABI_PACKED;
-
-/** @private */
-struct cmd_clk_num_possible_parents_request {
- BPMP_ABI_EMPTY
-} BPMP_ABI_PACKED;
-
-struct cmd_clk_num_possible_parents_response {
- uint8_t num_parents;
-} BPMP_ABI_PACKED;
-
-struct cmd_clk_get_possible_parent_request {
- uint8_t parent_idx;
-} BPMP_ABI_PACKED;
-
-struct cmd_clk_get_possible_parent_response {
- uint32_t parent_id;
-} BPMP_ABI_PACKED;
-/** @endcond DEPRECATED */
-
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_GET_ALL_INFO
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_get_all_info_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_GET_ALL_INFO
+ *
+ * The values in the response are only set and valid if request status in
+ * mrq_response::err is 0.
+ */
struct cmd_clk_get_all_info_response {
+ /**
+ * State / informational flags for the clock:
+ *
+ * | Flag bit | Description |
+ * |------------------------|------------------------------------------|
+ * | #BPMP_CLK_IS_ROOT | Clock is a root clock. |
+ * | #BPMP_CLK_HAS_MUX | Clock supports changing of parent clock. |
+ * | #BPMP_CLK_HAS_SET_RATE | Clock supports changing clock rate. |
+ */
uint32_t flags;
+
+ /**
+ * Current parent clock identifier.
+ */
uint32_t parent;
+
+ /**
+ * Array of possible parent clock identifiers.
+ */
uint32_t parents[MRQ_CLK_MAX_PARENTS];
+
+ /**
+ * Number of identifiers in the #parents -array.
+ */
uint8_t num_parents;
+
+ /**
+ * Friendly name of the clock, truncated to fit the array
+ * and null-terminated.
+ */
uint8_t name[MRQ_CLK_NAME_MAXLEN];
} BPMP_ABI_PACKED;
-/** @private */
+
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_GET_MAX_CLK_ID
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_get_max_clk_id_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_GET_MAX_CLK_ID
+ */
struct cmd_clk_get_max_clk_id_response {
+ /** @brief Largest supported clock identifier. */
uint32_t max_id;
} BPMP_ABI_PACKED;
-/** @private */
+/**
+ * @brief Request payload for #MRQ_CLK sub-command #CMD_CLK_GET_FMAX_AT_VMIN
+ *
+ * This structure is an empty placeholder for future expansion of this
+ * sub-command.
+ */
struct cmd_clk_get_fmax_at_vmin_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_CLK sub-command #CMD_CLK_GET_FMAX_AT_VMIN
+ */
struct cmd_clk_get_fmax_at_vmin_response {
int64_t rate;
} BPMP_ABI_PACKED;
@@ -1308,38 +1761,26 @@ struct cmd_clk_get_fmax_at_vmin_response {
* require no additional data. Others have a sub-command specific
* payload
*
- * |sub-command |payload |
- * |----------------------------|-----------------------|
- * |CMD_CLK_GET_RATE |- |
- * |CMD_CLK_SET_RATE |clk_set_rate |
- * |CMD_CLK_ROUND_RATE |clk_round_rate |
- * |CMD_CLK_GET_PARENT |- |
- * |CMD_CLK_SET_PARENT |clk_set_parent |
- * |CMD_CLK_IS_ENABLED |- |
- * |CMD_CLK_ENABLE |- |
- * |CMD_CLK_DISABLE |- |
- * |CMD_CLK_GET_ALL_INFO |- |
- * |CMD_CLK_GET_MAX_CLK_ID |- |
- * |CMD_CLK_GET_FMAX_AT_VMIN |-
- * |
- *
+ * |Sub-command |Payload |
+ * |----------------------------|-----------------------------|
+ * |#CMD_CLK_GET_RATE |- |
+ * |#CMD_CLK_SET_RATE |#cmd_clk_set_rate_request |
+ * |#CMD_CLK_ROUND_RATE |#cmd_clk_round_rate_request |
+ * |#CMD_CLK_GET_PARENT |- |
+ * |#CMD_CLK_SET_PARENT |#cmd_clk_set_parent_request |
+ * |#CMD_CLK_IS_ENABLED |- |
+ * |#CMD_CLK_ENABLE |- |
+ * |#CMD_CLK_DISABLE |- |
+ * |#CMD_CLK_GET_ALL_INFO |- |
+ * |#CMD_CLK_GET_MAX_CLK_ID |- |
+ * |#CMD_CLK_GET_FMAX_AT_VMIN |- |
*/
-/** @cond DEPRECATED
- *
- * Older versions of firmware also supported following sub-commands:
- * |CMD_CLK_PROPERTIES |- |
- * |CMD_CLK_POSSIBLE_PARENTS |- |
- * |CMD_CLK_NUM_POSSIBLE_PARENTS|- |
- * |CMD_CLK_GET_POSSIBLE_PARENT |clk_get_possible_parent|
- * |CMD_CLK_RESET_REFCOUNTS |- |
- *
- * @endcond DEPRECATED */
-
struct mrq_clk_request {
/** @brief Sub-command and clock id concatenated to 32-bit word.
- * - bits[31..24] is the sub-cmd.
- * - bits[23..0] is the clock id
+ *
+ * - bits[31..24] is the sub-command ID from @ref mrq_clk_commands.
+ * - bits[23..0] is the clock identifier from @ref bpmp_clock_ids.
*/
uint32_t cmd_and_id;
@@ -1357,15 +1798,6 @@ struct mrq_clk_request {
struct cmd_clk_disable_request clk_disable;
/** @private */
struct cmd_clk_is_enabled_request clk_is_enabled;
- /** @cond DEPRECATED */
- /** @private */
- struct cmd_clk_properties_request clk_properties;
- /** @private */
- struct cmd_clk_possible_parents_request clk_possible_parents;
- /** @private */
- struct cmd_clk_num_possible_parents_request clk_num_possible_parents;
- struct cmd_clk_get_possible_parent_request clk_get_possible_parent;
- /** @endcond DEPRECATED */
/** @private */
struct cmd_clk_get_all_info_request clk_get_all_info;
/** @private */
@@ -1381,35 +1813,24 @@ struct mrq_clk_request {
*
* Each sub-command supported by @ref mrq_clk_request may return
* sub-command-specific data. Some do and some do not as indicated in
- * the following table
+ * the following table:
*
- * |sub-command |payload |
- * |----------------------------|------------------------|
- * |CMD_CLK_GET_RATE |clk_get_rate |
- * |CMD_CLK_SET_RATE |clk_set_rate |
- * |CMD_CLK_ROUND_RATE |clk_round_rate |
- * |CMD_CLK_GET_PARENT |clk_get_parent |
- * |CMD_CLK_SET_PARENT |clk_set_parent |
- * |CMD_CLK_IS_ENABLED |clk_is_enabled |
- * |CMD_CLK_ENABLE |- |
- * |CMD_CLK_DISABLE |- |
- * |CMD_CLK_GET_ALL_INFO |clk_get_all_info |
- * |CMD_CLK_GET_MAX_CLK_ID |clk_get_max_id |
- * |CMD_CLK_GET_FMAX_AT_VMIN |clk_get_fmax_at_vmin |
+ * |Sub-command |Payload |
+ * |----------------------------|-----------------------------------|
+ * |#CMD_CLK_GET_RATE |#cmd_clk_get_rate_response |
+ * |#CMD_CLK_SET_RATE |#cmd_clk_set_rate_response |
+ * |#CMD_CLK_ROUND_RATE |#cmd_clk_round_rate_response |
+ * |#CMD_CLK_GET_PARENT |#cmd_clk_get_parent_response |
+ * |#CMD_CLK_SET_PARENT |#cmd_clk_set_parent_response |
+ * |#CMD_CLK_IS_ENABLED |#cmd_clk_is_enabled_response |
+ * |#CMD_CLK_ENABLE |- |
+ * |#CMD_CLK_DISABLE |- |
+ * |#CMD_CLK_GET_ALL_INFO |#cmd_clk_get_all_info_response |
+ * |#CMD_CLK_GET_MAX_CLK_ID |#cmd_clk_get_max_clk_id_response |
+ * |#CMD_CLK_GET_FMAX_AT_VMIN |#cmd_clk_get_fmax_at_vmin_response |
*
*/
-/** @cond DEPRECATED
- *
- * Older versions of firmware also supported following sub-commands:
- * |CMD_CLK_PROPERTIES |clk_properties |
- * |CMD_CLK_POSSIBLE_PARENTS |clk_possible_parents |
- * |CMD_CLK_NUM_POSSIBLE_PARENTS|clk_num_possible_parents|
- * |CMD_CLK_GET_POSSIBLE_PARENT |clk_get_possible_parents|
- * |CMD_CLK_RESET_REFCOUNTS |- |
- *
- * @endcond DEPRECATED */
-
struct mrq_clk_response {
union {
struct cmd_clk_get_rate_response clk_get_rate;
@@ -1422,12 +1843,6 @@ struct mrq_clk_response {
/** @private */
struct cmd_clk_disable_response clk_disable;
struct cmd_clk_is_enabled_response clk_is_enabled;
- /** @cond DEPRECATED */
- struct cmd_clk_properties_response clk_properties;
- struct cmd_clk_possible_parents_response clk_possible_parents;
- struct cmd_clk_num_possible_parents_response clk_num_possible_parents;
- struct cmd_clk_get_possible_parent_response clk_get_possible_parent;
- /** @endcond DEPRECATED */
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
@@ -1441,7 +1856,6 @@ struct mrq_clk_response {
* @def MRQ_QUERY_ABI
* @brief Check if an MRQ is implemented
*
- * * Platforms: All
* * Initiators: Any
* * Targets: Any except DMCE
* * Request Payload: @ref mrq_query_abi_request
@@ -1450,7 +1864,7 @@ struct mrq_clk_response {
/**
* @ingroup ABI_info
- * @brief Request with MRQ_QUERY_ABI
+ * @brief Request with #MRQ_QUERY_ABI
*
* Used by #MRQ_QUERY_ABI call to check if MRQ code #mrq is supported
* by the recipient.
@@ -1468,7 +1882,11 @@ struct mrq_query_abi_request {
* successful, not that the MRQ itself is supported!
*/
struct mrq_query_abi_response {
- /** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
+ /**
+ * This response field is set to:
+ * - 0 if queried MRQ is supported, or
+ * - -#BPMP_ENODEV if queried MRQ is not supported
+ */
int32_t status;
} BPMP_ABI_PACKED;
@@ -1476,9 +1894,7 @@ struct mrq_query_abi_response {
*
* @ingroup MRQ_Codes
* @def MRQ_PG
- * @brief Control power-gating state of a partition. In contrast to
- * MRQ_PG_UPDATE_STATE, operations that change the power partition
- * state are NOT reference counted
+ * @brief Control power-gating state of a partition.
*
* @cond (bpmp_t194 || bpmp_t186)
* @note On T194 and earlier BPMP-FW forcefully turns off some partitions as
@@ -1486,9 +1902,8 @@ struct mrq_query_abi_response {
* Therefore, it is recommended to power off all domains via MRQ_PG prior to SC7
* entry.
* See @ref bpmp_pdomain_ids for further detail.
- * @endcond (bpmp_t194 || bpmp_t186)
+ * @endcond
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_pg_request
@@ -1497,6 +1912,10 @@ struct mrq_query_abi_response {
* @addtogroup Powergating
* @{
*/
+
+/**
+ * @brief Sub-command identifiers for #MRQ_PG -command.
+ */
enum mrq_pg_cmd {
/**
* @brief Check whether the BPMP driver supports the specified
@@ -1512,9 +1931,14 @@ enum mrq_pg_cmd {
* possible values for power domains are defined in enum
* pg_states
*
- * mrq_response:err is
- * 0: Success
- * -#BPMP_EINVAL: Invalid request parameters
+ * mrq_response:err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------------------------------------ |
+ * | 0 | Request was successful. |
+ * | -#BPMP_EINVAL | Invalid request parameters were provided. |
+ * | -#BPMP_EACCES | Permission denied or always-off partition was attempted to be turned on. |
+ * | Any other <0 | Internal error while performing the operation. |
*/
CMD_PG_SET_STATE = 1,
@@ -1523,18 +1947,26 @@ enum mrq_pg_cmd {
* possible values for power domains are defined in enum
* pg_states
*
- * mrq_response:err is
- * 0: Success
- * -#BPMP_EINVAL: Invalid request parameters
+ * mrq_response:err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ---------------------------------------------- |
+ * | 0 | Request was successful. |
+ * | -#BPMP_EINVAL | Invalid request parameters were provided. |
+ * | Any other <0 | Internal error while performing the operation. |
*/
CMD_PG_GET_STATE = 2,
/**
* @brief Get the name string of specified power domain id.
*
- * mrq_response:err is
- * 0: Success
- * -#BPMP_EINVAL: Invalid request parameters
+ * mrq_response:err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ---------------------------------------------- |
+ * | 0 | Request was successful. |
+ * | -#BPMP_EINVAL | Invalid request parameters were provided. |
+ * | Any other <0 | Internal error while performing the operation. |
*/
CMD_PG_GET_NAME = 3,
@@ -1543,20 +1975,29 @@ enum mrq_pg_cmd {
* @brief Get the highest power domain id in the system. Not
* all IDs between 0 and max_id are valid IDs.
*
- * mrq_response:err is
- * 0: Success
- * -#BPMP_EINVAL: Invalid request parameters
+ * mrq_response:err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ---------------------------------------------- |
+ * | 0 | Request was successful. |
+ * | -#BPMP_EINVAL | Invalid request parameters were provided. |
+ * | Any other <0 | Internal error while performing the operation. |
*/
CMD_PG_GET_MAX_ID = 4,
};
#define MRQ_PG_NAME_MAXLEN 40
+/**
+ * @brief State value for the cmd_pg_set_state_request::state -field.
+ */
enum pg_states {
/** @brief Power domain is OFF */
PG_STATE_OFF = 0,
/** @brief Power domain is ON */
PG_STATE_ON = 1,
+
+ /** @cond bpmp_t186 */
/**
* @brief a legacy state where power domain and the clock
* associated to the domain are ON.
@@ -1564,40 +2005,51 @@ enum pg_states {
* deprecated.
*/
PG_STATE_RUNNING = 2,
+ /** @endcond */
};
struct cmd_pg_query_abi_request {
- /** @ref mrq_pg_cmd */
+ /** #MRQ_PG sub-command identifier from @ref mrq_pg_cmd */
uint32_t type;
} BPMP_ABI_PACKED;
struct cmd_pg_set_state_request {
- /** @ref pg_states */
+ /** One of the state values from @ref pg_states */
uint32_t state;
} BPMP_ABI_PACKED;
/**
- * @brief Response data to #MRQ_PG sub command #CMD_PG_GET_STATE
+ * @brief Response payload for the #MRQ_PG sub-command #CMD_PG_GET_STATE
*/
struct cmd_pg_get_state_response {
/**
* @brief The state of the power partition that has been
- * succesfuly requested by the master earlier using #MRQ_PG
+ * successfully requested by the master earlier using #MRQ_PG
* command #CMD_PG_SET_STATE.
*
* The state may not reflect the physical state of the power
* partition if there are some other masters requesting it to
* be enabled.
*
- * See @ref pg_states for possible values
+ * See @ref pg_states for possible values.
*/
uint32_t state;
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for the #MRQ_PG sub-command #CMD_PG_GET_NAME
+ */
struct cmd_pg_get_name_response {
+ /**
+ * @brief On successful response contains the null-terminated
+ * friendly name of the requested power-domain.
+ */
uint8_t name[MRQ_PG_NAME_MAXLEN];
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for the #MRQ_PG sub-command #CMD_PG_GET_MAX_ID
+ */
struct cmd_pg_get_max_id_response {
uint32_t max_id;
} BPMP_ABI_PACKED;
@@ -1606,22 +2058,28 @@ struct cmd_pg_get_max_id_response {
* @brief Request with #MRQ_PG
*
* Used by the sender of an #MRQ_PG message to control power
- * partitions. The pg_request is split into several sub-commands. Some
- * sub-commands require no additional data. Others have a sub-command
- * specific payload
+ * partitions. The expected payload depends on the sub-command identifier.
+ * Some sub-commands require no additional data while others have a sub-command
+ * specific payload:
*
- * |sub-command |payload |
- * |----------------------------|-----------------------|
- * |CMD_PG_QUERY_ABI | query_abi |
- * |CMD_PG_SET_STATE | set_state |
- * |CMD_PG_GET_STATE | - |
- * |CMD_PG_GET_NAME | - |
- * |CMD_PG_GET_MAX_ID | - |
+ * |Sub-command |Payload |
+ * |----------------------------|---------------------------|
+ * |#CMD_PG_QUERY_ABI | #cmd_pg_query_abi_request |
+ * |#CMD_PG_SET_STATE | #cmd_pg_set_state_request |
+ * |#CMD_PG_GET_STATE | - |
+ * |#CMD_PG_GET_NAME | - |
+ * |#CMD_PG_GET_MAX_ID | - |
*
*/
struct mrq_pg_request {
+ /** @brief Sub-command identifier from @ref mrq_pg_cmd. */
uint32_t cmd;
+
+ /**
+ * @brief Power-domain identifier
+ */
uint32_t id;
+
union {
struct cmd_pg_query_abi_request query_abi;
struct cmd_pg_set_state_request set_state;
@@ -1629,19 +2087,18 @@ struct mrq_pg_request {
} BPMP_ABI_PACKED;
/**
- * @brief Response to MRQ_PG
+ * @brief Response to #MRQ_PG
*
- * Each sub-command supported by @ref mrq_pg_request may return
- * sub-command-specific data. Some do and some do not as indicated in
- * the following table
+ * Some of the #MRQ_PG sub-commands return a sub-command -specific payload
+ * as specified in the following table:
*
- * |sub-command |payload |
- * |----------------------------|-----------------------|
- * |CMD_PG_QUERY_ABI | - |
- * |CMD_PG_SET_STATE | - |
- * |CMD_PG_GET_STATE | get_state |
- * |CMD_PG_GET_NAME | get_name |
- * |CMD_PG_GET_MAX_ID | get_max_id |
+ * |Sub-command |Payload |
+ * |--------------------|------------------------------|
+ * |#CMD_PG_QUERY_ABI | - |
+ * |#CMD_PG_SET_STATE | - |
+ * |#CMD_PG_GET_STATE | #cmd_pg_get_state_response |
+ * |#CMD_PG_GET_NAME | #cmd_pg_get_name_response |
+ * |#CMD_PG_GET_MAX_ID | #cmd_pg_get_max_id_response |
*/
struct mrq_pg_response {
union {
@@ -1658,11 +2115,10 @@ struct mrq_pg_response {
* @def MRQ_THERMAL
* @brief Interact with BPMP thermal framework
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: Any
- * * Request Payload: TODO
- * * Response Payload: TODO
+ * * Request Payload: #mrq_thermal_host_to_bpmp_request
+ * * Response Payload: #mrq_thermal_bpmp_to_host_response
*
* @addtogroup Thermal
*
@@ -1686,10 +2142,14 @@ struct mrq_pg_response {
* payload of @ref mrq_thermal_bpmp_to_host_request.
* @{
*/
+
+/**
+ * @brief Sub-command identifiers for Host->BPMP #MRQ_THERMAL -command.
+ */
enum mrq_thermal_host_to_bpmp_cmd {
/**
- * @brief Check whether the BPMP driver supports the specified
- * request type.
+ * @brief Check whether BPMP-FW supports the specified
+ * #MRQ_THERMAL sub-command.
*
* Host needs to supply request parameters.
*
@@ -1703,31 +2163,44 @@ enum mrq_thermal_host_to_bpmp_cmd {
*
* Host needs to supply request parameters.
*
- * mrq_response::err is
- * * 0: Temperature query succeeded.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * * -#BPMP_ENOENT: No driver registered for thermal zone..
- * * -#BPMP_EFAULT: Problem reading temperature measurement.
+ * mrq_response::err value for this sub-command is:
+ *
+ * | Value | Description |
+ * | -------------- | ----------------------------------------- |
+ * | 0 | Temperature query succeeded. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_ENOENT | No driver registered for thermal zone. |
+ * | -#BPMP_EFAULT | Problem reading temperature measurement. |
*/
CMD_THERMAL_GET_TEMP = 1,
/**
+ * @cond (!bpmp_safe && !bpmp_t264)
* @brief Enable or disable and set the lower and upper
* thermal limits for a thermal trip point. Each zone has
* one trip point.
*
* Host needs to supply request parameters. Once the
* temperature hits a trip point, the BPMP will send a message
- * to the CPU having MRQ=MRQ_THERMAL and
- * type=CMD_THERMAL_HOST_TRIP_REACHED
+ * to the CPU having MRQ command identifier equal to #MRQ_THERMAL and
+ * sub-command identifier equal to #CMD_THERMAL_HOST_TRIP_REACHED.
*
- * mrq_response::err is
- * * 0: Trip successfully set.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * * -#BPMP_ENOENT: No driver registered for thermal zone.
- * * -#BPMP_EFAULT: Problem setting trip point.
+ * If #CMD_THERMAL_SET_TRIP -sub-command is issued for a
+ * thermal zone that is currently power gated and unable to
+ * report temperature, a temperature of -256C is used as
+ * temperature for evaluation of the trip.
+ *
+ * mrq_response::err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * | --------------- | -------------------------------------- |
+ * | 0 | Trip successfully set. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_ENOENT | No driver registered for thermal zone. |
+ * | -#BPMP_EFAULT | Problem setting trip point. |
*/
CMD_THERMAL_SET_TRIP = 2,
+ /** @endcond */
/**
* @brief Get the number of supported thermal zones.
@@ -1739,135 +2212,153 @@ enum mrq_thermal_host_to_bpmp_cmd {
CMD_THERMAL_GET_NUM_ZONES = 3,
/**
- * @brief Get the thermtrip of the specified zone.
+ * @brief Get the thermal trip value of the specified zone.
*
* Host needs to supply request parameters.
*
- * mrq_response::err is
- * * 0: Valid zone information returned.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * * -#BPMP_ENOENT: No driver registered for thermal zone.
- * * -#BPMP_ERANGE if thermtrip is invalid or disabled.
- * * -#BPMP_EFAULT: Problem reading zone information.
+ * mrq_response::err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * | --------------- | -------------------------------------- |
+ * | 0 | Valid zone information returned. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_ENOENT | No driver registered for thermal zone. |
+ * | -#BPMP_ERANGE | Thermal trip is invalid or disabled. |
+ * | -#BPMP_EFAULT | Problem reading zone information. |
*/
CMD_THERMAL_GET_THERMTRIP = 4,
- /** @brief: number of supported host-to-bpmp commands. May
- * increase in future
+ /**
+ * @brief Number of supported host-to-bpmp commands.
*/
CMD_THERMAL_HOST_TO_BPMP_NUM
};
+/**
+ * @brief Sub-command identifiers for BPMP->host #MRQ_THERMAL -command
+ */
enum mrq_thermal_bpmp_to_host_cmd {
/**
* @brief Indication that the temperature for a zone has
- * exceeded the range indicated in the thermal trip point
- * for the zone.
+ * exceeded the range indicated in the thermal trip point
+ * for the zone.
*
- * BPMP needs to supply request parameters. Host only needs to
+ * BPMP-FW needs to supply request parameters. Host only needs to
* acknowledge.
*/
CMD_THERMAL_HOST_TRIP_REACHED = 100,
- /** @brief: number of supported bpmp-to-host commands. May
- * increase in future
+ /**
+	 * @brief Number of supported bpmp-to-host commands. May
+ * increase in future.
*/
CMD_THERMAL_BPMP_TO_HOST_NUM
};
-/*
- * Host->BPMP request data for request type CMD_THERMAL_QUERY_ABI
- *
- * zone: Request type for which to check existence.
+/**
+ * Host->BPMP request payload for the #CMD_THERMAL_QUERY_ABI sub-command
*/
struct cmd_thermal_query_abi_request {
+ /**
+	 * Request type for which to check whether it is supported by BPMP-FW.
+	 *
+	 * Valid identifiers are available at #mrq_thermal_host_to_bpmp_cmd.
+ */
uint32_t type;
} BPMP_ABI_PACKED;
-/*
- * Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
- *
- * zone: Number of thermal zone.
+/**
+ * Host->BPMP request payload for the #CMD_THERMAL_GET_TEMP sub-command
*/
struct cmd_thermal_get_temp_request {
+ /** Thermal zone identifier from @ref bpmp_thermal_ids. */
uint32_t zone;
} BPMP_ABI_PACKED;
-/*
- * BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
+/**
+ * BPMP->Host response payload for the #CMD_THERMAL_GET_TEMP sub-command.
+ *
+ * mrq_response::err is defined as:
*
- * error: 0 if request succeeded.
- * -BPMP_EINVAL if request parameters were invalid.
- * -BPMP_ENOENT if no driver was registered for the specified thermal zone.
- * -BPMP_EFAULT for other thermal zone driver errors.
- * temp: Current temperature in millicelsius.
+ * | Value | Description |
+ * | ------------- | -------------------------------------------------------- |
+ * | 0 | Request succeeded. |
+ * | -#BPMP_EINVAL | Request parameters were invalid. |
+ * | -#BPMP_ENOENT | No driver was registered for the specified thermal zone. |
+ * | -#BPMP_EFAULT | For other BPMP-FW internal thermal zone driver errors. |
*/
struct cmd_thermal_get_temp_response {
+ /** @brief Current temperature in millicelsius. */
int32_t temp;
} BPMP_ABI_PACKED;
-/*
- * Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
+/**
+ * @cond (!bpmp_safe && !bpmp_t264)
*
- * zone: Number of thermal zone.
- * low: Temperature of lower trip point in millicelsius
- * high: Temperature of upper trip point in millicelsius
- * enabled: 1 to enable trip point, 0 to disable trip point
+ * Host->BPMP request payload for the #CMD_THERMAL_SET_TRIP sub-command.
*/
struct cmd_thermal_set_trip_request {
+ /** @brief Thermal zone identifier from @ref bpmp_thermal_ids. */
uint32_t zone;
+ /** @brief Temperature of lower trip point in millicelsius */
int32_t low;
+ /** @brief Temperature of upper trip point in millicelsius */
int32_t high;
+ /** 1 to enable trip point, 0 to disable trip point */
uint32_t enabled;
} BPMP_ABI_PACKED;
-/*
- * BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
- *
- * zone: Number of thermal zone where trip point was reached.
+/**
+ * BPMP->Host request payload for the #CMD_THERMAL_HOST_TRIP_REACHED sub-command.
*/
struct cmd_thermal_host_trip_reached_request {
+ /**
+ * @brief ID of the thermal zone where trip point was reached,
+ * from @ref bpmp_thermal_ids.
+ */
uint32_t zone;
} BPMP_ABI_PACKED;
+/** @endcond */
-/*
- * BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
- *
- * num: Number of supported thermal zones. The thermal zones are indexed
- * starting from zero.
+/**
+ * BPMP->Host response payload for the #CMD_THERMAL_GET_NUM_ZONES sub-command.
*/
struct cmd_thermal_get_num_zones_response {
+ /**
+ * @brief Number of supported thermal zones.
+ *
+ * The thermal zones are indexed starting from zero.
+ */
uint32_t num;
} BPMP_ABI_PACKED;
-/*
- * Host->BPMP request data for request type CMD_THERMAL_GET_THERMTRIP
- *
- * zone: Number of thermal zone.
+/**
+ * Host->BPMP request payload for the #CMD_THERMAL_GET_THERMTRIP sub-command.
*/
struct cmd_thermal_get_thermtrip_request {
+ /** @brief Thermal zone identifier from @ref bpmp_thermal_ids. */
uint32_t zone;
} BPMP_ABI_PACKED;
-/*
- * BPMP->Host reply data for request CMD_THERMAL_GET_THERMTRIP
- *
- * thermtrip: HW shutdown temperature in millicelsius.
+/**
+ * BPMP->Host response payload for the #CMD_THERMAL_GET_THERMTRIP sub-command.
*/
struct cmd_thermal_get_thermtrip_response {
+ /** @brief HW shutdown temperature in millicelsius. */
int32_t thermtrip;
} BPMP_ABI_PACKED;
-/*
- * Host->BPMP request data.
- *
- * Reply type is union mrq_thermal_bpmp_to_host_response.
+/**
+ * Host->BPMP #MRQ_THERMAL request payload.
*
- * type: Type of request. Values listed in enum mrq_thermal_type.
- * data: Request type specific parameters.
+ * Response payload type is #mrq_thermal_bpmp_to_host_response.
*/
struct mrq_thermal_host_to_bpmp_request {
+ /**
+ * Request sub-command identifier from @ref mrq_thermal_host_to_bpmp_cmd.
+ */
uint32_t type;
+
union {
struct cmd_thermal_query_abi_request query_abi;
struct cmd_thermal_get_temp_request get_temp;
@@ -1876,21 +2367,22 @@ struct mrq_thermal_host_to_bpmp_request {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/*
- * BPMP->Host request data.
- *
- * type: Type of request. Values listed in enum mrq_thermal_type.
- * data: Request type specific parameters.
+/**
+ * @brief Request payload for the BPMP->Host #MRQ_THERMAL command.
*/
struct mrq_thermal_bpmp_to_host_request {
+ /**
+ * Request sub-command identifier from @ref mrq_thermal_bpmp_to_host_cmd.
+ */
uint32_t type;
+
union {
struct cmd_thermal_host_trip_reached_request host_trip_reached;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/*
- * Data in reply to a Host->BPMP request.
+/**
+ * @brief Response payload for the Host->BPMP #MRQ_THERMAL command.
*/
union mrq_thermal_bpmp_to_host_response {
struct cmd_thermal_get_temp_response get_temp;
@@ -1900,13 +2392,11 @@ union mrq_thermal_bpmp_to_host_response {
/** @} Thermal */
-/**
+/** @cond (!bpmp_safe && (bpmp_t234 || bpmp_t238 || bpmp_t264))
* @ingroup MRQ_Codes
* @def MRQ_OC_STATUS
- * @brief Query over current status
+ * @brief Query overcurrent status
*
- * * Platforms: T234
- * @cond bpmp_t234
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -1916,33 +2406,224 @@ union mrq_thermal_bpmp_to_host_response {
* @{
*/
+/**
+ * @brief Size of the mrq_oc_status_response::throt_en and
+ * mrq_oc_status_response::event_cnt -arrays.
+ */
#define OC_STATUS_MAX_SIZE 24U
-/*
- * @brief Response to #MRQ_OC_STATUS
- *
- * throt_en: Value for each OC alarm where zero signifies throttle is
- * disabled, and non-zero throttle is enabled.
- * event_cnt: Total number of OC events for each OC alarm.
+/**
+ * @brief Response payload for the #MRQ_OC_STATUS -command.
*
* mrq_response::err is 0 if the operation was successful and
* -#BPMP_ENODEV otherwise.
*/
struct mrq_oc_status_response {
+ /**
+ * @brief Value for each overcurrent alarm where zero signifies
+ * throttle is disabled, and non-zero throttle is enabled.
+ */
uint8_t throt_en[OC_STATUS_MAX_SIZE];
+
+ /**
+ * @brief Total number of overcurrent events for each overcurrent alarm.
+ */
uint32_t event_cnt[OC_STATUS_MAX_SIZE];
} BPMP_ABI_PACKED;
/** @} OC_status */
-/** @endcond bpmp_t234 */
+/** @endcond */
+
+/** @cond (bpmp_th500 || bpmp_tb500 || bpmp_t238)
+ * @ingroup MRQ_Codes
+ * @def MRQ_THROTTLE
+ * @brief Overcurrent throttling
+ *
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_throttle_request
+ * * Response Payload: @ref mrq_throttle_response
+ * @addtogroup Throttle
+ * @{
+ */
+enum mrq_throttle_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_THROTTLE sub-command.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_THROTTLE_QUERY_ABI = 0,
+
+ /**
+ * @cond (bpmp_th500 || bpmp_tb500)
+ * @brief Query chipthrot status
+ *
+ * mrq_response::err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|--------------------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | CMD_THROTTLE_GET_CHIPTHROT_STATUS is not supported by BPMP-FW|
+ */
+ CMD_THROTTLE_GET_CHIPTHROT_STATUS = 1,
+ /** @endcond */
+
+ /**
+ * @cond bpmp_t238
+ * @brief Program OC throttle configuration
+ *
+ * mrq_response::err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|--------------------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_EINVAL | ID out of range or alarm for this ID not enabled at boot |
+ * | -#BPMP_ENODEV | CMD_THROTTLE_SET_OC_CONFIG is not supported by BPMP-FW |
+ */
+ CMD_THROTTLE_SET_OC_CONFIG = 2,
+ /** @endcond */
+};
+
+/**
+ * @brief Request payload for #MRQ_THROTTLE sub-command #CMD_THROTTLE_QUERY_ABI
+ */
+struct cmd_throttle_query_abi_request {
+ uint32_t cmd; /**< @ref mrq_throttle_cmd */
+} BPMP_ABI_PACKED;
+
+/**
+ * @cond bpmp_th500
+ * @brief Response payload for #MRQ_THROTTLE sub-command
+ * #CMD_THROTTLE_GET_CHIPTHROT_STATUS
+ *
+ * Bit-mask of all h/w throttling actions that have been engaged since
+ * last invocation of this command
+ * Bit 0...11 : HW throttling status of the thermal zones.
+ * Bit 12...23 : Reserved for future thermal zone events.
+ * Bit 24...25 : HW throttling status of the Over current Alarms OC1 & OC2.
+ * Bit 26...31 : Reserved for future Over current alarm events.
+ * Bit 32...63 : Reserved for future use.
+ * @endcond
+ * @cond bpmp_tb500
+ * @brief Response payload for #MRQ_THROTTLE sub-command
+ * #CMD_THROTTLE_GET_CHIPTHROT_STATUS
+ *
+ * Bit-mask of all h/w throttling actions that have been engaged since
+ * last invocation of this command
+ * Bit 0 : HW throttling status of the TB500C_TJ_MAX thermal zone.
+ * Bit 1...63 : Reserved for future use.
+ * @endcond
+ * @cond (bpmp_th500 || bpmp_tb500)
+ */
+struct cmd_throttle_get_chipthrot_status_response {
+ uint64_t status;
+} BPMP_ABI_PACKED;
+/** @endcond */
+
+/**
+ * @cond bpmp_t238
+ * @brief Request payload for #MRQ_THROTTLE sub-command
+ * #CMD_THROTTLE_SET_OC_CONFIG
+ *
+ * Only alarms that have been configured as enabled in BPMP-DTB at boot can
+ * be reconfigured with this MRQ.
+ */
+struct cmd_throttle_set_oc_config_request {
+ /** @brief valid OC alarm ID from @ref bpmp_soctherm_edp_oc_ids */
+ uint32_t id;
+ /** @brief Throttling enable/disable
+ *
+ * Set to 1 to enable throttling, or 0 to disable. Other values are
+ * disallowed.
+ */
+ uint8_t en_throttle;
+} BPMP_ABI_PACKED;
+/** @endcond */
+
+/**
+ * @brief Request payload for the #MRQ_THROTTLE -command
+ *
+ * | Sub-command | Request payload |
+ * |------------------------------------|----------------------------------|
+ * | #CMD_THROTTLE_QUERY_ABI | #cmd_throttle_query_abi_request |
+ *
+ * @cond bpmp_th500
+ * The following additional sub-commands are supported on TH500 platforms:
+ * | Sub-command | Request payload |
+ * |------------------------------------|----------------------------------|
+ * | #CMD_THROTTLE_GET_CHIPTHROT_STATUS | - |
+ * @endcond
+ *
+ * @cond bpmp_tb500
+ * The following additional sub-commands are supported on TB500 platforms:
+ * | Sub-command | Request payload |
+ * |------------------------------------|----------------------------------|
+ * | #CMD_THROTTLE_GET_CHIPTHROT_STATUS | - |
+ * @endcond
+ *
+ * @cond bpmp_t238
+ * The following additional sub-commands are supported on T238 platforms:
+ * | Sub-command | Request payload |
+ * |------------------------------------|-------------------------------------|
+ * | #CMD_THROTTLE_SET_OC_CONFIG | #cmd_throttle_set_oc_config_request |
+ * @endcond
+ */
+struct mrq_throttle_request {
+ uint32_t cmd;
+ union {
+ struct cmd_throttle_query_abi_request throttle_query_abi_req;
+ /** @cond bpmp_t238 */
+ struct cmd_throttle_set_oc_config_request throttle_set_oc_config_req;
+ /** @endcond */
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
+ * @brief Response payload for the #MRQ_THROTTLE -command.
+ *
+ * | Sub-command | Response payload |
+ * |------------------------------------|--------------------------------------------|
+ * | #CMD_THROTTLE_QUERY_ABI | - |
+ *
+ * @cond bpmp_th500
+ * The following additional sub-commands are supported on TH500 platforms:
+ * | Sub-command | Response payload |
+ * |------------------------------------|--------------------------------------------|
+ * | #CMD_THROTTLE_GET_CHIPTHROT_STATUS | #cmd_throttle_get_chipthrot_status_response|
+ * @endcond
+ *
+ * @cond bpmp_tb500
+ * The following additional sub-commands are supported on TB500 platforms:
+ * | Sub-command | Response payload |
+ * |------------------------------------|--------------------------------------------|
+ * | #CMD_THROTTLE_GET_CHIPTHROT_STATUS | #cmd_throttle_get_chipthrot_status_response|
+ * @endcond
+ *
+ * @cond bpmp_t238
+ * The following additional sub-commands are supported on T238 platforms:
+ * | Sub-command | Response payload |
+ * |------------------------------------|--------------------------------------------|
+ * | #CMD_THROTTLE_SET_OC_CONFIG | - |
+ * @endcond
+ */
+struct mrq_throttle_response {
+ union {
+ /** @cond (bpmp_th500 || bpmp_tb500) */
+ struct cmd_throttle_get_chipthrot_status_response throttle_get_chipthrot_status_resp;
+ /** @endcond */
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+/** @} Throttle */
+/** @endcond */
+
+
+/** @cond bpmp_t186
* @ingroup MRQ_Codes
* @def MRQ_CPU_VHINT
* @brief Query CPU voltage hint data
*
- * * Platforms: T186
- * @cond bpmp_t186
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_cpu_vhint_request
@@ -1995,14 +2676,13 @@ struct cpu_vhint_data {
} BPMP_ABI_PACKED;
/** @} Vhint */
-/** @endcond bpmp_t186 */
+/** @endcond */
/**
* @ingroup MRQ_Codes
* @def MRQ_ABI_RATCHET
* @brief ABI ratchet value query
*
- * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_abi_ratchet_request
@@ -2014,7 +2694,7 @@ struct cpu_vhint_data {
/**
* @brief An ABI compatibility mechanism
*
- * BPMP_ABI_RATCHET_VALUE may increase for various reasons in a future
+ * #BPMP_ABI_RATCHET_VALUE may increase for various reasons in a future
* revision of this header file.
* 1. That future revision deprecates some MRQ
* 2. That future revision introduces a breaking change to an existing
@@ -2051,11 +2731,11 @@ struct mrq_abi_ratchet_request {
*
* If #ratchet is less than the requester's #BPMP_ABI_RATCHET_VALUE,
 * the requester must either interoperate with BPMP according to an ABI
- * header version with BPMP_ABI_RATCHET_VALUE = ratchet or cease
+ * header version with #BPMP_ABI_RATCHET_VALUE = ratchet or cease
* communication with BPMP.
*
* If mrq_response::err is 0 and ratchet is greater than or equal to the
- * requester's BPMP_ABI_RATCHET_VALUE, the requester should continue
+ * requester's #BPMP_ABI_RATCHET_VALUE, the requester should continue
* normal operation.
*/
struct mrq_abi_ratchet_response {
@@ -2070,7 +2750,9 @@ struct mrq_abi_ratchet_response {
* @def MRQ_EMC_DVFS_LATENCY
* @brief Query frequency dependent EMC DVFS latency
*
- * * Platforms: T186, T194, T234
+ * On T264 and onwards, this MRQ service is available only when
+ * BPMP-FW has valid DRAM timing table passed by earlier boot stages.
+ *
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -2094,7 +2776,11 @@ struct emc_dvfs_latency {
* @brief Response to #MRQ_EMC_DVFS_LATENCY
*/
struct mrq_emc_dvfs_latency_response {
- /** @brief The number valid entries in #pairs */
+ /**
+ * @brief The number of valid entries in #pairs
+ *
+ * Valid range is [0, #EMC_DVFS_LATENCY_MAX_SIZE]
+ */
uint32_t num_pairs;
/** @brief EMC DVFS node <frequency, latency> information */
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
@@ -2102,13 +2788,11 @@ struct mrq_emc_dvfs_latency_response {
/** @} EMC */
-/**
+/** @cond (bpmp_t234)
* @ingroup MRQ_Codes
* @def MRQ_EMC_DVFS_EMCHUB
* @brief Query EMC HUB frequencies
*
- * * Platforms: T234 onwards
- * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -2139,15 +2823,13 @@ struct mrq_emc_dvfs_emchub_response {
} BPMP_ABI_PACKED;
/** @} EMC */
-/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (bpmp_t234)
* @ingroup MRQ_Codes
* @def MRQ_EMC_DISP_RFL
* @brief Set EMC display RFL handshake mode of operations
*
- * * Platforms: T234 onwards
- * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_emc_disp_rfl_request
@@ -2157,6 +2839,9 @@ struct mrq_emc_dvfs_emchub_response {
* @{
*/
+/**
+ * @brief Allowed mode values for the mrq_emc_disp_rfl_request::mode -field.
+ */
enum mrq_emc_disp_rfl_mode {
/** @brief EMC display RFL handshake disabled */
EMC_DISP_RFL_MODE_DISABLED = 0,
@@ -2171,65 +2856,75 @@ enum mrq_emc_disp_rfl_mode {
* Used by the sender of an #MRQ_EMC_DISP_RFL message to
* request the mode of EMC display RFL handshake.
*
- * mrq_response::err is
- * * 0: RFL mode is set successfully
- * * -#BPMP_EINVAL: invalid mode requested
- * * -#BPMP_ENOSYS: RFL handshake is not supported
- * * -#BPMP_EACCES: Permission denied
- * * -#BPMP_ENODEV: if disp rfl mrq is not supported by BPMP-FW
+ * mrq_response::err for this request is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | --------------------------------------------- |
+ * | 0 | RFL mode is set successfully. |
+ * | -#BPMP_EINVAL | Invalid mode requested. |
+ * | -#BPMP_ENOSYS | RFL handshake is not supported. |
+ * | -#BPMP_EACCES | Permission denied. |
+ * | -#BPMP_ENODEV | if disp rfl mrq is not supported by BPMP-FW. |
*/
struct mrq_emc_disp_rfl_request {
- /** @brief EMC display RFL mode (@ref mrq_emc_disp_rfl_mode) */
+ /** @brief EMC display RFL mode from @ref mrq_emc_disp_rfl_mode */
uint32_t mode;
} BPMP_ABI_PACKED;
/** @} EMC */
-/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (!bpmp_safe && (bpmp_t234 || bpmp_t238))
* @ingroup MRQ_Codes
* @def MRQ_BWMGR
- * @brief bwmgr requests
+ * @brief Bandwidth manager (BWMGR) commands
*
- * * Platforms: T234 onwards
- * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_bwmgr_request
* * Response Payload: @ref mrq_bwmgr_response
*
* @addtogroup BWMGR
- *
* @{
*/
+/**
+ * @brief Sub-command identifiers for #MRQ_BWMGR
+ */
enum mrq_bwmgr_cmd {
/**
- * @brief Check whether the BPMP driver supports the specified
- * request type
+ * @brief Check whether BPMP-FW supports the specified
+ * #MRQ_BWMGR sub-command.
*
- * mrq_response::err is 0 if the specified request is
- * supported and -#BPMP_ENODEV otherwise.
+ * mrq_response::err is defined to be:
+ *
+ * | Value | Description
+ * |----------------|----------------------------
+ * | 0 | Specified sub-command is supported.
+ * | -#BPMP_ENODEV | Specified sub-command is not supported.
*/
CMD_BWMGR_QUERY_ABI = 0,
/**
- * @brief Determine dram rate to satisfy iso/niso bw requests
+ * @brief Determine DRAM rate to satisfy ISO/NISO bandwidth requests
*
- * mrq_response::err is
- * * 0: calc_rate succeeded.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * * -#BPMP_ENOTSUP: Requested bw is not available.
+ * mrq_response::err is defined to be:
+ *
+ * | Value | Description
+ * |----------------|----------------------------
+ * | 0 | Rate calculation succeeded.
+ * | -#BPMP_EINVAL | Invalid request parameters.
+ * | -#BPMP_ENOTSUP | Requested bandwidth is not available.
+ * | <0 | Any other internal error.
*/
CMD_BWMGR_CALC_RATE = 1
};
-/*
- * request data for request type CMD_BWMGR_QUERY_ABI
- *
- * type: Request type for which to check existence.
+/**
+ * @brief Request payload for #MRQ_BWMGR sub-command #CMD_BWMGR_QUERY_ABI
*/
struct cmd_bwmgr_query_abi_request {
+ /** @brief Sub-command identifier from @ref mrq_bwmgr_cmd. */
uint32_t type;
} BPMP_ABI_PACKED;
@@ -2237,47 +2932,56 @@ struct cmd_bwmgr_query_abi_request {
* @brief Used by @ref cmd_bwmgr_calc_rate_request
*/
struct iso_req {
- /* @brief bwmgr client ID @ref bpmp_bwmgr_ids */
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
uint32_t id;
- /* @brief bw in kBps requested by client */
+ /** @brief Bandwidth in kBps requested by client */
uint32_t iso_bw;
} BPMP_ABI_PACKED;
+/**
+ * @brief Size of the cmd_bwmgr_calc_rate_request::isobw_reqs -array.
+ */
#define MAX_ISO_CLIENTS 13U
-/*
- * request data for request type CMD_BWMGR_CALC_RATE
+
+/**
+ * @brief Request payload for #MRQ_BWMGR sub-command #CMD_BWMGR_CALC_RATE
*/
struct cmd_bwmgr_calc_rate_request {
- /* @brief total bw in kBps requested by all niso clients */
+ /** @brief Total bandwidth in kBps requested by all NISO clients. */
uint32_t sum_niso_bw;
- /* @brief The number of iso clients */
+ /** @brief The number of ISO client requests in #isobw_reqs -array */
uint32_t num_iso_clients;
- /* @brief iso_req <id, iso_bw> information */
+ /** @brief iso_req <id, iso_bw> information */
struct iso_req isobw_reqs[MAX_ISO_CLIENTS];
} BPMP_ABI_PACKED;
-/*
- * response data for request type CMD_BWMGR_CALC_RATE
- *
- * iso_rate_min: min dram data clk rate in kHz to satisfy all iso bw reqs
- * total_rate_min: min dram data clk rate in kHz to satisfy all bw reqs
+/**
+ * @brief Response payload for #MRQ_BWMGR sub-command #CMD_BWMGR_CALC_RATE
*/
struct cmd_bwmgr_calc_rate_response {
+ /**
+ * @brief Minimum DRAM data clock rate in kHz to satisfy all ISO client
+ * bandwidth requests.
+ */
uint32_t iso_rate_min;
+
+ /**
+ * @brief Minimum DRAM data clock rate in kHz to satisfy all
+ * bandwidth requests.
+ */
uint32_t total_rate_min;
} BPMP_ABI_PACKED;
-/*
- * @brief Request with #MRQ_BWMGR
- *
- *
- * |sub-command |payload |
- * |----------------------------|------------------------------|
- * |CMD_BWMGR_QUERY_ABI | cmd_bwmgr_query_abi_request |
- * |CMD_BWMGR_CALC_RATE | cmd_bwmgr_calc_rate_request |
+/**
+ * @brief Request payload for the #MRQ_BWMGR -command.
*
+ * |Sub-command |Payload |
+ * |----------------------|-----------------------------|
+ * |#CMD_BWMGR_QUERY_ABI |#cmd_bwmgr_query_abi_request |
+ * |#CMD_BWMGR_CALC_RATE |#cmd_bwmgr_calc_rate_request |
*/
struct mrq_bwmgr_request {
+ /** @brief Sub-command identifier from @ref mrq_bwmgr_cmd. */
uint32_t cmd;
union {
struct cmd_bwmgr_query_abi_request query_abi;
@@ -2285,12 +2989,12 @@ struct mrq_bwmgr_request {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/*
- * @brief Response to MRQ_BWMGR
+/**
+ * @brief Response payload for the #MRQ_BWMGR -command.
*
- * |sub-command |payload |
- * |----------------------------|------------------------------|
- * |CMD_BWMGR_CALC_RATE | cmd_bwmgr_calc_rate_response |
+ * |Sub-command |Payload |
+ * |----------------------|------------------------------|
+ * |#CMD_BWMGR_CALC_RATE |#cmd_bwmgr_calc_rate_response |
*/
struct mrq_bwmgr_response {
union {
@@ -2299,15 +3003,13 @@ struct mrq_bwmgr_response {
} BPMP_ABI_PACKED;
/** @} BWMGR */
-/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (!bpmp_safe && (bpmp_t234 || bpmp_t238 || bpmp_t264))
* @ingroup MRQ_Codes
* @def MRQ_BWMGR_INT
- * @brief bpmp-integrated bwmgr requests
+ * @brief BPMP-FW integrated BWMGR requests
*
- * * Platforms: T234 onwards
- * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_bwmgr_int_request
@@ -2317,10 +3019,13 @@ struct mrq_bwmgr_response {
* @{
*/
+/**
+ * @brief Sub-command identifiers for #MRQ_BWMGR_INT
+ */
enum mrq_bwmgr_int_cmd {
/**
* @brief Check whether the BPMP-FW supports the specified
- * request type
+ * sub-command.
*
* mrq_response::err is 0 if the specified request is
* supported and -#BPMP_ENODEV otherwise.
@@ -2328,36 +3033,64 @@ enum mrq_bwmgr_int_cmd {
CMD_BWMGR_INT_QUERY_ABI = 1,
/**
- * @brief Determine and set dram rate to satisfy iso/niso bw request
+ * @brief Determine and set DRAM rate to satisfy ISO/NISO bandwidth requests.
*
- * mrq_response::err is
- * * 0: request succeeded.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * set_frequency in @ref cmd_bwmgr_int_calc_and_set_response
- * will not be set.
- * * -#BPMP_ENOTSUP: Requested bw is not available.
- * set_frequency in @ref cmd_bwmgr_int_calc_and_set_response
- * will be current dram-clk rate.
+ * mrq_response::err is defined as:
+ *
+ * |Value |Description |
+ * |-----------------|----------------------------------------------------------------------------------------------------------------|
+ * |0 |Request succeeded. |
+ * |-#BPMP_EINVAL |Invalid request parameters, cmd_bwmgr_int_calc_and_set_response::rate is not set. |
+ * |-#BPMP_ENOTSUP |Requested bandwidth is not available, cmd_bwmgr_int_calc_and_set_response::rate is the current DRAM clock rate. |
+ * |<0 |Any other internal error. |
*/
CMD_BWMGR_INT_CALC_AND_SET = 2,
/**
- * @brief Set a max DRAM frequency for the bandwidth-manager
+ * @brief Set a max DRAM frequency for the bandwidth manager.
*
- * mrq_response::err is
- * * 0: request succeeded.
- * * -#BPMP_ENOTSUP: Requested cap frequency is not possible.
+ * mrq_response::err is defined as:
+ *
+ * |Value |Description |
+ * |-----------------|------------------------------------------|
+ * |0 |Request succeeded. |
+ * |-#BPMP_ENOTSUP |Requested cap frequency is not possible. |
+ * |<0 |Any other internal error. |
*/
- CMD_BWMGR_INT_CAP_SET = 3
+ CMD_BWMGR_INT_CAP_SET = 3,
+
+ /**
+ * @brief Obtain the maximum amount of bandwidth currently allocatable
+ * to the requesting client.
+ *
+ * mrq_response::err is defined as:
+ *
+ * |Value |Description |
+ * |-----------------|------------------------------------------|
+ * |0 |Request succeeded. |
+ * |-#BPMP_EINVAL |Invalid request parameters. |
+ * |<0 |Any other internal error. |
+ */
+ CMD_BWMGR_INT_CURR_AVAILABLE_BW = 4,
+ /**
+ * @brief Get the last request made by the client.
+ *
+ * mrq_response::err is defined as:
+ *
+ * |Value |Description |
+ * |-----------------|------------------------------------------|
+ * |0 |Request succeeded. |
+ * |-#BPMP_EINVAL |Invalid request parameters. |
+ * |<0 |Any other internal error. |
+ */
+ CMD_BWMGR_INT_GET_LAST_REQUEST = 5,
};
-/*
- * request structure for request type CMD_BWMGR_QUERY_ABI
- *
- * type: Request type for which to check existence.
+/**
+ * @brief Request payload for #MRQ_BWMGR_INT sub-command #CMD_BWMGR_INT_QUERY_ABI
*/
struct cmd_bwmgr_int_query_abi_request {
- /* @brief request type determined by @ref mrq_bwmgr_int_cmd */
+ /** @brief Sub-command identifier from @ref mrq_bwmgr_int_cmd. */
uint32_t type;
} BPMP_ABI_PACKED;
@@ -2373,87 +3106,168 @@ struct cmd_bwmgr_int_query_abi_request {
/** @} bwmgr_int_unit_type */
-/*
- * request data for request type CMD_BWMGR_INT_CALC_AND_SET
+/**
+ * @brief Request payload for #MRQ_BWMGR_INT sub-command #CMD_BWMGR_INT_CALC_AND_SET
*/
struct cmd_bwmgr_int_calc_and_set_request {
- /* @brief bwmgr client ID @ref bpmp_bwmgr_ids */
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
uint32_t client_id;
- /* @brief average niso bw usage in kBps requested by client. */
+ /** @brief Average NISO bandwidth usage in kBps requested by client. */
uint32_t niso_bw;
- /*
- * @brief average iso bw usage in kBps requested by client.
- * Value is ignored if client is niso. Determined by client_id.
+ /**
+ * @brief Average ISO bandwidth usage in kBps requested by client.
+ *
+ * Value is ignored if client is NISO as determined by #client_id.
*/
uint32_t iso_bw;
- /*
- * @brief memory clock floor requested by client.
- * Unit determined by floor_unit.
+ /**
+ * @brief Memory clock floor requested by client, unit of the value
+ * is determined by #floor_unit -field.
*/
uint32_t mc_floor;
- /*
- * @brief toggle to determine the unit-type of floor value.
- * See @ref bwmgr_int_unit_type definitions for unit-type mappings.
+ /**
+ * @brief Value set to determine the unit of the #mc_floor value:
+ *
+ * | Value | Unit |
+ * |-----------------------|----------------------|
+ * | #BWMGR_INT_UNIT_KBPS | Kilobytes per second |
+ * | #BWMGR_INT_UNIT_KHZ | Kilohertz |
*/
uint8_t floor_unit;
} BPMP_ABI_PACKED;
-struct cmd_bwmgr_int_cap_set_request {
- /* @brief requested cap frequency in Hz. */
+/**
+ * @brief Response payload for #MRQ_BWMGR_INT sub-command #CMD_BWMGR_INT_CALC_AND_SET
+ */
+struct cmd_bwmgr_int_calc_and_set_response {
+ /** @brief Currently set memory clock frequency in Hz */
uint64_t rate;
} BPMP_ABI_PACKED;
-/*
- * response data for request type CMD_BWMGR_CALC_AND_SET
+/**
+ * @brief Request payload for #MRQ_BWMGR_INT sub-command #CMD_BWMGR_INT_CAP_SET
*/
-struct cmd_bwmgr_int_calc_and_set_response {
- /* @brief current set memory clock frequency in Hz */
+struct cmd_bwmgr_int_cap_set_request {
+ /** @brief Requested cap frequency in Hz. */
uint64_t rate;
} BPMP_ABI_PACKED;
-/*
- * @brief Request with #MRQ_BWMGR_INT
+/**
+ * @brief Request payload for #MRQ_BWMGR_INT sub-command #CMD_BWMGR_INT_CURR_AVAILABLE_BW
+ */
+struct cmd_bwmgr_int_curr_available_bw_request {
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
+ uint32_t id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for #MRQ_BWMGR_INT sub-command #CMD_BWMGR_INT_CURR_AVAILABLE_BW
+ */
+struct cmd_bwmgr_int_curr_available_bw_response {
+ /** @brief Current cap frequency in kHz. */
+ uint64_t cap_rate;
+ /** @brief Currently available bandwidth for the requesting client
+ * to allocate in kBps.
+ */
+ uint64_t available_bw;
+} BPMP_ABI_PACKED;
+
+struct cmd_bwmgr_int_get_last_request_request {
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
+ uint32_t id;
+ /**
+ * @brief Value set to determine the unit of the returned mc_floor value:
+ *
+ * | Value | Unit |
+ * |-----------------------|----------------------|
+ * | #BWMGR_INT_UNIT_KBPS | Kilobytes per second |
+ * | #BWMGR_INT_UNIT_KHZ | Kilohertz |
+ */
+ uint8_t floor_unit;
+} BPMP_ABI_PACKED;
+
+struct cmd_bwmgr_int_get_last_request_response {
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
+ uint32_t client_id;
+ /** @brief Average NISO bandwidth usage in kBps requested by client. */
+ uint32_t niso_bw;
+ /**
+ * @brief Average ISO bandwidth usage in kBps requested by client.
+ */
+ uint32_t iso_bw;
+ /**
+ * @brief Memory clock floor requested by client, unit of the value
+ * is determined by #floor_unit -field.
+ */
+ uint32_t mc_floor;
+ /**
+ * @brief Value set to determine the unit of the #mc_floor value:
+ *
+ * | Value | Unit |
+ * |-----------------------|----------------------|
+ * | #BWMGR_INT_UNIT_KBPS | Kilobytes per second |
+ * | #BWMGR_INT_UNIT_KHZ | Kilohertz |
+ */
+ uint8_t floor_unit;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_BWMGR_INT -command.
*
+ * |Sub-command |Payload |
+ * |---------------------------------|-----------------------------------------|
+ * |#CMD_BWMGR_INT_QUERY_ABI |#cmd_bwmgr_int_query_abi_request |
+ * |#CMD_BWMGR_INT_CALC_AND_SET |#cmd_bwmgr_int_calc_and_set_request |
+ * |#CMD_BWMGR_INT_CAP_SET |#cmd_bwmgr_int_cap_set_request |
+ * |#CMD_BWMGR_INT_GET_LAST_REQUEST |#cmd_bwmgr_int_get_last_request_request |
*
- * |sub-command |payload |
- * |----------------------------|-----------------------------------|
- * |CMD_BWMGR_INT_QUERY_ABI | cmd_bwmgr_int_query_abi_request |
- * |CMD_BWMGR_INT_CALC_AND_SET | cmd_bwmgr_int_calc_and_set_request|
- * |CMD_BWMGR_INT_CAP_SET | cmd_bwmgr_int_cap_set_request |
+ * The following additional sub-commands are supported on T264 platforms:
*
+ * |Sub-command |Payload |
+ * |---------------------------------|-----------------------------------------|
+ * |#CMD_BWMGR_INT_CURR_AVAILABLE_BW |#cmd_bwmgr_int_curr_available_bw_request |
*/
struct mrq_bwmgr_int_request {
+ /** @brief Sub-command identifier from @ref mrq_bwmgr_int_cmd. */
uint32_t cmd;
union {
struct cmd_bwmgr_int_query_abi_request query_abi;
struct cmd_bwmgr_int_calc_and_set_request bwmgr_calc_set_req;
struct cmd_bwmgr_int_cap_set_request bwmgr_cap_set_req;
+ struct cmd_bwmgr_int_curr_available_bw_request bwmgr_curr_available_bw_req;
+ struct cmd_bwmgr_int_get_last_request_request bwmgr_get_last_request_req;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/*
- * @brief Response to MRQ_BWMGR_INT
+/**
+ * @brief Response payload for the #MRQ_BWMGR_INT -command.
+ *
+ * |Sub-command |Payload |
+ * |---------------------------------|------------------------------------------|
+ * |#CMD_BWMGR_INT_CALC_AND_SET |#cmd_bwmgr_int_calc_and_set_response |
+ * |#CMD_BWMGR_INT_GET_LAST_REQUEST |#cmd_bwmgr_int_get_last_request_response |
*
- * |sub-command |payload |
- * |----------------------------|---------------------------------------|
- * |CMD_BWMGR_INT_CALC_AND_SET | cmd_bwmgr_int_calc_and_set_response |
+ * The following additional sub-commands are supported on T264 platforms:
+ * |Sub-command |Payload |
+ * |---------------------------------|------------------------------------------|
+ * |#CMD_BWMGR_INT_CURR_AVAILABLE_BW |#cmd_bwmgr_int_curr_available_bw_response |
*/
struct mrq_bwmgr_int_response {
union {
struct cmd_bwmgr_int_calc_and_set_response bwmgr_calc_set_resp;
+ struct cmd_bwmgr_int_curr_available_bw_response bwmgr_curr_available_bw_resp;
+ struct cmd_bwmgr_int_get_last_request_response bwmgr_get_last_request_resp;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
/** @} BWMGR_INT */
-/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (!bpmp_safe && (bpmp_t234 || bpmp_t238 || bpmp_t264))
* @ingroup MRQ_Codes
* @def MRQ_ISO_CLIENT
* @brief ISO client requests
*
- * * Platforms: T234 onwards
- * @cond (bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_iso_client_request
@@ -2463,148 +3277,178 @@ struct mrq_bwmgr_int_response {
* @{
*/
+/**
+ * @brief Sub-command identifiers for #MRQ_ISO_CLIENT.
+ */
enum mrq_iso_client_cmd {
/**
- * @brief Check whether the BPMP driver supports the specified
- * request type
+ * @brief Check whether BPMP-FW supports a specified
+ * #MRQ_ISO_CLIENT sub-command.
*
* mrq_response::err is 0 if the specified request is
* supported and -#BPMP_ENODEV otherwise.
*/
CMD_ISO_CLIENT_QUERY_ABI = 0,
- /*
- * @brief check for legal LA for the iso client. Without programming
- * LA MC registers, calculate and ensure that legal LA is possible for
- * iso bw requested by the ISO client.
+ /**
+ * @brief Determine legal LA for ISO client.
*
- * mrq_response::err is
- * * 0: check la succeeded.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * * -#BPMP_EFAULT: Legal LA is not possible for client requested iso_bw
+ * Without programming LA MC registers, calculate and ensure that
+ * a legal LA is possible for the ISO bandwidth requested by the
+ * ISO client.
+ *
+ * mrq_response::err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * |---------------|--------------------------------------------------------------|
+ * | 0 | Request successful and legal LA is possible. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_EFAULT | Legal LA is not possible for client requested ISO bandwidth. |
+ * | <0 | Any other internal error. |
*/
CMD_ISO_CLIENT_CALCULATE_LA = 1,
- /*
- * @brief set LA for the iso client. Calculate and program the LA/PTSA
- * MC registers corresponding to the client making bw request
+ /**
+ * @brief Set LA for ISO client.
*
- * mrq_response::err is
- * * 0: set la succeeded.
- * * -#BPMP_EINVAL: Invalid request parameters.
- * * -#BPMP_EFAULT: Failed to calculate or program MC registers.
+ * Calculate and program the LA/PTSA MC registers corresponding to the
+ * ISO client making the bandwidth request.
+ *
+ * mrq_response::err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * |---------------|----------------------------------------------|
+ * | 0 | Setting LA succeeded. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_EFAULT | Failed to calculate or program MC registers. |
+ * | <0 | Any other internal error. |
*/
CMD_ISO_CLIENT_SET_LA = 2,
- /*
- * @brief Get max possible bw for iso client
+ /**
+ * @brief Get maximum possible bandwidth for ISO client.
*
- * mrq_response::err is
- * * 0: get_max_bw succeeded.
- * * -#BPMP_EINVAL: Invalid request parameters.
+ * mrq_response::err for this sub-command is defined as:
+ *
+ * | Value | Description |
+ * |---------------|----------------------------------------------|
+ * | 0 | Operation successful. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | <0 | Any other internal error. |
*/
CMD_ISO_CLIENT_GET_MAX_BW = 3
};
-/*
- * request data for request type CMD_ISO_CLIENT_QUERY_ABI
- *
- * type: Request type for which to check existence.
+/**
+ * @brief Request payload for #MRQ_ISO_CLIENT sub-command #CMD_ISO_CLIENT_QUERY_ABI
*/
struct cmd_iso_client_query_abi_request {
+ /**
+ * @brief Sub-command identifier from @ref mrq_iso_client_cmd
+ * for which to check existence.
+ */
uint32_t type;
} BPMP_ABI_PACKED;
-/*
- * request data for request type CMD_ISO_CLIENT_CALCULATE_LA
- *
- * id: client ID in @ref bpmp_bwmgr_ids
- * bw: bw requested in kBps by client ID.
- * init_bw_floor: initial dram_bw_floor in kBps passed by client ID.
- * ISO client will perform mempool allocation and DVFS buffering based
- * on this dram_bw_floor.
+/**
+ * @brief Request payload for #MRQ_ISO_CLIENT sub-command #CMD_ISO_CLIENT_CALCULATE_LA
*/
struct cmd_iso_client_calculate_la_request {
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
uint32_t id;
+ /** @brief Bandwidth requested in kBps for the client specified in #id. */
uint32_t bw;
+ /**
+ * @brief Initial DRAM bandwidth floor in kBps for the ISO client specified in #id.
+ *
+ * ISO client will perform mempool allocation and DVFS buffering based
+ * on this value.
+ */
uint32_t init_bw_floor;
} BPMP_ABI_PACKED;
-/*
- * request data for request type CMD_ISO_CLIENT_SET_LA
- *
- * id: client ID in @ref bpmp_bwmgr_ids
- * bw: bw requested in kBps by client ID.
- * final_bw_floor: final dram_bw_floor in kBps.
- * Sometimes the initial dram_bw_floor passed by ISO client may need to be
- * updated by considering higher dram freq's. This is the final dram_bw_floor
- * used to calculate and program MC registers.
+/**
+ * @brief Response payload for #MRQ_ISO_CLIENT sub-command #CMD_ISO_CLIENT_CALCULATE_LA
+ */
+struct cmd_iso_client_calculate_la_response {
+ /** @brief Minimum DRAM rate in kHz at which a legal LA is possible */
+ uint32_t la_rate_floor;
+ /**
+ * Minimum DRAM frequency in kHz required to satisfy this client's
+ * ISO bandwidth request, assuming all other ISO clients are inactive.
+ */
+ uint32_t iso_client_only_rate;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for #MRQ_ISO_CLIENT sub-command #CMD_ISO_CLIENT_SET_LA
*/
struct cmd_iso_client_set_la_request {
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
uint32_t id;
+ /** @brief Bandwidth requested in kBps for the client specified in #id. */
uint32_t bw;
+ /**
+ * @brief Final DRAM bandwidth floor in kBps.
+ *
+ * Sometimes the initial cmd_iso_client_calculate_la_request::init_bw_floor
+ * passed by ISO client may need to be updated by considering higher
+ * DRAM frequencies. This is the final DRAM bandwidth floor value used
+ * to calculate and program MC registers.
+ */
uint32_t final_bw_floor;
} BPMP_ABI_PACKED;
-/*
- * request data for request type CMD_ISO_CLIENT_GET_MAX_BW
- *
- * id: client ID in @ref bpmp_bwmgr_ids
+/**
+ * @brief Request payload for #MRQ_ISO_CLIENT sub-command #CMD_ISO_CLIENT_GET_MAX_BW
*/
struct cmd_iso_client_get_max_bw_request {
+ /** @brief BWMGR client ID from @ref bpmp_bwmgr_ids */
uint32_t id;
} BPMP_ABI_PACKED;
-/*
- * response data for request type CMD_ISO_CLIENT_CALCULATE_LA
- *
- * la_rate_floor: minimum dram_rate_floor in kHz at which a legal la is possible
- * iso_client_only_rate: Minimum dram freq in kHz required to satisfy this clients
- * iso bw request, assuming all other iso clients are inactive
- */
-struct cmd_iso_client_calculate_la_response {
- uint32_t la_rate_floor;
- uint32_t iso_client_only_rate;
-} BPMP_ABI_PACKED;
-
/**
* @brief Used by @ref cmd_iso_client_get_max_bw_response
*/
struct iso_max_bw {
- /* @brief dram frequency in kHz */
+ /** @brief dram frequency in kHz */
uint32_t freq;
- /* @brief max possible iso-bw in kBps */
+ /** @brief max possible iso-bw in kBps */
uint32_t iso_bw;
} BPMP_ABI_PACKED;
+/**
+ * @brief Size of the cmd_iso_client_get_max_bw_response::pairs -array.
+ */
#define ISO_MAX_BW_MAX_SIZE 14U
-/*
- * response data for request type CMD_ISO_CLIENT_GET_MAX_BW
+
+/**
+ * @brief Response payload for #MRQ_ISO_CLIENT sub-command #CMD_ISO_CLIENT_GET_MAX_BW
*/
struct cmd_iso_client_get_max_bw_response {
- /* @brief The number valid entries in iso_max_bw pairs */
+ /** @brief The number of valid entries in iso_max_bw pairs */
uint32_t num_pairs;
- /* @brief max ISOBW <dram freq, max bw> information */
+ /** @brief max ISOBW <dram freq, max bw> information */
struct iso_max_bw pairs[ISO_MAX_BW_MAX_SIZE];
} BPMP_ABI_PACKED;
/**
- * @brief Request with #MRQ_ISO_CLIENT
- *
- * Used by the sender of an #MRQ_ISO_CLIENT message.
+ * @brief Request payload for #MRQ_ISO_CLIENT command.
*
- * |sub-command |payload |
- * |------------------------------------ |----------------------------------------|
- * |CMD_ISO_CLIENT_QUERY_ABI |cmd_iso_client_query_abi_request |
- * |CMD_ISO_CLIENT_CALCULATE_LA |cmd_iso_client_calculate_la_request |
- * |CMD_ISO_CLIENT_SET_LA |cmd_iso_client_set_la_request |
- * |CMD_ISO_CLIENT_GET_MAX_BW |cmd_iso_client_get_max_bw_request |
+ * Each #MRQ_ISO_CLIENT -command is expected to include a sub-command specific
+ * payload as defined in table below:
*
+ * |Sub-command |Request payload |
+ * |-----------------------------|--------------------------------------|
+ * |#CMD_ISO_CLIENT_QUERY_ABI |#cmd_iso_client_query_abi_request |
+ * |#CMD_ISO_CLIENT_CALCULATE_LA |#cmd_iso_client_calculate_la_request |
+ * |#CMD_ISO_CLIENT_SET_LA |#cmd_iso_client_set_la_request |
+ * |#CMD_ISO_CLIENT_GET_MAX_BW |#cmd_iso_client_get_max_bw_request |
*/
-
struct mrq_iso_client_request {
- /* Type of request. Values listed in enum mrq_iso_client_cmd */
+ /** @brief Sub-command identifier from @ref mrq_iso_client_cmd. */
uint32_t cmd;
+
union {
struct cmd_iso_client_query_abi_request query_abi;
struct cmd_iso_client_calculate_la_request calculate_la_req;
@@ -2614,20 +3458,20 @@ struct mrq_iso_client_request {
} BPMP_ABI_PACKED;
/**
- * @brief Response to MRQ_ISO_CLIENT
+ * @brief Response payload for #MRQ_ISO_CLIENT command.
*
- * Each sub-command supported by @ref mrq_iso_client_request may return
- * sub-command-specific data. Some do and some do not as indicated in
- * the following table
+ * Some of the sub-commands supported by #MRQ_ISO_CLIENT may return
+ * a sub-command -specific payload in the MRQ response as defined in table
+ * below:
*
- * |sub-command |payload |
- * |---------------------------- |------------------------------------|
- * |CMD_ISO_CLIENT_CALCULATE_LA |cmd_iso_client_calculate_la_response|
- * |CMD_ISO_CLIENT_SET_LA |N/A |
- * |CMD_ISO_CLIENT_GET_MAX_BW |cmd_iso_client_get_max_bw_response |
+ * |Sub-command |Response payload |
+ * |---------------------------- |--------------------------------------|
+ * |#CMD_ISO_CLIENT_QUERY_ABI |- |
+ * |#CMD_ISO_CLIENT_CALCULATE_LA |#cmd_iso_client_calculate_la_response |
+ * |#CMD_ISO_CLIENT_SET_LA |- |
+ * |#CMD_ISO_CLIENT_GET_MAX_BW |#cmd_iso_client_get_max_bw_response |
*
*/
-
struct mrq_iso_client_response {
union {
struct cmd_iso_client_calculate_la_response calculate_la_resp;
@@ -2636,15 +3480,13 @@ struct mrq_iso_client_response {
} BPMP_ABI_PACKED;
/** @} ISO_CLIENT */
-/** @endcond (bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (!bpmp_t186)
* @ingroup MRQ_Codes
* @def MRQ_CPU_NDIV_LIMITS
- * @brief CPU freq. limits in ndiv
+ * @brief Return CPU cluster NDIV limits
*
- * * Platforms: T194 onwards
- * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_cpu_ndiv_limits_request
@@ -2654,10 +3496,10 @@ struct mrq_iso_client_response {
*/
/**
- * @brief Request for ndiv limits of a cluster
+ * @brief Request payload for the #MRQ_CPU_NDIV_LIMITS -command
*/
struct mrq_cpu_ndiv_limits_request {
- /** @brief Enum cluster_id */
+ /** @brief Logical CPU cluster identifier */
uint32_t cluster_id;
} BPMP_ABI_PACKED;
@@ -2678,15 +3520,14 @@ struct mrq_cpu_ndiv_limits_response {
} BPMP_ABI_PACKED;
/** @} CPU */
-/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+
+/** @cond (bpmp_t194)
* @ingroup MRQ_Codes
* @def MRQ_CPU_AUTO_CC3
* @brief Query CPU cluster auto-CC3 configuration
*
- * * Platforms: T194
- * @cond bpmp_t194
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_cpu_auto_cc3_request
@@ -2702,15 +3543,15 @@ struct mrq_cpu_ndiv_limits_response {
*/
/**
- * @brief Request for auto-CC3 configuration of a cluster
+ * @brief Request payload for the #MRQ_CPU_AUTO_CC3 -command
*/
struct mrq_cpu_auto_cc3_request {
- /** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */
+ /** @brief Logical CPU cluster ID */
uint32_t cluster_id;
} BPMP_ABI_PACKED;
/**
- * @brief Response to #MRQ_CPU_AUTO_CC3
+ * @brief Response payload for the #MRQ_CPU_AUTO_CC3 -command
*/
struct mrq_cpu_auto_cc3_response {
/**
@@ -2724,9 +3565,9 @@ struct mrq_cpu_auto_cc3_response {
} BPMP_ABI_PACKED;
/** @} CC3 */
-/** @endcond bpmp_t194 */
+/** @endcond */
-/**
+/** @cond (bpmp_t186 || bpmp_t194 || bpmp_t234)
* @ingroup MRQ_Codes
* @def MRQ_RINGBUF_CONSOLE
* @brief A ring buffer debug console for BPMP
@@ -2811,7 +3652,9 @@ struct cmd_ringbuf_console_query_abi_resp {
*/
struct cmd_ringbuf_console_read_req {
/**
- * @brief Number of bytes requested to be read from the BPMP TX buffer
+ * @brief Number of bytes requested to be read from the BPMP TX buffer.
+ *
+ * Valid range is [0, #MRQ_RINGBUF_CONSOLE_MAX_READ_LEN]
*/
uint8_t len;
} BPMP_ABI_PACKED;
@@ -2823,7 +3666,11 @@ struct cmd_ringbuf_console_read_req {
struct cmd_ringbuf_console_read_resp {
/** @brief The actual data read from the BPMP TX buffer */
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_READ_LEN];
- /** @brief Number of bytes in cmd_ringbuf_console_read_resp::data */
+ /**
+ * @brief Number of bytes in cmd_ringbuf_console_read_resp::data
+ *
+ * Valid range is [0, #MRQ_RINGBUF_CONSOLE_MAX_READ_LEN]
+ */
uint8_t len;
} BPMP_ABI_PACKED;
@@ -2904,14 +3751,13 @@ union mrq_ringbuf_console_bpmp_to_host_response {
} BPMP_ABI_PACKED;
/** @} RingbufConsole */
+/** @endcond */
-/**
+/** @cond (!bpmp_t186 && !(bpmp_safe && bpmp_t234))
* @ingroup MRQ_Codes
* @def MRQ_STRAP
* @brief Set a strap value controlled by BPMP
*
- * * Platforms: T194 onwards
- * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_strap_request
@@ -2925,10 +3771,14 @@ union mrq_ringbuf_console_bpmp_to_host_response {
* deasserted.
*
* BPMP owns registers which act as straps to various units. It
- * exposes limited control of those straps via #MRQ_STRAP.
+ * exposes limited control of those registers via #MRQ_STRAP.
*
* @{
*/
+
+/**
+ * @brief Sub-command identifiers for the #MRQ_STRAP -command.
+ */
enum mrq_strap_cmd {
/** @private */
STRAP_RESERVED = 0,
@@ -2937,27 +3787,31 @@ enum mrq_strap_cmd {
};
/**
- * @brief Request with #MRQ_STRAP
+ * @brief Request payload for the #MRQ_STRAP -command.
*/
struct mrq_strap_request {
- /** @brief @ref mrq_strap_cmd */
+ /** @brief Sub-command identifier from @ref mrq_strap_cmd */
uint32_t cmd;
- /** @brief Strap ID from @ref Strap_Identifiers */
+ /**
+ * @if (bpmp_t234 || bpmp_th500 || bpmp_t264)
+ * @brief Strap ID from @ref bpmp_strap_ids
+ * @else
+ * @brief Strap ID (undefined)
+ * @endif
+ */
uint32_t id;
- /** @brief Desired value for strap (if cmd is #STRAP_SET) */
+ /** @brief Desired value for strap (if #cmd is #STRAP_SET) */
uint32_t value;
} BPMP_ABI_PACKED;
/** @} Strap */
-/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (bpmp_t194 || bpmp_t234 || bpmp_th500)
* @ingroup MRQ_Codes
* @def MRQ_UPHY
* @brief Perform a UPHY operation
*
- * * Platforms: T194 onwards
- * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_uphy_request
@@ -2966,90 +3820,215 @@ struct mrq_strap_request {
* @addtogroup UPHY
* @{
*/
-enum {
+
+/**
+ * @brief Sub-command identifiers for #MRQ_UPHY.
+ */
+enum mrq_uphy_cmd {
+ /** @brief Trigger PCIE lane margining procedure. */
CMD_UPHY_PCIE_LANE_MARGIN_CONTROL = 1,
+ /** @brief Return PCIE lane margining status. */
CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2,
+ /** @brief Initialize PCIE EP PLL controller. */
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
+ /** @brief Set state of the PCIE RP/EP controller. */
CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
+ /** @brief Disable PCIE EP PLL controller. */
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5,
+
+ /**
+ * @cond bpmp_t238
+ * @brief Initialize and enable UPHY display port.
+ */
CMD_UPHY_DISPLAY_PORT_INIT = 6,
+ /** @brief Disable UPHY display port. */
CMD_UPHY_DISPLAY_PORT_OFF = 7,
+ /** @brief Trigger sequence to restore XUSB DYN lanes during SC7 exit. */
CMD_UPHY_XUSB_DYN_LANES_RESTORE = 8,
+ /** @endcond */
+
+ /**
+ * @cond bpmp_th500
+ * @brief Perform UPHY Lane EOM scan.
+ */
+ CMD_UPHY_LANE_EOM_SCAN = 9,
+ /** @brief Config PCIe VDM with a given BDF ID. */
+ CMD_UPHY_PCIE_CONFIG_VDM = 10,
+ /** @endcond */
+
CMD_UPHY_MAX,
};
+/**
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_PCIE_LANE_MARGIN_CONTROL.
+ */
struct cmd_uphy_margin_control_request {
- /** @brief Enable margin */
+ /**
+ * @brief Enable margin.
+ *
+ * Valid values:
+ * * Value 0 disables margin,
+ * * Value 1 enables margin.
+ */
int32_t en;
- /** @brief Clear the number of error and sections */
+ /**
+ * @brief Clear the number of error and sections.
+ *
+ * Valid values:
+ *
+ * * Value 0: Skip clear,
+ * * Value 1: Perform clear.
+ */
int32_t clr;
- /** @brief Set x offset (1's complement) for left/right margin type (y should be 0) */
+ /**
+ * @brief Set x offset (1's complement) for left/right margin type (y should be 0).
+ *
+ * Valid range is [0, 127]
+ */
uint32_t x;
- /** @brief Set y offset (1's complement) for left/right margin type (x should be 0) */
+ /**
+ * @brief Set y offset (1's complement) for left/right margin type (x should be 0)
+ *
+ * Valid range is [0, 63]
+ */
uint32_t y;
- /** @brief Set number of bit blocks for each margin section */
+ /**
+ * @brief Set number of bit blocks for each margin section.
+ *
+ * Valid range is [0, 15]
+ */
uint32_t nblks;
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_UPHY sub-command #CMD_UPHY_PCIE_LANE_MARGIN_STATUS.
+ */
struct cmd_uphy_margin_status_response {
/** @brief Number of errors observed */
uint32_t status;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT.
+ */
struct cmd_uphy_ep_controller_pll_init_request {
- /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T239 valid: 0 */
+ /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T238 valid: 0 */
uint8_t ep_controller;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_PCIE_CONTROLLER_STATE.
+ */
struct cmd_uphy_pcie_controller_state_request {
- /** @brief PCIE controller number, T194 valid: 0-4; T234 valid: 0-10; T239 valid: 0-3 */
+ /** @brief PCIE controller number, T194 valid: 0-4; T234 valid: 0-10; T238 valid: 0-3 */
uint8_t pcie_controller;
+ /** @brief Nonzero value to enable controller, zero value to disable */
uint8_t enable;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF.
+ */
struct cmd_uphy_ep_controller_pll_off_request {
- /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T239 valid: 0 */
+ /** @brief EP controller number, T194 valid: 0, 4, 5; T234 valid: 5, 6, 7, 10; T238 valid: 0 */
uint8_t ep_controller;
} BPMP_ABI_PACKED;
+/**
+ * @cond bpmp_t238
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_DISPLAY_PORT_INIT.
+ */
struct cmd_uphy_display_port_init_request {
- /** @brief DisplayPort link rate, T239 valid: 1620, 2700, 5400, 8100, 2160, 2430, 3240, 4320, 6750 */
+ /** @brief DisplayPort link rate, T238 valid: 1620, 2700, 5400, 8100, 2160, 2430, 3240, 4320, 6750 */
uint16_t link_rate;
/** @brief 1: lane 0; 2: lane 1; 3: lane 0 and 1 */
uint16_t lanes_bitmap;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_XUSB_DYN_LANES_RESTORE.
+ */
struct cmd_uphy_xusb_dyn_lanes_restore_request {
/** @brief 1: lane 0; 2: lane 1; 3: lane 0 and 1 */
uint16_t lanes_bitmap;
} BPMP_ABI_PACKED;
+/** @endcond */
+
+/**
+ * @cond bpmp_th500
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_LANE_EOM_SCAN
+ */
+struct cmd_uphy_lane_eom_scan_request {
+ /** @brief UPHY brick number, valid: 0-5 */
+ uint32_t brick;
+ /** @brief UPHY lane number, valid: 0-15 for UPHY0-UPHY3, 0-1 for UPHY4-UPHY5 */
+ uint32_t lane;
+ /** @brief Perform EOM for PCIE GEN5 link: 1 for yes, 0 for no. */
+ uint32_t pcie_gen5;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for #MRQ_UPHY sub-command #CMD_UPHY_LANE_EOM_SCAN
+ */
+struct cmd_uphy_lane_eom_scan_response {
+ uint32_t data;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for #MRQ_UPHY sub-command #CMD_UPHY_PCIE_CONFIG_VDM
+ */
+struct cmd_uphy_pcie_config_vdm_request {
+ uint8_t pcie_controller;
+ /**
+ * @brief Bus/Dev/Func ID to be programmed for VDM.
+ *
+ * - bits[15..8] Bus
+ * - bits[7..3] Dev
+ * - bit [2..0] Func
+ */
+ uint16_t bdf;
+} BPMP_ABI_PACKED;
+/** @endcond */
/**
* @ingroup UPHY
- * @brief Request with #MRQ_UPHY
+ * @brief Request payload for the #MRQ_UPHY -command.
*
* Used by the sender of an #MRQ_UPHY message to control UPHY.
* The uphy_request is split into several sub-commands. CMD_UPHY_PCIE_LANE_MARGIN_STATUS
* requires no additional data. Others have a sub-command specific payload. Below table
* shows sub-commands with their corresponding payload data.
*
- * |sub-command |payload |
- * |------------------------------------ |----------------------------------------|
- * |CMD_UPHY_PCIE_LANE_MARGIN_CONTROL |uphy_set_margin_control |
- * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS | |
- * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
- * |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request |
- * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |cmd_uphy_ep_controller_pll_off_request |
- * |CMD_UPHY_PCIE_DISPLAY_PORT_INIT |cmd_uphy_display_port_init_request |
- * |CMD_UPHY_PCIE_DISPLAY_PORT_OFF | |
- * |CMD_UPHY_XUSB_DYN_LANES_RESTORE |cmd_uphy_xusb_dyn_lanes_restore_request |
+ * |sub-command |payload |
+ * |--------------------------------------|-----------------------------------------|
+ * |#CMD_UPHY_PCIE_LANE_MARGIN_CONTROL |#cmd_uphy_margin_control_request |
+ * |#CMD_UPHY_PCIE_LANE_MARGIN_STATUS |- |
+ * |#CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |#cmd_uphy_ep_controller_pll_init_request |
+ * |#CMD_UPHY_PCIE_CONTROLLER_STATE |#cmd_uphy_pcie_controller_state_request |
+ * |#CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |#cmd_uphy_ep_controller_pll_off_request |
+ *
+ * @cond bpmp_t238
+ * The following additional sub-commands are supported on T238 platforms:
*
+ * |sub-command |payload |
+ * |--------------------------------------|-----------------------------------------|
+ * |#CMD_UPHY_DISPLAY_PORT_INIT |#cmd_uphy_display_port_init_request |
+ * |#CMD_UPHY_DISPLAY_PORT_OFF |- |
+ * |#CMD_UPHY_XUSB_DYN_LANES_RESTORE |#cmd_uphy_xusb_dyn_lanes_restore_request |
+ * @endcond
+ *
+ * @cond bpmp_th500
+ * The following additional sub-commands are supported on TH500 platforms:
+ * |sub-command |payload |
+ * |--------------------------------------|-----------------------------------------|
+ * |#CMD_UPHY_LANE_EOM_SCAN |#cmd_uphy_lane_eom_scan_request |
+ * |#CMD_UPHY_PCIE_CONFIG_VDM |#cmd_uphy_pcie_config_vdm_request |
+ * @endcond
*/
-
struct mrq_uphy_request {
/** @brief Lane number. */
uint16_t lane;
- /** @brief Sub-command id. */
+ /** @brief Sub-command ID from @ref mrq_uphy_cmd. */
uint16_t cmd;
union {
@@ -3057,53 +4036,68 @@ struct mrq_uphy_request {
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
+ /** @cond bpmp_t238 */
struct cmd_uphy_display_port_init_request display_port_init;
struct cmd_uphy_xusb_dyn_lanes_restore_request xusb_dyn_lanes_restore;
+ /** @endcond */
+ /** @cond bpmp_th500 */
+ struct cmd_uphy_lane_eom_scan_request lane_eom_scan;
+ struct cmd_uphy_pcie_config_vdm_request pcie_vdm;
+ /** @endcond */
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
- * @brief Response to MRQ_UPHY
+ * @brief Response payload for the #MRQ_UPHY -command.
*
* Each sub-command supported by @ref mrq_uphy_request may return
* sub-command-specific data. Some do and some do not as indicated in
* the following table
*
- * |sub-command |payload |
- * |---------------------------- |------------------------|
- * |CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | |
- * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS |uphy_get_margin_status |
+ * |sub-command |payload |
+ * |-----------------------------------|---------------------------------|
+ * |#CMD_UPHY_PCIE_LANE_MARGIN_CONTROL |- |
+ * |#CMD_UPHY_PCIE_LANE_MARGIN_STATUS |#cmd_uphy_margin_status_response |
*
+ * @cond bpmp_th500
+ * The following additional sub-commands are supported on TH500 platforms:
+ * |sub-command |payload |
+ * |-----------------------------------|---------------------------------|
+ * |#CMD_UPHY_LANE_EOM_SCAN |#cmd_uphy_lane_eom_scan_response |
+ * |#CMD_UPHY_PCIE_CONFIG_VDM |- |
+ * @endcond
*/
-
struct mrq_uphy_response {
union {
struct cmd_uphy_margin_status_response uphy_get_margin_status;
+ /** @cond bpmp_th500 */
+ struct cmd_uphy_lane_eom_scan_response eom_status;
+ /** @endcond */
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
/** @} UPHY */
-/** @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500) */
+/** @endcond */
-/**
+/** @cond (bpmp_t194 || bpmp_t234 || bpmp_t238 || bpmp_t264)
* @ingroup MRQ_Codes
* @def MRQ_FMON
- * @brief Perform a frequency monitor configuration operations
+ * @brief Perform a frequency monitor configuration operation
*
- * * Platforms: T194 onwards
- * @cond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_fmon_request
* * Response Payload: @ref mrq_fmon_response
- * @endcond (bpmp_t194 || bpmp_t234 || bpmp_t239 || bpmp_th500)
*
* @addtogroup FMON
* @{
- * @cond (bpmp_t194 || bpmp_t234)
*/
-enum {
+
+/**
+ * @brief Sub-command identifiers for #MRQ_FMON
+ */
+enum mrq_fmon_cmd {
/**
* @brief Clamp FMON configuration to specified rate.
*
@@ -3111,62 +4105,80 @@ enum {
* clamped, FMON configuration is preserved when clock rate
* and/or state is changed.
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EACCES: FMON access error @n
- * -#BPMP_EBADCMD if subcommand is not supported @n
- * -#BPMP_EBADSLT: clamp FMON on cluster with auto-CC3 enabled @n
- * -#BPMP_EBUSY: fmon is already clamped at different rate @n
- * -#BPMP_EFAULT: self-diagnostic error @n
- * -#BPMP_EINVAL: invalid FMON configuration @n
- * -#BPMP_EOPNOTSUPP: not in production mode @n
- * -#BPMP_ENODEV: invalid clk_id @n
- * -#BPMP_ENOENT: no calibration data, uninitialized @n
- * -#BPMP_ENOTSUP: avfs config not set @n
- * -#BPMP_ENOSYS: clamp FMON on cluster clock w/ no NAFLL @n
- * -#BPMP_ETIMEDOUT: operation timed out @n
+ * mrq_response::err for this sub-command is defined to be:
+ *
+ * | Value | Description |
+ * |-------------------|---------------------------------------------------|
+ * | 0 | Operation was successful. |
+ * | -#BPMP_EBADCMD | Subcommand is not supported. |
+ * | -#BPMP_EACCES | FMON access error. |
+ * | -#BPMP_EBADSLT | Clamp FMON on cluster with auto-CC3 enabled. |
+ * | -#BPMP_EBUSY | FMON is already clamped at different rate. |
+ * | -#BPMP_EFAULT | Self-diagnostic error detected. |
+ * | -#BPMP_EINVAL | Invalid FMON configuration. |
+ * | -#BPMP_EOPNOTSUPP | Not in production mode. |
+ * | -#BPMP_ENODEV | Invalid clock ID in mrq_fmon_request::cmd_and_id. |
+ * | -#BPMP_ENOENT | No calibration data, uninitialized. |
+ * | -#BPMP_ENOTSUP | AVFS config not set. |
+ * | -#BPMP_ENOSYS | Clamp FMON on cluster clock w/ no NAFLL. |
+ * | -#BPMP_ETIMEDOUT | Operation timed out. |
*/
CMD_FMON_GEAR_CLAMP = 1,
+
/**
* @brief Release clamped FMON configuration.
*
* Allow FMON configuration to follow monitored clock rate
* and/or state changes.
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EBADCMD if subcommand is not supported @n
- * -#BPMP_ENODEV: invalid clk_id @n
- * -#BPMP_ENOENT: no calibration data, uninitialized @n
- * -#BPMP_ENOTSUP: avfs config not set @n
- * -#BPMP_EOPNOTSUPP: not in production mode @n
+ * mrq_response::err for this sub-command is defined to be:
+ *
+ * | Value | Description |
+ * |-------------------|---------------------------------------------------|
+ * | 0 | Operation was successful. |
+ * | -#BPMP_EBADCMD | Subcommand is not supported. |
+ * | -#BPMP_ENODEV | Invalid clock ID in mrq_fmon_request::cmd_and_id. |
+ * | -#BPMP_ENOENT | No calibration data, uninitialized. |
+ * | -#BPMP_ENOTSUP | AVFS config not set. |
+ * | -#BPMP_EOPNOTSUPP | Not in production mode. |
*/
CMD_FMON_GEAR_FREE = 2,
+
/**
- * @brief Return rate FMON is clamped at, or 0 if FMON is not
- * clamped.
+ * @brief Return rate FMON is clamped at, or 0 if FMON is not clamped.
*
- * Inherently racy, since clamp state can be changed
- * concurrently. Useful for testing.
+ * Inherently racy, since clamp state can be changed concurrently,
+ * only provided and useful for testing purposes.
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EBADCMD if subcommand is not supported @n
- * -#BPMP_ENODEV: invalid clk_id @n
- * -#BPMP_ENOENT: no calibration data, uninitialized @n
- * -#BPMP_ENOTSUP: avfs config not set @n
- * -#BPMP_EOPNOTSUPP: not in production mode @n
+ * mrq_response::err for this sub-command is defined to be:
+ *
+ * | Value | Description |
+ * |-------------------|---------------------------------------------------|
+ * | 0 | Operation was successful. |
+ * | -#BPMP_EBADCMD | Subcommand is not supported. |
+ * | -#BPMP_ENODEV | Invalid clock ID in mrq_fmon_request::cmd_and_id. |
+ * | -#BPMP_ENOENT | No calibration data, uninitialized. |
+ * | -#BPMP_ENOTSUP | AVFS config not set. |
+ * | -#BPMP_EOPNOTSUPP | Not in production mode. |
*/
CMD_FMON_GEAR_GET = 3,
+
/**
* @brief Return current status of FMON faults detected by FMON
- * h/w or s/w since last invocation of this command.
- * Clears fault status.
+ * HW or SW since last invocation of this sub-command.
+ * Clears fault status.
*
- * mrq_response::err is 0 if the operation was successful, or @n
- * -#BPMP_EBADCMD if subcommand is not supported @n
- * -#BPMP_EINVAL: invalid fault type @n
- * -#BPMP_ENODEV: invalid clk_id @n
- * -#BPMP_ENOENT: no calibration data, uninitialized @n
- * -#BPMP_ENOTSUP: avfs config not set @n
- * -#BPMP_EOPNOTSUPP: not in production mode @n
+ * mrq_response::err for this sub-command is defined to be:
+ *
+ * | Value | Description |
+ * |-------------------|---------------------------------------------------|
+ * | 0 | Operation was successful. |
+ * | -#BPMP_EBADCMD | Subcommand is not supported. |
+ * | -#BPMP_ENODEV | Invalid clock ID in mrq_fmon_request::cmd_and_id. |
+ * | -#BPMP_ENOENT | No calibration data, uninitialized. |
+ * | -#BPMP_ENOTSUP | AVFS config not set. |
+ * | -#BPMP_EOPNOTSUPP | Not in production mode. |
+ * | -#BPMP_EINVAL | Invalid fault type. |
*/
CMD_FMON_FAULT_STS_GET = 4,
};
@@ -3177,25 +4189,30 @@ enum {
*/
#define CMD_FMON_NUM 4
-/** @endcond DEPRECATED */
+/** @endcond */
/**
- * @defgroup fmon_fault_type FMON fault type
+ * @defgroup fmon_fault_type FMON fault types
* @addtogroup fmon_fault_type
* @{
*/
-/** @brief All detected FMON faults (h/w or s/w) */
+/** @brief All detected FMON faults (HW or SW) */
#define FMON_FAULT_TYPE_ALL 0U
-/** @brief FMON faults detected by h/w */
+/** @brief FMON faults detected by HW */
#define FMON_FAULT_TYPE_HW 1U
-/** @brief FMON faults detected by s/w */
+/** @brief FMON faults detected by SW */
#define FMON_FAULT_TYPE_SW 2U
/** @} fmon_fault_type */
-
+/**
+ * @brief Request payload for #MRQ_FMON sub-command #CMD_FMON_GEAR_CLAMP.
+ */
struct cmd_fmon_gear_clamp_request {
+ /** @brief Unused / reserved */
int32_t unused;
+
+ /** @brief Target rate in Hz. Valid range for the rate is [1, INT64_MAX] */
int64_t rate;
} BPMP_ABI_PACKED;
@@ -3219,40 +4236,63 @@ struct cmd_fmon_gear_get_request {
BPMP_ABI_EMPTY
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_FMON sub-command #CMD_FMON_GEAR_GET.
+ */
struct cmd_fmon_gear_get_response {
int64_t rate;
} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for #MRQ_FMON sub-command #CMD_FMON_FAULT_STS_GET
+ */
struct cmd_fmon_fault_sts_get_request {
- uint32_t fault_type; /**< @ref fmon_fault_type */
+ /**
+ * @brief Which fault types to return in response:
+ *
+ * | Value | Description |
+ * |----------------------|-----------------------------------------|
+ * | #FMON_FAULT_TYPE_ALL | Return all detected faults (HW and SW). |
+ * | #FMON_FAULT_TYPE_HW | Return only HW detected faults. |
+ * | #FMON_FAULT_TYPE_SW | Return only SW detected faults. |
+ */
+ uint32_t fault_type;
} BPMP_ABI_PACKED;
+/**
+ * @brief Response payload for #MRQ_FMON sub-command #CMD_FMON_FAULT_STS_GET
+ */
struct cmd_fmon_fault_sts_get_response {
+ /**
+ * Bitmask of detected HW / SW specific faults, or 0 if no faults have
+ * been detected since last invocation of #CMD_FMON_FAULT_STS_GET.
+ */
uint32_t fault_sts;
} BPMP_ABI_PACKED;
/**
* @ingroup FMON
- * @brief Request with #MRQ_FMON
+ * @brief Request payload for the #MRQ_FMON -command.
*
* Used by the sender of an #MRQ_FMON message to configure clock
* frequency monitors. The FMON request is split into several
- * sub-commands. Some sub-commands require no additional data.
- * Others have a sub-command specific payload
+ * sub-commands. Sub-command specific payloads are defined in
+ * the following table:
*
- * |sub-command |payload |
- * |----------------------------|-----------------------|
- * |CMD_FMON_GEAR_CLAMP |fmon_gear_clamp |
- * |CMD_FMON_GEAR_FREE |- |
- * |CMD_FMON_GEAR_GET |- |
- * |CMD_FMON_FAULT_STS_GET |fmon_fault_sts_get |
+ * |Sub-command |Payload |
+ * |------------------------|--------------------------------|
+ * |#CMD_FMON_GEAR_CLAMP |#cmd_fmon_gear_clamp_request |
+ * |#CMD_FMON_GEAR_FREE |- |
+ * |#CMD_FMON_GEAR_GET |- |
+ * |#CMD_FMON_FAULT_STS_GET |#cmd_fmon_fault_sts_get_request |
*
*/
struct mrq_fmon_request {
- /** @brief Sub-command and clock id concatenated to 32-bit word.
- * - bits[31..24] is the sub-cmd.
- * - bits[23..0] is monitored clock id used to select target
- * FMON
+ /**
+ * @brief Sub-command and clock id concatenated to 32-bit word.
+ *
+ * - bits[31..24] -> Sub-command identifier from @ref mrq_fmon_cmd.
+ * - bits[23..0] -> Monitored clock identifier used to select target FMON.
*/
uint32_t cmd_and_id;
@@ -3268,20 +4308,19 @@ struct mrq_fmon_request {
/**
* @ingroup FMON
- * @brief Response to MRQ_FMON
+ * @brief Response payload for the #MRQ_FMON -command.
*
* Each sub-command supported by @ref mrq_fmon_request may
* return sub-command-specific data as indicated below.
*
- * |sub-command |payload |
- * |----------------------------|------------------------|
- * |CMD_FMON_GEAR_CLAMP |- |
- * |CMD_FMON_GEAR_FREE |- |
- * |CMD_FMON_GEAR_GET |fmon_gear_get |
- * |CMD_FMON_FAULT_STS_GET |fmon_fault_sts_get |
+ * |Sub-command |Payload |
+ * |------------------------|---------------------------------|
+ * |#CMD_FMON_GEAR_CLAMP |- |
+ * |#CMD_FMON_GEAR_FREE |- |
+ * |#CMD_FMON_GEAR_GET |#cmd_fmon_gear_get_response |
+ * |#CMD_FMON_FAULT_STS_GET |#cmd_fmon_fault_sts_get_response |
*
*/
-
struct mrq_fmon_response {
union {
/** @private */
@@ -3293,17 +4332,15 @@ struct mrq_fmon_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @endcond (bpmp_t194 || bpmp_t234) */
/** @} FMON */
+/** @endcond */
-/**
+/** @cond (bpmp_t194)
* @ingroup MRQ_Codes
* @def MRQ_EC
* @brief Provide status information on faults reported by Error
* Collator (EC) to HSM.
*
- * * Platforms: T194
- * @cond bpmp_t194
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_ec_request
@@ -3311,10 +4348,8 @@ struct mrq_fmon_response {
*
* @note This MRQ ABI is under construction, and subject to change
*
- * @endcond bpmp_t194
* @addtogroup EC
* @{
- * @cond bpmp_t194
*/
enum {
/**
@@ -3325,7 +4360,7 @@ enum {
* -#BPMP_ENODEV if target EC is not owned by BPMP @n
* -#BPMP_EACCES if target EC power domain is turned off @n
* -#BPMP_EBADCMD if subcommand is not supported
- * @endcond DEPRECATED
+ * @endcond
*/
CMD_EC_STATUS_GET = 1, /* deprecated */
@@ -3572,7 +4607,7 @@ struct cmd_ec_status_get_response {
/** @brief EC error descriptors */
union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
} BPMP_ABI_PACKED;
-/** @endcond DEPRECATED */
+/** @endcond */
struct cmd_ec_status_ex_get_response {
/** @brief Target EC id (the same id received with request). */
@@ -3610,7 +4645,7 @@ struct cmd_ec_status_ex_get_response {
* |sub-command |payload |
* |----------------------------|-----------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
- * @endcond DEPRECATED
+ * @endcond
*
* |sub-command |payload |
* |----------------------------|-----------------------|
@@ -3638,7 +4673,7 @@ struct mrq_ec_request {
* |sub-command |payload |
* |----------------------------|------------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
- * @endcond DEPRECATED
+ * @endcond
*
* |sub-command |payload |
* |----------------------------|------------------------|
@@ -3652,22 +4687,20 @@ struct mrq_ec_response {
* @cond DEPRECATED
*/
struct cmd_ec_status_get_response ec_status_get;
- /** @endcond DEPRECATED */
+ /** @endcond */
struct cmd_ec_status_ex_get_response ec_status_ex_get;
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @endcond bpmp_t194 */
/** @} EC */
+/** @endcond */
-/**
+/** @cond (bpmp_th500)
* @ingroup MRQ_Codes
* @def MRQ_TELEMETRY
* @brief Get address of memory buffer refreshed with recently sampled
* telemetry data
*
- * * Platforms: TH500 onwards
- * @cond bpmp_th500
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -3677,14 +4710,17 @@ struct mrq_ec_response {
*/
/**
- * @brief Response to #MRQ_TELEMETRY
+ * @brief Response payload for the #MRQ_TELEMETRY -command
*
- * mrq_response::err is
- * * 0: Telemetry data is available at returned address
- * * -#BPMP_EACCES: MRQ master is not allowed to request buffer refresh
- * * -#BPMP_ENAVAIL: Telemetry buffer cannot be refreshed via this MRQ channel
- * * -#BPMP_ENOTSUP: Telemetry buffer is not supported by BPMP-FW
- * * -#BPMP_ENODEV: Telemetry mrq is not supported by BPMP-FW
+ * mrq_response::err is defined as:
+ *
+ * | Value | Description |
+ * |-----------------|------------------------------------------------------------|
+ * | 0 | Telemetry data is available at returned address. |
+ * | -#BPMP_EACCES | MRQ master is not allowed to request buffer refresh. |
+ * | -#BPMP_ENAVAIL | Telemetry buffer cannot be refreshed via this MRQ channel. |
+ * | -#BPMP_ENOTSUP | Telemetry buffer is not supported by BPMP-FW. |
+ * | -#BPMP_ENODEV | Telemetry MRQ is not supported by BPMP-FW. |
*/
struct mrq_telemetry_response {
/** @brief Physical address of telemetry data buffer */
@@ -3692,15 +4728,112 @@ struct mrq_telemetry_response {
} BPMP_ABI_PACKED;
/** @} Telemetry */
-/** @endcond bpmp_th500 */
+/** @endcond */
+
+/** @cond (bpmp_tb500)
+ * @ingroup MRQ_Codes
+ * @def MRQ_TELEMETRY_EX
+ * @brief Get telemetry configuration settings.
+ *
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_telemetry_ex_request
+ * * Response Payload: @ref mrq_telemetry_ex_response
+ *
+ * @addtogroup Telemetry_ex
+ * @{
+ */
/**
+ * @brief Sub-command identifiers for #MRQ_TELEMETRY_EX.
+ */
+enum mrq_telemetry_ex_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_TELEMETRY_EX sub-command.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_TELEMETRY_EX_QUERY_ABI = 0,
+
+ /**
+ * @brief Get telemetry buffer base address and data size
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_TELEMETRY_EX is not supported by BPMP-FW. |
+ */
+ CMD_TELEMETRY_EX_BASE_SZ_GET = 1,
+};
+
+/**
+ * @brief Request data for #MRQ_TELEMETRY_EX sub-command
+ * #CMD_TELEMETRY_EX_QUERY_ABI
+ */
+struct cmd_telemetry_ex_query_abi_request {
+ /** @brief Sub-command identifier from @ref mrq_telemetry_ex_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for #MRQ_TELEMETRY_EX sub-command
+ * #CMD_TELEMETRY_EX_BASE_SZ_GET
+ */
+struct cmd_telemetry_ex_base_sz_get_response {
+ /**
+ * @brief Physical address of telemetry data buffer
+ *
+ * 0 if no buffer is allocated for the initiator sending MRQ.
+ */
+ uint64_t buf_base_addr;
+ /** @brief Telemetry data size in bytes */
+ uint32_t buf_size;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_TELEMETRY_EX -command
+ *
+ * | Sub-command | Request payload |
+ * |-------------------------------|----------------------------------------|
+ * | #CMD_TELEMETRY_EX_QUERY_ABI | #cmd_telemetry_ex_query_abi_request |
+ * | #CMD_TELEMETRY_EX_BASE_SZ_GET | - |
+ */
+struct mrq_telemetry_ex_request {
+ /** @brief Sub-command ID from @ref mrq_telemetry_ex_cmd. */
+ uint32_t cmd;
+ union {
+ struct cmd_telemetry_ex_query_abi_request
+ telemetry_ex_query_abi_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for the #MRQ_TELEMETRY_EX -command.
+ *
+ * | Sub-command | Response payload |
+ * |-------------------------------|----------------------------------------|
+ * | #CMD_TELEMETRY_EX_QUERY_ABI | - |
+ * | #CMD_TELEMETRY_EX_BASE_SZ_GET | #cmd_telemetry_ex_base_sz_get_response |
+ */
+struct mrq_telemetry_ex_response {
+ union {
+ struct cmd_telemetry_ex_base_sz_get_response
+ telemetry_ex_base_sz_get_rsp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} Telemetry_ex */
+/** @endcond */
+
+/** @cond (bpmp_th500 || bpmp_tb500)
* @ingroup MRQ_Codes
* @def MRQ_PWR_LIMIT
* @brief Control power limits.
*
- * * Platforms: TH500 onwards
- * @cond bpmp_th500
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_pwr_limit_request
@@ -3709,10 +4842,14 @@ struct mrq_telemetry_response {
* @addtogroup Pwrlimit
* @{
*/
+
+/**
+ * @brief Sub-command identifiers for #MRQ_PWR_LIMIT.
+ */
enum mrq_pwr_limit_cmd {
/**
* @brief Check whether the BPMP-FW supports the specified
- * command
+ * #MRQ_PWR_LIMIT sub-command.
*
* mrq_response::err is 0 if the specified request is
* supported and -#BPMP_ENODEV otherwise.
@@ -3722,31 +4859,43 @@ enum mrq_pwr_limit_cmd {
/**
* @brief Set power limit
*
- * mrq_response:err is
- * * 0: Success
- * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
- * * -#BPMP_ENAVAIL: Invalid request parameters
- * * -#BPMP_EACCES: Request is not accepted
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_PWR_LIMIT is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_EACCES | Request is not accepted. |
*/
CMD_PWR_LIMIT_SET = 1,
/**
* @brief Get power limit setting
*
- * mrq_response:err is
- * * 0: Success
- * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
- * * -#BPMP_ENAVAIL: Invalid request parameters
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_PWR_LIMIT is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
*/
CMD_PWR_LIMIT_GET = 2,
/**
- * @brief Get current power cap
+ * @brief Get current aggregated power cap
*
- * mrq_response:err is
- * * 0: Success
- * * -#BPMP_ENODEV: Pwr limit mrq is not supported by BPMP-FW
- * * -#BPMP_ENAVAIL: Invalid request parameters
+ * Get currently applied power cap for the specified limit id
+ * aggregated across all limit sources and types.
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_PWR_LIMIT is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
*/
CMD_PWR_LIMIT_CURR_CAP = 3,
};
@@ -3761,7 +4910,7 @@ enum mrq_pwr_limit_cmd {
#define PWR_LIMIT_TYPE_BOUND_MAX 1U
/** @brief Limit value specifies minimum possible target cap */
#define PWR_LIMIT_TYPE_BOUND_MIN 2U
-/** @brief Number of limit types supported by mrq interface */
+/** @brief Number of limit types supported by #MRQ_PWR_LIMIT command */
#define PWR_LIMIT_TYPE_NUM 3U
/** @} bpmp_pwr_limit_type */
@@ -3770,7 +4919,8 @@ enum mrq_pwr_limit_cmd {
* @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_QUERY_ABI
*/
struct cmd_pwr_limit_query_abi_request {
- uint32_t cmd_code; /**< @ref mrq_pwr_limit_cmd */
+ /** @brief Sub-command identifier from @ref mrq_pwr_limit_cmd */
+ uint32_t cmd_code;
} BPMP_ABI_PACKED;
/**
@@ -3782,56 +4932,66 @@ struct cmd_pwr_limit_query_abi_request {
* is ignored by the arbitration (i.e., indicates "no limit set").
*/
struct cmd_pwr_limit_set_request {
- uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ /** @brief Power limit identifier from @ref bpmp_pwr_limit_id */
+ uint32_t limit_id;
+ /** @brief Power limit source identifier from @ref bpmp_pwr_limit_src */
uint32_t limit_src; /**< @ref bpmp_pwr_limit_src */
- uint32_t limit_type; /**< @ref bpmp_pwr_limit_type */
+ /** @brief Power limit type from @ref bpmp_pwr_limit_type */
+ uint32_t limit_type;
+ /** @brief New power limit value */
uint32_t limit_setting;
} BPMP_ABI_PACKED;
/**
- * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_GET
+ * @brief Request payload for #MRQ_PWR_LIMIT sub-command #CMD_PWR_LIMIT_GET
*
* Get previously set from specified source specified limit value of specified
* type.
*/
struct cmd_pwr_limit_get_request {
- uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ /** @brief Power limit identifier from @ref bpmp_pwr_limit_id */
+ uint32_t limit_id;
+ /** @brief Power limit source identifier from @ref bpmp_pwr_limit_src */
uint32_t limit_src; /**< @ref bpmp_pwr_limit_src */
- uint32_t limit_type; /**< @ref bpmp_pwr_limit_type */
+ /** @brief Power limit type from @ref bpmp_pwr_limit_type */
+ uint32_t limit_type;
} BPMP_ABI_PACKED;
/**
- * @brief Response data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_GET
+ * @brief Response payload for #MRQ_PWR_LIMIT sub-command #CMD_PWR_LIMIT_GET
*/
struct cmd_pwr_limit_get_response {
+ /** @brief Power limit value */
uint32_t limit_setting;
} BPMP_ABI_PACKED;
/**
- * @brief Request data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_CURR_CAP
+ * @brief Request payload for #MRQ_PWR_LIMIT sub-command #CMD_PWR_LIMIT_CURR_CAP
*
* For specified limit get current power cap aggregated from all sources.
*/
struct cmd_pwr_limit_curr_cap_request {
- uint32_t limit_id; /**< @ref bpmp_pwr_limit_id */
+ /** @brief Power limit identifier from @ref bpmp_pwr_limit_id */
+ uint32_t limit_id;
} BPMP_ABI_PACKED;
/**
- * @brief Response data for #MRQ_PWR_LIMIT command CMD_PWR_LIMIT_CURR_CAP
+ * @brief Response payload for #MRQ_PWR_LIMIT sub-command #CMD_PWR_LIMIT_CURR_CAP
*/
struct cmd_pwr_limit_curr_cap_response {
+ /** @brief Current power cap value */
uint32_t curr_cap;
} BPMP_ABI_PACKED;
/**
- * @brief Request with #MRQ_PWR_LIMIT
+ * @brief Request payload for the #MRQ_PWR_LIMIT -command
*
- * |sub-command |payload |
- * |----------------------------|---------------------------------|
- * |CMD_PWR_LIMIT_QUERY_ABI | cmd_pwr_limit_query_abi_request |
- * |CMD_PWR_LIMIT_SET | cmd_pwr_limit_set_request |
- * |CMD_PWR_LIMIT_GET | cmd_pwr_limit_get_request |
- * |CMD_PWR_LIMIT_CURR_CAP | cmd_pwr_limit_curr_cap_request |
+ * | Sub-command | Request payload |
+ * |--------------------------|----------------------------------|
+ * | #CMD_PWR_LIMIT_QUERY_ABI | #cmd_pwr_limit_query_abi_request |
+ * | #CMD_PWR_LIMIT_SET | #cmd_pwr_limit_set_request |
+ * | #CMD_PWR_LIMIT_GET | #cmd_pwr_limit_get_request |
+ * | #CMD_PWR_LIMIT_CURR_CAP | #cmd_pwr_limit_curr_cap_request |
*/
struct mrq_pwr_limit_request {
uint32_t cmd;
@@ -3844,14 +5004,14 @@ struct mrq_pwr_limit_request {
} BPMP_ABI_PACKED;
/**
- * @brief Response to MRQ_PWR_LIMIT
+ * @brief Response payload for the #MRQ_PWR_LIMIT -command.
*
- * |sub-command |payload |
- * |----------------------------|---------------------------------|
- * |CMD_PWR_LIMIT_QUERY_ABI | - |
- * |CMD_PWR_LIMIT_SET | - |
- * |CMD_PWR_LIMIT_GET | cmd_pwr_limit_get_response |
- * |CMD_PWR_LIMIT_CURR_CAP | cmd_pwr_limit_curr_cap_response |
+ * | Sub-command | Response payload |
+ * |--------------------------|----------------------------------|
+ * | #CMD_PWR_LIMIT_QUERY_ABI | - |
+ * | #CMD_PWR_LIMIT_SET | - |
+ * | #CMD_PWR_LIMIT_GET | #cmd_pwr_limit_get_response |
+ * | #CMD_PWR_LIMIT_CURR_CAP | #cmd_pwr_limit_curr_cap_response |
*/
struct mrq_pwr_limit_response {
union {
@@ -3860,17 +5020,395 @@ struct mrq_pwr_limit_response {
} BPMP_UNION_ANON;
} BPMP_ABI_PACKED;
-/** @} PwrLimit */
-/** @endcond bpmp_th500 */
+/** @} Pwrlimit */
+/** @endcond */
+
+
+
+/** @cond (bpmp_th500)
+ * @ingroup MRQ_Codes
+ * @def MRQ_PWRMODEL
+ * @brief Retrieve power evaluated by SoC power model.
+ *
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pwrmodel_request
+ * * Response Payload: @ref mrq_pwrmodel_response
+ *
+ * @addtogroup Pwrmodel
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_PWRMODEL.
+ */
+enum mrq_pwrmodel_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_PWRMODEL sub-command.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_PWRMODEL_QUERY_ABI = 0,
+
+ /**
+ * @brief Get power model output power
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_PWRMODEL is not supported by BPMP-FW. |
+ * | -#BPMP_ERANGE | Power model calculation overflow. |
+ */
+ CMD_PWRMODEL_PWR_GET = 1,
+};
+
+/**
+ * @brief Request data for #MRQ_PWRMODEL sub-command #CMD_PWRMODEL_QUERY_ABI
+ */
+struct cmd_pwrmodel_query_abi_request {
+ /** @brief Sub-command identifier from @ref mrq_pwrmodel_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for #MRQ_PWRMODEL sub-command #CMD_PWRMODEL_PWR_GET
+ *
+ * Retrieve power evaluated by power model for specified work-load factor,
+ * temperature, and cpu iso frequency for all cores.
+ */
+struct cmd_pwrmodel_pwr_get_request {
+ /** @brief Unitless work load factor to evaluate power model at */
+ uint32_t work_load_factor;
+ /** @brief CPU frequency in kHz to evaluate power model at */
+ uint32_t cpu_frequency;
+ /** @brief Temperature in mC to evaluate power model at */
+ int32_t temperature;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for #MRQ_PWRMODEL sub-command #CMD_PWRMODEL_PWR_GET
+ */
+struct cmd_pwrmodel_pwr_get_response {
+ /** @brief Power model output in mW */
+ uint32_t power;
+} BPMP_ABI_PACKED;
+/**
+ * @brief Request payload for the #MRQ_PWRMODEL -command
+ *
+ * | Sub-command | Request payload |
+ * |--------------------------|----------------------------------|
+ * | #CMD_PWRMODEL_QUERY_ABI | #cmd_pwrmodel_query_abi_request |
+ * | #CMD_PWRMODEL_PWR_GET | #cmd_pwrmodel_pwr_get_request |
+ */
+struct mrq_pwrmodel_request {
+ uint32_t cmd;
+ union {
+ struct cmd_pwrmodel_query_abi_request pwrmodel_query_abi_req;
+ struct cmd_pwrmodel_pwr_get_request pwrmodel_pwr_get_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
+ * @brief Response payload for the #MRQ_PWRMODEL -command.
+ *
+ * | Sub-command | Response payload |
+ * |--------------------------|----------------------------------|
+ * | #CMD_PWRMODEL_QUERY_ABI | - |
+ * | #CMD_PWRMODEL_PWR_GET | #cmd_pwrmodel_pwr_get_response |
+ */
+struct mrq_pwrmodel_response {
+ union {
+ struct cmd_pwrmodel_pwr_get_response pwrmodel_pwr_get_rsp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} Pwrmodel */
+/** @endcond */
+
+
+/** @cond (bpmp_th500)
+ * @ingroup MRQ_Codes
+ * @def MRQ_PWR_CNTRL
+ * @brief Configure power controllers.
+ *
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pwr_cntrl_request
+ * * Response Payload: @ref mrq_pwr_cntrl_response
+ *
+ * @addtogroup Pwrcntrl
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_PWR_CNTRL.
+ */
+enum mrq_pwr_cntrl_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_PWR_CNTRL sub-command.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_PWR_CNTRL_QUERY_ABI = 0,
+
+ /**
+ * @brief Switch power controller to/out of bypass mode
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_PWR_CNTRL is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_ENOTSUP | Bypass mode is not supported. |
+ */
+ CMD_PWR_CNTRL_BYPASS_SET = 1,
+
+ /**
+ * @brief Get power controller bypass mode status
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_PWR_CNTRL is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_PWR_CNTRL_BYPASS_GET = 2,
+};
+
+/**
+ * @brief Request data for #MRQ_PWR_CNTRL sub-command #CMD_PWR_CNTRL_QUERY_ABI
+ */
+struct cmd_pwr_cntrl_query_abi_request {
+ /** @brief Sub-command identifier from @ref mrq_pwr_cntrl_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_CNTRL sub-command #CMD_PWR_CNTRL_BYPASS_SET
+ *
+ * Switch specified power controller to / out of bypass mode provided such
+ * mode is supported by the controller.
+ */
+struct cmd_pwr_cntrl_bypass_set_request {
+ /** @brief Power controller identifier from @ref bpmp_pwr_cntrl_id */
+ uint32_t cntrl_id;
+ /**
+ * @brief Bypass setting.
+ *
+ * Valid values:
+ *
+ * * 1 to enter bypass mode,
+ * * 0 to exit bypass mode.
+ */
+ uint32_t bypass_setting;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PWR_CNTRL sub-command #CMD_PWR_CNTRL_BYPASS_GET
+ *
+ * Get bypass mode status of the specified power controller.
+ */
+struct cmd_pwr_cntrl_bypass_get_request {
+ /** @brief Power controller identifier from @ref bpmp_pwr_cntrl_id */
+ uint32_t cntrl_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PWR_CNTRL sub-command #CMD_PWR_CNTRL_BYPASS_GET
+ *
+ * Get current bypass mode status if such mode is supported by the controller.
+ * Otherwise, return "out of bypass".
+ */
+struct cmd_pwr_cntrl_bypass_get_response {
+ /**
+ * @brief Bypass mode status: 1 controller is in bypass,
+ * 0 controller is out of bypass.
+ */
+ uint32_t bypass_status;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_PWR_CNTRL -command
+ *
+ * | Sub-command | Request payload |
+ * |---------------------------|-----------------------------------|
+ * | #CMD_PWR_CNTRL_QUERY_ABI | #cmd_pwr_cntrl_query_abi_request |
+ * | #CMD_PWR_CNTRL_BYPASS_SET | #cmd_pwr_cntrl_bypass_set_request |
+ * | #CMD_PWR_CNTRL_BYPASS_GET | #cmd_pwr_cntrl_bypass_get_request |
+ */
+struct mrq_pwr_cntrl_request {
+ uint32_t cmd;
+ union {
+ struct cmd_pwr_cntrl_query_abi_request pwr_cntrl_query_abi_req;
+ struct cmd_pwr_cntrl_bypass_set_request pwr_cntrl_bypass_set_req;
+ struct cmd_pwr_cntrl_bypass_get_request pwr_cntrl_bypass_get_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for the #MRQ_PWR_CNTRL -command.
+ *
+ * | Sub-command | Response payload |
+ * |---------------------------|-----------------------------------|
+ * | #CMD_PWR_CNTRL_QUERY_ABI | - |
+ * | #CMD_PWR_CNTRL_BYPASS_SET | - |
+ * | #CMD_PWR_CNTRL_BYPASS_GET | #cmd_pwr_cntrl_bypass_get_response|
+ */
+struct mrq_pwr_cntrl_response {
+ union {
+ struct cmd_pwr_cntrl_bypass_get_response pwr_cntrl_bypass_get_rsp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} Pwrcntrl */
+/** @endcond */
+
+
+/** @cond (bpmp_t264)
+ * @ingroup MRQ_Codes
+ * @def MRQ_SLC
+ * @brief Configure SLC state.
+ *
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_slc_request
+ * * Response Payload: @ref mrq_slc_response
+ *
+ * @addtogroup Slc
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_SLC.
+ */
+enum mrq_slc_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_SLC sub-command.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_SLC_QUERY_ABI = 0,
+
+ /**
+ * @brief Switch SLC to/out of bypass mode
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_SLC is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_ENOTSUP | Bypass mode is not supported. |
+ */
+ CMD_SLC_BYPASS_SET = 1,
+
+ /**
+ * @brief Get SLC bypass mode status
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|---------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_SLC is not supported by BPMP-FW. |
+ */
+ CMD_SLC_BYPASS_GET = 2,
+};
+
+/**
+ * @brief Request data for #MRQ_SLC sub-command #CMD_SLC_QUERY_ABI
+ */
+struct cmd_slc_query_abi_request {
+ /** @brief Sub-command identifier from @ref mrq_slc_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_SLC sub-command #CMD_SLC_BYPASS_SET
+ *
+ * Switch SLC to / out of bypass mode provided such
+ * mode is supported by the SLC.
+ */
+struct cmd_slc_bypass_set_request {
+ /**
+ * @brief Bypass setting.
+ *
+ * Valid values:
+ *
+ * * 1 to enter bypass mode,
+ * * 0 to exit bypass mode.
+ */
+ uint32_t bypass_setting;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_SLC sub-command #CMD_SLC_BYPASS_GET
+ *
+ * Get current bypass mode status if such mode is supported by the SLC.
+ * Otherwise, return "out of bypass".
+ */
+struct cmd_slc_bypass_get_response {
+ /**
+ * @brief Bypass mode status: 1 SLC is in bypass,
+ * 0 SLC is out of bypass.
+ */
+ uint32_t bypass_status;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_SLC -command
+ *
+ * | Sub-command | Request payload |
+ * |---------------------------|-----------------------------------|
+ * | #CMD_SLC_QUERY_ABI | #cmd_slc_query_abi_request |
+ * | #CMD_SLC_BYPASS_SET | #cmd_slc_bypass_set_request |
+ * | #CMD_SLC_BYPASS_GET | - |
+ */
+struct mrq_slc_request {
+ uint32_t cmd;
+ union {
+ struct cmd_slc_query_abi_request slc_query_abi_req;
+ struct cmd_slc_bypass_set_request slc_bypass_set_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for the #MRQ_SLC -command.
+ *
+ * | Sub-command | Response payload |
+ * |---------------------------|-----------------------------------|
+ * | #CMD_SLC_QUERY_ABI | - |
+ * | #CMD_SLC_BYPASS_SET | - |
+ * | #CMD_SLC_BYPASS_GET | #cmd_slc_bypass_get_response |
+ */
+struct mrq_slc_response {
+ union {
+ struct cmd_slc_bypass_get_response slc_bypass_get_rsp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} Slc */
+/** @endcond */
+
+/** @cond (bpmp_th500)
* @ingroup MRQ_Codes
* @def MRQ_GEARS
* @brief Get thresholds for NDIV offset switching
*
- * * Platforms: TH500 onwards
- * @cond bpmp_th500
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -3905,11 +5443,1248 @@ struct mrq_pwr_limit_response {
*/
struct mrq_gears_response {
/** @brief number of online CPUs for each gear */
- uint32_t ncpu[16];
+ uint32_t ncpu[8];
+ /** @brief ndiv offset for each gear */
+ uint32_t ndiv_offset[8];
+ /** @brief voltage below which gears are disabled */
+ uint32_t uv_threshold;
} BPMP_ABI_PACKED;
/** @} Gears */
-/** @endcond bpmp_th500 */
+/** @endcond */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_SHUTDOWN
+ * @brief System shutdown
+ *
+ * This message indicates system shutdown or reboot request. BPMP will
+ * initiate system shutdown/reboot after receiving this message, it
+ * may include turning off some rails in sequence and programming
+ * PMIC.
+ *
+ * * Initiators: CPU_S, MCE
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_shutdown_request
+ * * Response Payload: N/A
+ * @addtogroup Shutdown
+ * @{
+ */
+
+/**
+ * @brief Request with #MRQ_SHUTDOWN
+ */
+struct mrq_shutdown_request {
+ /**
+ * @brief Shutdown state ID
+ *
+ * Legal values:
+ * * 0 - Power off
+ * * 1 - Reboot
+ * @cond bpmp_t264
+ * * 2 - Suspend
+ * @endcond
+ */
+ uint32_t state;
+} BPMP_ABI_PACKED;
+
+/** @} Shutdown */
+
+/** @cond (bpmp_th500 || bpmp_tb500)
+ * @defgroup bpmp_c2c_status C2C link status
+ * @addtogroup bpmp_c2c_status
+ * @{
+ */
+/** @brief initial status code */
+#define BPMP_C2C_STATUS_INIT_NOT_STARTED 0
+/** @brief Invalid speedo code */
+#define BPMP_C2C_STATUS_C2C_INVALID_SPEEDO_CODE 7
+/** @brief Invalid frequency */
+#define BPMP_C2C_STATUS_C2C_INVALID_FREQ 8
+/** @brief Invalid link */
+#define BPMP_C2C_STATUS_C2C_INVALID_LINK 9
+/** @brief refpll lock polling times out - partition 0 */
+#define BPMP_C2C_STATUS_C2C0_REFPLL_FAIL 10
+/** @brief refpll lock polling times out - partition 1 */
+#define BPMP_C2C_STATUS_C2C1_REFPLL_FAIL 11
+/** @brief PLL cal times out - partition 0 */
+#define BPMP_C2C_STATUS_C2C0_PLLCAL_FAIL 12
+/** @brief PLL cal times out - partition 1 */
+#define BPMP_C2C_STATUS_C2C1_PLLCAL_FAIL 13
+/** @brief clock detection times out - partition 0 */
+#define BPMP_C2C_STATUS_C2C0_CLKDET_FAIL 14
+/** @brief clock detection times out - partition 1 */
+#define BPMP_C2C_STATUS_C2C1_CLKDET_FAIL 15
+/** @brief Final trainings fail partition 0 */
+#define BPMP_C2C_STATUS_C2C0_TR_FAIL 16
+/** @brief Final trainings fail partition 1 */
+#define BPMP_C2C_STATUS_C2C1_TR_FAIL 17
+/** @brief C2C FW init done */
+#define NV_GFW_GLOBAL_DEVINIT_C2C_STATUS_C2C_FW_INIT_DONE 20
+/** @brief C2C FW init failed partition 0 */
+#define NV_GFW_GLOBAL_DEVINIT_C2C_STATUS_C2C0_FW_INIT_FAIL 21
+/** @brief C2C FW init failed partition 1 */
+#define NV_GFW_GLOBAL_DEVINIT_C2C_STATUS_C2C1_FW_INIT_FAIL 22
+/** @brief no failure seen, c2c init was successful */
+#define BPMP_C2C_STATUS_C2C_LINK_TRAIN_PASS 255
+/** @} bpmp_c2c_status */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_C2C
+ * @brief Control C2C partitions initialization.
+ *
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_c2c_request
+ * * Response Payload: @ref mrq_c2c_response
+ *
+ * @addtogroup C2C
+ * @{
+ */
+enum mrq_c2c_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified request
+ * type
+ *
+ * mrq_response::err is 0 if the specified request is supported and
+ * -#BPMP_ENODEV otherwise
+ */
+ CMD_C2C_QUERY_ABI = 0,
+
+ /**
+ * @brief Start C2C initialization
+ *
+ * mrq_response:err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: MRQ_C2C is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ * * -#BPMP_EACCES: Request is not accepted
+ */
+ CMD_C2C_START_INITIALIZATION = 1,
+
+ /**
+ * @brief Command to query current C2C training status
+ *
+ * This command will return the result of the latest C2C re-training that is initiated with
+ * MRQ_C2C.CMD_C2C_START_INITIALIZATION or MRQ_C2C.CMD_C2C_START_HOTRESET calls.
+ * If no training has been initiated yet, the command will return code BPMP_C2C_STATUS_INIT_NOT_STARTED.
+ *
+ * mrq_response:err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: MRQ_C2C is not supported by BPMP-FW
+ * * -#BPMP_EACCES: Request is not accepted
+ */
+ CMD_C2C_GET_STATUS = 2,
+ /**
+ * @brief C2C hot-reset precondition
+ *
+ * mrq_response:err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: MRQ_C2C is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ * * -#BPMP_EACCES: Request is not accepted
+ */
+ CMD_C2C_HOTRESET_PREP = 3,
+ /**
+ * @brief Start C2C hot-reset
+ *
+ * mrq_response:err is
+ * * 0: Success
+ * * -#BPMP_ENODEV: MRQ_C2C is not supported by BPMP-FW
+ * * -#BPMP_ENAVAIL: Invalid request parameters
+ * * -#BPMP_EACCES: Request is not accepted
+ */
+ CMD_C2C_START_HOTRESET = 4,
+
+ CMD_C2C_MAX
+};
+
+/**
+ * @brief Request data for #MRQ_C2C command CMD_C2C_QUERY_ABI
+ */
+struct cmd_c2c_query_abi_request {
+ /** @brief Command identifier to be queried */
+ uint32_t cmd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_C2C command CMD_C2C_START_INITIALIZATION
+ */
+struct cmd_c2c_start_init_request {
+ /** @brief 1: partition 0; 2: partition 1; 3: partition 0 and 1; */
+ uint8_t partitions;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_C2C command CMD_C2C_START_INITIALIZATION
+ */
+struct cmd_c2c_start_init_response {
+ /** @brief Refer to @ref bpmp_c2c_status */
+ uint8_t status;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_C2C command CMD_C2C_GET_STATUS
+ */
+struct cmd_c2c_get_status_response {
+ /** @brief Refer to @ref bpmp_c2c_status */
+ uint8_t status;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_C2C command CMD_C2C_HOTRESET_PREP
+ */
+struct cmd_c2c_hotreset_prep_request {
+ /** @brief 1: partition 0; 2: partition 1; 3: partition 0 and 1; */
+ uint8_t partitions;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_C2C command CMD_C2C_START_HOTRESET
+ */
+struct cmd_c2c_start_hotreset_request {
+ /** @brief 1: partition 0; 2: partition 1; 3: partition 0 and 1; */
+ uint8_t partitions;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_C2C command CMD_C2C_START_HOTRESET
+ */
+struct cmd_c2c_start_hotreset_response {
+ /** @brief Refer to @ref bpmp_c2c_status */
+ uint8_t status;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request with #MRQ_C2C
+ *
+ * |sub-command |payload |
+ * |-----------------------------|-------------------------------|
+ * |CMD_C2C_QUERY_ABI |cmd_c2c_query_abi_request |
+ * |CMD_C2C_START_INITIALIZATION |cmd_c2c_start_init_request |
+ * |CMD_C2C_GET_STATUS | |
+ * |CMD_C2C_HOTRESET_PREP |cmd_c2c_hotreset_prep_request |
+ * |CMD_C2C_START_HOTRESET |cmd_c2c_start_hotreset_request |
+
+ */
+struct mrq_c2c_request {
+ uint32_t cmd;
+ union {
+ struct cmd_c2c_query_abi_request c2c_query_abi_req;
+ struct cmd_c2c_start_init_request c2c_start_init_req;
+ struct cmd_c2c_hotreset_prep_request c2c_hotreset_prep_req;
+ struct cmd_c2c_start_hotreset_request c2c_start_hotreset_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response to MRQ_C2C
+ *
+ * |sub-command |payload |
+ * |-----------------------------|--------------------------------|
+ * |CMD_C2C_QUERY_ABI | |
+ * |CMD_C2C_START_INITIALIZATION |cmd_c2c_start_init_response |
+ * |CMD_C2C_GET_STATUS |cmd_c2c_get_status_response |
+ * |CMD_C2C_HOTRESET_PREP | |
+ * |CMD_C2C_START_HOTRESET |cmd_c2c_start_hotreset_response |
+ */
+struct mrq_c2c_response {
+ union {
+ struct cmd_c2c_start_init_response c2c_start_init_resp;
+ struct cmd_c2c_get_status_response c2c_get_status_resp;
+ struct cmd_c2c_start_hotreset_response c2c_start_hotreset_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+/** @} */
+/** @endcond */
+
+
+/** @cond (bpmp_t264)
+ * @ingroup MRQ_Codes
+ * @def MRQ_PCIE
+ * @brief Perform a PCIE operation
+ *
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pcie_request
+ *
+ * @addtogroup PCIE
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_PCIE.
+ */
+enum mrq_pcie_cmd {
+ /** @brief Initialize PCIE EP controller. */
+ CMD_PCIE_EP_CONTROLLER_INIT = 0,
+ /** @brief Disable PCIE EP controller. */
+ CMD_PCIE_EP_CONTROLLER_OFF = 1,
+
+ /** @brief Disable PCIE RP controller. */
+ CMD_PCIE_RP_CONTROLLER_OFF = 100,
+
+ CMD_PCIE_MAX,
+};
+
+/**
+ * @brief Request payload for #MRQ_PCIE sub-command #CMD_PCIE_EP_CONTROLLER_INIT.
+ */
+struct cmd_pcie_ep_controller_init_request {
+ /**
+ * @brief PCIe EP controller number.
+ * Valid entries for T264 are 2, 4 and 5.
+ */
+ uint8_t ep_controller;
+ /**
+ * @brief PCIe EP function programming interface code.
+ * Valid range in HW is [0, 0xFFU], BPMP-FW programs the input value without any check.
+ * It is up to the requester to send valid input as documented in "PCI CODE AND ID
+ * ASSIGNMENT SPECIFICATION".
+ */
+ uint8_t progif_code;
+ /**
+ * @brief PCIe EP function sub-class code.
+ * Valid range in HW is [0, 0xFFU], BPMP-FW programs the input value without any check.
+ * It is up to the requester to send valid input as documented in "PCI CODE AND ID
+ * ASSIGNMENT SPECIFICATION".
+ */
+ uint8_t subclass_code;
+ /**
+ * @brief PCIe EP function base class code.
+ * Valid range in HW is [0, 0xFFU], BPMP-FW programs the input value without any check.
+ * It is up to the requester to send valid input as documented in "PCI CODE AND ID
+ * ASSIGNMENT SPECIFICATION".
+ */
+ uint8_t baseclass_code;
+ /**
+ * @brief PCIe EP function device id.
+ * Valid range is [0, 0x7FU], only LSB 7 bits are writable in 16-bit PCI device id.
+ * Valid range check is done on input value and returns -BPMP_EINVAL on failure.
+ */
+ uint8_t deviceid;
+ /**
+ * @brief PCIe EP EP BAR1 size.
+ * Valid range is [6U, 16U], which translate to [64MB, 64GB] size.
+ * Valid range check is done on input value and returns -BPMP_EINVAL on failure.
+ */
+ uint8_t bar1_size;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for #MRQ_PCIE sub-command #CMD_PCIE_EP_CONTROLLER_OFF.
+ */
+struct cmd_pcie_ep_controller_off_request {
+ /** @brief EP controller number, T264 valid: 2, 4, 5. */
+ uint8_t ep_controller;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for #MRQ_PCIE sub-command #CMD_PCIE_RP_CONTROLLER_OFF.
+ */
+struct cmd_pcie_rp_controller_off_request {
+ /** @brief RP controller number, T264 valid: 1-5 */
+ uint8_t rp_controller;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup PCIE
+ * @brief Request payload for the #MRQ_PCIE command.
+ *
+ * Used by the sender of an #MRQ_PCIE message to control PCIE.
+ * Below table shows sub-commands with their corresponding payload data.
+ *
+ * |sub-command |payload |
+ * |--------------------------------------|-----------------------------------------|
+ * |#CMD_PCIE_EP_CONTROLLER_INIT |#cmd_pcie_ep_controller_init_request |
+ * |#CMD_PCIE_EP_CONTROLLER_OFF |#cmd_pcie_ep_controller_off_request |
+ *
+ * @cond (!bpmp_safe)
+ *
+ * The following additional MRQs are supported on non-functional-safety
+ * builds:
+ * |sub-command |payload |
+ * |--------------------------------------|-----------------------------------------|
+ * |#CMD_PCIE_RP_CONTROLLER_OFF |#cmd_pcie_rp_controller_off_request |
+ *
+ * @endcond
+ *
+ */
+struct mrq_pcie_request {
+ /** @brief Sub-command ID from @ref mrq_pcie_cmd. */
+ uint32_t cmd;
+
+ union {
+ struct cmd_pcie_ep_controller_init_request ep_ctrlr_init;
+ struct cmd_pcie_ep_controller_off_request ep_ctrlr_off;
+ /** @cond (!bpmp_safe) */
+ struct cmd_pcie_rp_controller_off_request rp_ctrlr_off;
+ /** @endcond */
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} PCIE */
+/** @endcond */
+
+/** @cond (bpmp_t264)
+ * @ingroup MRQ_Codes
+ * @def MRQ_CR7
+ * @brief Perform a CR7 operation
+ *
+ * * Initiators: CPU_S
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_cr7_request
+ *
+ * @addtogroup CR7
+ * @{
+ */
+
+/**
+ * @brief Payload for #MRQ_CR7
+ * 2 fields for future parameters are provided. These must be 0 currently.
+ */
+struct cmd_cr7_request {
+ uint32_t fld0;
+ uint32_t fld1;
+} BPMP_ABI_PACKED;
+
+struct cmd_cr7_query_abi_request {
+ /** #MRQ_CR7 sub-command identifier from @ref mrq_cr7_cmd */
+ uint32_t type;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Sub-command identifiers for #MRQ_CR7.
+ */
+enum mrq_cr7_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified request
+ * type
+ *
+	 * mrq_response::err is 0 if the specified request is supported and
+	 * -#BPMP_ENODEV otherwise
+ */
+ CMD_CR7_QUERY_ABI = 0,
+
+ /** @brief Enter CR7 state on the package BPMP-FW is running on. */
+ CMD_CR7_ENTRY = 1,
+ /** @brief Exit CR7 state on the package BPMP-FW is running on. */
+ CMD_CR7_EXIT = 2,
+
+ CMD_CR7_MAX,
+};
+
+/**
+ * @ingroup CR7
+ * @brief #MRQ_CR7 structure
+ *
+ * |Sub-command |Payload |
+ * |----------------------------|---------------------------|
+ * |#CMD_CR7_QUERY_ABI | #cmd_cr7_query_abi_request|
+ * |#CMD_CR7_ENTRY | #cmd_cr7_request |
+ * |#CMD_CR7_EXIT | #cmd_cr7_request |
+ *
+ */
+struct mrq_cr7_request {
+ /** @brief Sub-command ID from @ref mrq_cr7_cmd. */
+ uint32_t cmd;
+ union {
+ struct cmd_cr7_query_abi_request query_abi;
+ struct cmd_cr7_request cr7_request;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} CR7 */
+/** @endcond */
+
+/** @cond (bpmp_tb500)
+ * @ingroup MRQ_Codes
+ * @def MRQ_HWPM
+ * @brief Configure and query HWPM functionality
+ *
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_hwpm_request
+ * * Response Payload: @ref mrq_hwpm_response
+ *
+ * @addtogroup HWPM
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_HWPM.
+ */
+enum mrq_hwpm_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_HWPM sub-command.
+ *
+ * mrq_response:err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_HWPM_QUERY_ABI = 1,
+
+ /**
+ * @brief Configure IPMU triggers
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_IPMU_SET_TRIGGERS = 2,
+
+ /**
+ * @brief Configure IPMU payloads and shifts
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_IPMU_SET_PAYLOADS_SHIFTS = 3,
+
+ /**
+ * @brief Get maximum number of payloads
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ */
+ CMD_HWPM_IPMU_GET_MAX_PAYLOADS = 4,
+
+ /**
+ * @brief Configure NVTHERM sample rate
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_NVTHERM_SET_SAMPLE_RATE = 5,
+
+ /**
+ * @brief Set NVTHERM bubble interval
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_NVTHERM_SET_BUBBLE_INTERVAL = 6,
+
+ /**
+ * @brief Configure NVTHERM DG flexible channels
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_NVTHERM_SET_FLEX_CHANNELS = 7,
+
+ /**
+ * @brief Get ISENSE sensor name
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_ISENSE_GET_SENSOR_NAME = 8,
+
+ /**
+ * @brief Get ISENSE sensor channel
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_ISENSE_GET_SENSOR_CHANNEL = 9,
+
+ /**
+ * @brief Get ISENSE sensor scale factor
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_ISENSE_GET_SENSOR_SCALE_FACTOR = 10,
+
+ /**
+ * @brief Get ISENSE sensor offset
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ * | -#BPMP_ENODATA | No sensor offset. |
+ */
+ CMD_HWPM_ISENSE_GET_SENSOR_OFFSET = 11,
+
+ /**
+ * @brief Get ISENSE sum block name
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_ISENSE_GET_SUM_BLOCK_NAME = 12,
+
+ /**
+ * @brief Get ISENSE sum input sensor IDs
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_HWPM is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_HWPM_ISENSE_GET_SUM_BLOCK_INPUTS = 13,
+
+ /**
+ * @brief Largest supported #MRQ_HWPM sub-command identifier + 1
+ */
+ CMD_HWPM_MAX,
+};
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_QUERY_ABI
+ */
+struct cmd_hwpm_query_abi_req {
+ /** @brief Sub-command identifier from @ref mrq_hwpm_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Maximum array length for IPMU trigger bitmask
+ */
+#define HWPM_IPMU_TRIGGER_ARR_LEN 28U
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_IPMU_SET_TRIGGERS
+ */
+struct cmd_hwpm_ipmu_set_triggers_req {
+ /** @brief IPMU physical ID
+ *
+ * @note Valid range from [0, MAX_CPU_CORES), see @ref bpmp_hwpm_core_config
+ */
+ uint32_t ipmu_phys_id;
+ /** @brief Trigger bitmask, see @ref bpmp_ipmu_trigger_ids
+ *
+ * @note Setting a trigger bit will cause the associated trigger to
+ * generate an output packet from IPMU to the HWPM perfmux.
+ * @note Up to a maximum possible 896 triggers
+ */
+ uint32_t triggers[HWPM_IPMU_TRIGGER_ARR_LEN];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Array length for IPMU payload bitmask
+ */
+#define HWPM_IPMU_PAYLOAD_ARR_LEN 26U
+
+/**
+ * @brief Array length for IPMU payload shift bitmask
+ */
+#define HWPM_IPMU_SHIFT_ARR_LEN 2U
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_IPMU_SET_PAYLOADS_SHIFTS
+ */
+struct cmd_hwpm_ipmu_set_payloads_shifts_req {
+ /** @brief IPMU physical ID
+ *
+ * @note Valid range from [0, MAX_CPU_CORES), see @ref bpmp_hwpm_core_config
+ */
+ uint32_t ipmu_phys_id;
+ /** @brief Payload bitmask, see @ref bpmp_ipmu_payload_ids
+ *
+ * @note Setting a payload bit will add the associated payload to the
+ * IPMU output packet.
+ * @note The maximum number of payloads is platform dependent,
+ * @see #CMD_HWPM_IPMU_GET_MAX_PAYLOADS
+ * @note To disable IPMU streaming on this instance, set all payload bits to 0.
+ * @note Up to a maximum of 832 available payloads
+ */
+ uint32_t payloads[HWPM_IPMU_PAYLOAD_ARR_LEN];
+ /**
+ * @brief Payload shift mask
+ *
+ * @note Setting the i-th shift bit will right-shift the
+ * i-th enabled payload by 1 bit.
+ * @note Up to a maximum of 64 simultaneous emitted payloads
+ */
+ uint32_t shifts[HWPM_IPMU_SHIFT_ARR_LEN];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_IPMU_GET_MAX_PAYLOADS
+ */
+struct cmd_hwpm_ipmu_get_max_payloads_req {
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_NVTHERM_SET_SAMPLE_RATE
+ */
+struct cmd_hwpm_nvtherm_set_sample_rate_req {
+ /** @brief Sample rate in microseconds
+ *
+ * @note Requesting a sample rate of 0 will disable NVTHERM streaming.
+ */
+ uint32_t sample_rate;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_NVTHERM_SET_BUBBLE_INTERVAL
+ */
+struct cmd_hwpm_nvtherm_set_bubble_interval_req {
+ /** @brief Bubble interval in microseconds */
+ uint32_t bubble_interval;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Maximum array length for NVTHERM flexible channel bitmask
+ */
+#define HWPM_NVTHERM_FLEX_CHANNEL_ARR_LEN 29U
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_NVTHERM_SET_FLEX_CHANNELS
+ */
+struct cmd_hwpm_nvtherm_set_flex_channels_req {
+ /** @brief NVTHERM flexible channel bitmask
+ *
+ * @see #bpmp_nvtherm_flex_channel_ids
+ *
+ * @note Up to a maximum of 928 flexible channels
+ */
+ uint32_t channels[HWPM_NVTHERM_FLEX_CHANNEL_ARR_LEN];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_NAME
+ */
+struct cmd_hwpm_isense_get_sensor_name_req {
+ /** @brief Sensor ID from @ref bpmp_isense_sensor_ids */
+ uint32_t sensor_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_CHANNEL
+ */
+struct cmd_hwpm_isense_get_sensor_channel_req {
+ /** @brief Sensor ID from @ref bpmp_isense_sensor_ids */
+ uint32_t sensor_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_SCALE_FACTOR
+ */
+struct cmd_hwpm_isense_get_sensor_scale_factor_req {
+ /** @brief Sensor ID from @ref bpmp_isense_sensor_ids */
+ uint32_t sensor_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_OFFSET
+ */
+struct cmd_hwpm_isense_get_sensor_offset_req {
+ /** @brief Sensor ID from @ref bpmp_isense_sensor_ids */
+ uint32_t sensor_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SUM_BLOCK_NAME
+ */
+struct cmd_hwpm_isense_get_sum_block_name_req {
+ /** @brief Sum block index */
+ uint32_t sum_block_index;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SUM_BLOCK_INPUTS
+ */
+struct cmd_hwpm_isense_get_sum_block_inputs_req {
+ /** @brief Sum block index */
+ uint32_t sum_block_index;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_IPMU_GET_MAX_PAYLOADS
+ */
+struct cmd_hwpm_ipmu_get_max_payloads_resp {
+ /** @brief Maximum number of payloads */
+ uint32_t max_payloads;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Maximum array length for ISENSE sensor name
+ */
+#define HWPM_ISENSE_SENSOR_MAX_NAME_LEN 64U
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_NAME
+ */
+struct cmd_hwpm_isense_get_sensor_name_resp {
+ /** @brief Sensor name */
+ char sensor_name[HWPM_ISENSE_SENSOR_MAX_NAME_LEN];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_CHANNEL
+ */
+struct cmd_hwpm_isense_get_sensor_channel_resp {
+ /** @brief Physical channel index */
+ uint32_t channel_index;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_SCALE_FACTOR
+ */
+struct cmd_hwpm_isense_get_sensor_scale_factor_resp {
+ /** @brief Scale factor */
+ float scale_factor;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SENSOR_OFFSET
+ */
+struct cmd_hwpm_isense_get_sensor_offset_resp {
+ /** @brief Offset sensor ID */
+ uint32_t offset_sensor_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Maximum array length for ISENSE sum name
+ */
+#define HWPM_ISENSE_SUM_BLOCK_MAX_NAME_LEN 64U
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SUM_BLOCK_NAME
+ */
+struct cmd_hwpm_isense_get_sum_block_name_resp {
+ /** @brief Sum block name */
+ char sum_block_name[HWPM_ISENSE_SUM_BLOCK_MAX_NAME_LEN];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Maximum array length for ISENSE sum block input sensor IDs
+ */
+#define HWPM_ISENSE_SUM_BLOCK_INPUTS_MAX 16U
+
+/**
+ * @brief Response data for #MRQ_HWPM sub-command #CMD_HWPM_ISENSE_GET_SUM_BLOCK_INPUTS
+ */
+struct cmd_hwpm_isense_get_sum_block_inputs_resp {
+ /** @brief Input channel indices; negative if no input is applied */
+ int32_t input_channel_idx[HWPM_ISENSE_SUM_BLOCK_INPUTS_MAX];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_HWPM command
+ *
+ * | Sub-command | Request payload |
+ * | ---------------------------------------- | -------------------------------------------- |
+ * | #CMD_HWPM_QUERY_ABI | #cmd_hwpm_query_abi_req |
+ * | #CMD_HWPM_IPMU_SET_TRIGGERS | #cmd_hwpm_ipmu_set_triggers_req |
+ * | #CMD_HWPM_IPMU_SET_PAYLOADS_SHIFTS | #cmd_hwpm_ipmu_set_payloads_shifts_req |
+ * | #CMD_HWPM_IPMU_GET_MAX_PAYLOADS | #cmd_hwpm_ipmu_get_max_payloads_req |
+ * | #CMD_HWPM_NVTHERM_SET_SAMPLE_RATE | #cmd_hwpm_nvtherm_set_sample_rate_req |
+ * | #CMD_HWPM_NVTHERM_SET_BUBBLE_INTERVAL | #cmd_hwpm_nvtherm_set_bubble_interval_req |
+ * | #CMD_HWPM_NVTHERM_SET_FLEX_CHANNELS | #cmd_hwpm_nvtherm_set_flex_channels_req |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_NAME | #cmd_hwpm_isense_get_sensor_name_req |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_CHANNEL | #cmd_hwpm_isense_get_sensor_channel_req |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_SCALE_FACTOR | #cmd_hwpm_isense_get_sensor_scale_factor_req |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_OFFSET | #cmd_hwpm_isense_get_sensor_offset_req |
+ * | #CMD_HWPM_ISENSE_GET_SUM_BLOCK_NAME | #cmd_hwpm_isense_get_sum_block_name_req |
+ * | #CMD_HWPM_ISENSE_GET_SUM_BLOCK_INPUTS | #cmd_hwpm_isense_get_sum_block_inputs_req |
+ */
+struct mrq_hwpm_request {
+ uint32_t cmd;
+ union {
+ struct cmd_hwpm_query_abi_req query_abi;
+ struct cmd_hwpm_ipmu_set_triggers_req ipmu_set_triggers;
+ struct cmd_hwpm_ipmu_set_payloads_shifts_req ipmu_set_payloads_shifts;
+ struct cmd_hwpm_ipmu_get_max_payloads_req ipmu_get_max_payloads;
+ struct cmd_hwpm_nvtherm_set_sample_rate_req nvtherm_set_sample_rate;
+ struct cmd_hwpm_nvtherm_set_bubble_interval_req nvtherm_set_bubble_interval;
+ struct cmd_hwpm_nvtherm_set_flex_channels_req nvtherm_set_flex_channels;
+ struct cmd_hwpm_isense_get_sensor_name_req isense_get_sensor_name;
+ struct cmd_hwpm_isense_get_sensor_channel_req isense_get_sensor_channel;
+ struct cmd_hwpm_isense_get_sensor_scale_factor_req isense_get_sensor_scale_factor;
+ struct cmd_hwpm_isense_get_sensor_offset_req isense_get_sensor_offset;
+ struct cmd_hwpm_isense_get_sum_block_name_req isense_get_sum_block_name;
+ struct cmd_hwpm_isense_get_sum_block_inputs_req isense_get_sum_block_inputs;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for the #MRQ_HWPM command
+ *
+ * | Sub-command | Response payload |
+ * | ---------------------------------------- | --------------------------------------------- |
+ * | #CMD_HWPM_QUERY_ABI | - |
+ * | #CMD_HWPM_IPMU_SET_TRIGGERS | - |
+ * | #CMD_HWPM_IPMU_SET_PAYLOADS_SHIFTS | - |
+ * | #CMD_HWPM_IPMU_GET_MAX_PAYLOADS | #cmd_hwpm_ipmu_get_max_payloads_resp |
+ * | #CMD_HWPM_NVTHERM_SET_SAMPLE_RATE | - |
+ * | #CMD_HWPM_NVTHERM_SET_BUBBLE_INTERVAL | - |
+ * | #CMD_HWPM_NVTHERM_SET_FLEX_CHANNELS | - |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_NAME | #cmd_hwpm_isense_get_sensor_name_resp |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_CHANNEL | #cmd_hwpm_isense_get_sensor_channel_resp |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_SCALE_FACTOR | #cmd_hwpm_isense_get_sensor_scale_factor_resp |
+ * | #CMD_HWPM_ISENSE_GET_SENSOR_OFFSET | #cmd_hwpm_isense_get_sensor_offset_resp |
+ * | #CMD_HWPM_ISENSE_GET_SUM_BLOCK_NAME | #cmd_hwpm_isense_get_sum_block_name_resp |
+ * | #CMD_HWPM_ISENSE_GET_SUM_BLOCK_INPUTS | #cmd_hwpm_isense_get_sum_block_inputs_resp |
+ */
+struct mrq_hwpm_response {
+ uint32_t err;
+ union {
+ struct cmd_hwpm_ipmu_get_max_payloads_resp ipmu_get_max_payloads;
+ struct cmd_hwpm_isense_get_sensor_name_resp isense_get_sensor_name;
+ struct cmd_hwpm_isense_get_sensor_channel_resp isense_get_sensor_channel;
+ struct cmd_hwpm_isense_get_sensor_scale_factor_resp isense_get_sensor_scale_factor;
+ struct cmd_hwpm_isense_get_sensor_offset_resp isense_get_sensor_offset;
+ struct cmd_hwpm_isense_get_sum_block_name_resp isense_get_sum_block_name;
+ struct cmd_hwpm_isense_get_sum_block_inputs_resp isense_get_sum_block_inputs;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} HWPM */
+/** @endcond */
+
+/** @cond (bpmp_tb500)
+ * @ingroup MRQ_Codes
+ * @def MRQ_DVFS
+ * @brief Configure DVFS functionality
+ *
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_dvfs_request
+ *
+ * @addtogroup DVFS
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_DVFS.
+ */
+enum mrq_dvfs_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_DVFS sub-command.
+ *
+ * mrq_response:err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_DVFS_QUERY_ABI = 1,
+
+ /**
+ * @brief Configure DVFS controller
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_DVFS is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_DVFS_SET_CTRL_STATE = 2,
+
+ /**
+ * @brief Configure DVFS manager
+ *
+ * mrq_response:err is defined as:
+ *
+ * | Value | Description |
+ * | -------------- | ------------------------------------------- |
+ * | 0 | Success |
+ * | -#BPMP_ENODEV | #MRQ_DVFS is not supported by BPMP-FW. |
+ * | -#BPMP_ENOTSUP | Subcommand is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_DVFS_SET_MGR_STATE = 3,
+
+ /**
+ * @brief Largest supported #MRQ_DVFS sub-command identifier + 1
+ */
+ CMD_DVFS_MAX,
+};
+
+/**
+ * @brief Request data for #MRQ_DVFS sub-command #CMD_DVFS_QUERY_ABI
+ */
+struct cmd_dvfs_query_abi_req {
+ /** @brief Sub-command identifier from @ref mrq_dvfs_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+struct cmd_dvfs_set_ctrl_state_req {
+ /** @brief Controller ID from @ref bpmp_dvfs_ctrl_ids */
+ uint32_t ctrl_id;
+ /** @brief Controller enable state */
+ uint32_t enable;
+} BPMP_ABI_PACKED;
+
+struct cmd_dvfs_set_mgr_state_req {
+ /** @brief Manager ID from @ref bpmp_dvfs_mgr_ids */
+ uint32_t mgr_id;
+ /** @brief Manager enable state */
+ uint32_t enable;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_DVFS command
+ *
+ * | Sub-command | Request payload |
+ * | ---------------------------------------- | -------------------------------------------- |
+ * | #CMD_DVFS_QUERY_ABI | #cmd_dvfs_query_abi_req |
+ * | #CMD_DVFS_SET_CTRL_STATE | #cmd_dvfs_set_ctrl_state_req |
+ * | #CMD_DVFS_SET_MGR_STATE | #cmd_dvfs_set_mgr_state_req |
+ */
+struct mrq_dvfs_request {
+ uint32_t cmd;
+ union {
+ struct cmd_dvfs_query_abi_req query_abi;
+ struct cmd_dvfs_set_ctrl_state_req set_ctrl_state;
+ struct cmd_dvfs_set_mgr_state_req set_mgr_state;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} DVFS */
+/** @endcond */
+
+/** @cond (bpmp_tb500)
+ * @ingroup MRQ_Codes
+ * @def MRQ_PPP_PROFILE
+ * @brief Get power/performance profile configuration settings.
+ *
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_ppp_profile_request
+ * * Response Payload: @ref mrq_ppp_profile_response
+ *
+ * @addtogroup PPP
+ * @{
+ */
+
+/**
+ * @brief Sub-command identifiers for #MRQ_PPP_PROFILE.
+ */
+enum mrq_ppp_profile_cmd {
+ /**
+ * @brief Check whether the BPMP-FW supports the specified
+ * #MRQ_PPP_PROFILE sub-command.
+ *
+ * mrq_ppp_profile_response:err is 0 if the specified request is
+ * supported and -#BPMP_ENOTSUP otherwise.
+	 */
+	CMD_PPP_PROFILE_QUERY_ABI = 0,
+
+ /**
+ * @brief Query the BPMP for the CPU core and SLC slice configuration associated
+ * with a given Power/Performance Profile (PPP).
+ *
+ * mrq_ppp_profile_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENOTSUP | #MRQ_PPP_PROFILE is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_PPP_PROFILE_QUERY_MASKS = 1,
+ /**
+ * @brief Query BPMP for the CPU mask corresponding to a requested
+ * number of active CPU cores.
+ *
+ * mrq_ppp_profile_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENOTSUP | #MRQ_PPP_PROFILE is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_PPP_CORE_QUERY_CPU_MASK = 2,
+ /**
+ * @brief Query BPMP-FW for the currently available Power/Performance Profiles.
+ *
+ * mrq_ppp_profile_response:err is defined as:
+ *
+ * | Value | Description |
+ * |----------------|------------------------------------------------|
+ * | 0 | Success |
+ * | -#BPMP_ENOTSUP | #MRQ_PPP_PROFILE is not supported by BPMP-FW. |
+ * | -#BPMP_EINVAL | Invalid request parameters. |
+ */
+ CMD_PPP_AVAILABLE_QUERY = 3,
+};
+
+/**
+ * @brief Request data for #MRQ_PPP_PROFILE sub-command
+ * #CMD_PPP_PROFILE_QUERY_ABI
+ */
+struct cmd_ppp_profile_query_abi_req {
+ /** @brief Sub-command identifier from @ref mrq_ppp_profile_cmd */
+ uint32_t cmd_code;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PPP_PROFILE sub-command
+ * #CMD_PPP_AVAILABLE_QUERY
+ */
+struct cmd_ppp_available_query_resp {
+ /**
+ * @brief Bitmask of available profiles.
+	 * Bit N = 1 => profile N is available
+ */
+ uint32_t avail_ppp_mask;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PPP_PROFILE sub-command
+ * #CMD_PPP_PROFILE_QUERY_MASKS
+ */
+struct cmd_ppp_profile_query_masks_req {
+ /** @brief power/perf profile identifier */
+ uint32_t profile_id;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for #MRQ_PPP_PROFILE sub-command
+ * #CMD_PPP_PROFILE_QUERY_MASKS
+ */
+struct cmd_ppp_profile_query_masks_resp {
+ /** @brief Enabled cores in this profile */
+ uint32_t num_active_cores;
+ /** @brief Enabled SLC slices in this profile */
+ uint32_t num_active_slcs;
+ /** @brief Number of valid words in active_core_masks array */
+ uint32_t max_num_core_words;
+ /** @brief Number of valid words in active_slc_masks array */
+ uint32_t max_num_slc_words;
+ /** @brief Enabled cores bit mask (bit N = 1 => core N enabled) */
+ uint32_t active_core_masks[8];
+ /** @brief Enabled SLC slices bit mask (bit N = 1 => SLC slice N enabled) */
+ uint32_t active_slc_masks[8];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request data for #MRQ_PPP_PROFILE sub-command
+ * #CMD_PPP_CORE_QUERY_CPU_MASK
+ */
+struct cmd_ppp_core_query_cpu_mask_req {
+ /** @brief Requested number of active cores */
+ uint32_t num_cores;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response data for #MRQ_PPP_PROFILE sub-command
+ * #CMD_PPP_CORE_QUERY_CPU_MASK
+ */
+struct cmd_ppp_core_query_cpu_mask_resp {
+ /** @brief Number of valid words in active_core_masks array */
+ uint32_t max_num_words;
+	/** @brief Enabled CPU core bitmask (bit N = 1 => core N enabled) */
+ uint32_t active_core_masks[8];
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Request payload for the #MRQ_PPP_PROFILE command
+ *
+ * | Sub-command | Request payload |
+ * |-------------------------------|----------------------------------------|
+ * | #CMD_PPP_PROFILE_QUERY_ABI | #cmd_ppp_profile_query_abi_req |
+ * | #CMD_PPP_PROFILE_QUERY_MASKS | #cmd_ppp_profile_query_masks_req |
+ * | #CMD_PPP_CORE_QUERY_CPU_MASK | #cmd_ppp_core_query_cpu_mask_req |
+ * | #CMD_PPP_AVAILABLE_QUERY | - |
+ */
+struct mrq_ppp_profile_request {
+ /** @brief Sub-command ID from @ref mrq_ppp_profile_cmd. */
+ uint32_t cmd;
+ union {
+ struct cmd_ppp_profile_query_abi_req query_abi;
+ struct cmd_ppp_profile_query_masks_req ppp_profile_masks_req;
+ struct cmd_ppp_core_query_cpu_mask_req ppp_core_mask_req;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @brief Response payload for the #MRQ_PPP_PROFILE command.
+ *
+ * | Sub-command | Response payload |
+ * |-------------------------------|----------------------------------------|
+ * | #CMD_PPP_PROFILE_QUERY_ABI | - |
+ * | #CMD_PPP_PROFILE_QUERY_MASKS | #cmd_ppp_profile_query_masks_resp |
+ * | #CMD_PPP_CORE_QUERY_CPU_MASK | #cmd_ppp_core_query_cpu_mask_resp |
+ * | #CMD_PPP_AVAILABLE_QUERY | #cmd_ppp_available_query_resp |
+ */
+struct mrq_ppp_profile_response {
+ uint32_t err;
+ union {
+ struct cmd_ppp_profile_query_masks_resp ppp_profile_masks_resp;
+ struct cmd_ppp_core_query_cpu_mask_resp ppp_core_mask_resp;
+ struct cmd_ppp_available_query_resp ppp_avail_query_resp;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/** @} PPP */
+/** @endcond */
/**
* @addtogroup Error_Codes
@@ -3953,6 +6728,8 @@ struct mrq_gears_response {
#define BPMP_ENOSYS 38
/** @brief Invalid slot */
#define BPMP_EBADSLT 57
+/** @brief No data */
+#define BPMP_ENODATA 61
/** @brief Invalid message */
#define BPMP_EBADMSG 77
/** @brief Operation not supported */
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index f5e4ac5b8cce..a33582590a3b 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -127,6 +127,7 @@ struct tegra_bpmp_message {
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
struct tegra_bpmp *tegra_bpmp_get(struct device *dev);
+struct tegra_bpmp *tegra_bpmp_get_with_id(struct device *dev, unsigned int *id);
void tegra_bpmp_put(struct tegra_bpmp *bpmp);
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
struct tegra_bpmp_message *msg);
@@ -143,21 +144,31 @@ bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq);
#else
static inline struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-ENODEV);
}
+
+static inline struct tegra_bpmp *tegra_bpmp_get_with_id(struct device *dev,
+ unsigned int *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
}
+
static inline int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
struct tegra_bpmp_message *msg)
{
- return -ENOTSUPP;
+ return -ENODEV;
}
+
static inline int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
struct tegra_bpmp_message *msg)
{
- return -ENOTSUPP;
+ return -ENODEV;
}
+
static inline void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
int code, const void *data,
size_t size)
@@ -169,8 +180,9 @@ static inline int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp,
tegra_bpmp_mrq_handler_t handler,
void *data)
{
- return -ENOTSUPP;
+ return -ENODEV;
}
+
static inline void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp,
unsigned int mrq, void *data)
{
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 6ee4c59db620..e6da035d1306 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (C) 2014-2026 NVIDIA Corporation
*/
#ifndef __SOC_TEGRA_MC_H__
@@ -10,10 +10,11 @@
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/reset-controller.h>
-#include <linux/types.h>
#include <linux/tegra-icc.h>
+#include <linux/types.h>
struct clk;
struct device;
@@ -164,10 +165,31 @@ struct tegra_mc_ops {
int (*probe)(struct tegra_mc *mc);
void (*remove)(struct tegra_mc *mc);
int (*resume)(struct tegra_mc *mc);
- irqreturn_t (*handle_irq)(int irq, void *data);
int (*probe_device)(struct tegra_mc *mc, struct device *dev);
};
+struct tegra_mc_regs {
+ unsigned int cfg_channel_enable;
+ unsigned int err_status;
+ unsigned int err_add;
+ unsigned int err_add_hi;
+ unsigned int err_vpr_status;
+ unsigned int err_vpr_add;
+ unsigned int err_sec_status;
+ unsigned int err_sec_add;
+ unsigned int err_mts_status;
+ unsigned int err_mts_add;
+ unsigned int err_gen_co_status;
+ unsigned int err_gen_co_add;
+ unsigned int err_route_status;
+ unsigned int err_route_add;
+};
+
+struct tegra_mc_intmask {
+ u32 reg;
+ u32 mask;
+};
+
struct tegra_mc_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -185,7 +207,6 @@ struct tegra_mc_soc {
const struct tegra_smmu_soc *smmu;
- u32 intmask;
u32 ch_intmask;
u32 global_intstatus_channel_shift;
bool has_addr_hi_reg;
@@ -196,6 +217,14 @@ struct tegra_mc_soc {
const struct tegra_mc_icc_ops *icc_ops;
const struct tegra_mc_ops *ops;
+ const struct tegra_mc_regs *regs;
+
+ const irq_handler_t *handle_irq;
+ unsigned int num_interrupts;
+ unsigned int mc_addr_hi_mask;
+ unsigned int mc_err_status_type_mask;
+ const struct tegra_mc_intmask *intmasks;
+ unsigned int num_intmasks;
};
struct tegra_mc {
@@ -206,7 +235,6 @@ struct tegra_mc {
void __iomem *bcast_ch_regs;
void __iomem **ch_regs;
struct clk *clk;
- int irq;
const struct tegra_mc_soc *soc;
unsigned long tick;
@@ -256,4 +284,6 @@ tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
}
#endif
+extern const struct tegra_mc_regs tegra20_mc_regs;
+
#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/sound/core.h b/include/sound/core.h
index 64327e971122..4093ec82a0a1 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -133,6 +133,9 @@ struct snd_card {
#ifdef CONFIG_SND_DEBUG
struct dentry *debugfs_root; /* debugfs root for card */
#endif
+#ifdef CONFIG_SND_CTL_DEBUG
+ struct snd_ctl_elem_value *value_buf; /* buffer for kctl->put() verification */
+#endif
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 28f9f5940ab6..c3b10587cb4c 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -32,9 +32,6 @@ struct snd_ctl_elem_value;
#define CS35L56_UPDATE_REGS 0x0002A0C
#define CS35L56_REFCLK_INPUT 0x0002C04
#define CS35L56_GLOBAL_SAMPLE_RATE 0x0002C0C
-#define CS35L56_OTP_MEM_53 0x00300D4
-#define CS35L56_OTP_MEM_54 0x00300D8
-#define CS35L56_OTP_MEM_55 0x00300DC
#define CS35L56_ASP1_ENABLES1 0x0004800
#define CS35L56_ASP1_CONTROL1 0x0004804
#define CS35L56_ASP1_CONTROL2 0x0004808
@@ -86,6 +83,9 @@ struct snd_ctl_elem_value;
#define CS35L56_DIE_STS1 0x0017040
#define CS35L56_DIE_STS2 0x0017044
#define CS35L56_DSP_RESTRICT_STS1 0x00190F0
+#define CS35L56_OTP_MEM_53 0x00300D4
+#define CS35L56_OTP_MEM_54 0x00300D8
+#define CS35L56_OTP_MEM_55 0x00300DC
#define CS35L56_DSP1_XMEM_PACKED_0 0x2000000
#define CS35L56_DSP1_XMEM_PACKED_6143 0x2005FFC
#define CS35L56_DSP1_XMEM_UNPACKED32_0 0x2400000
@@ -435,6 +435,7 @@ ssize_t cs35l56_cal_data_debugfs_read(struct cs35l56_base *cs35l56_base,
ssize_t cs35l56_cal_data_debugfs_write(struct cs35l56_base *cs35l56_base,
const char __user *from, size_t count,
loff_t *ppos);
+int cs35l56_factory_calibrate(struct cs35l56_base *cs35l56_base);
void cs35l56_create_cal_debugfs(struct cs35l56_base *cs35l56_base,
const struct cs35l56_cal_debugfs_fops *fops);
void cs35l56_remove_cal_debugfs(struct cs35l56_base *cs35l56_base);
diff --git a/include/sound/gus.h b/include/sound/gus.h
index 321ae93625eb..3feb42627de1 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -536,6 +536,7 @@ int snd_gf1_dma_transfer_block(struct snd_gus_card * gus,
struct snd_gf1_dma_block * block,
int atomic,
int synth);
+void snd_gf1_dma_suspend(struct snd_gus_card *gus);
/* gus_volume.c */
@@ -552,6 +553,8 @@ struct snd_gus_voice *snd_gf1_alloc_voice(struct snd_gus_card * gus, int type, i
void snd_gf1_free_voice(struct snd_gus_card * gus, struct snd_gus_voice *voice);
int snd_gf1_start(struct snd_gus_card * gus);
int snd_gf1_stop(struct snd_gus_card * gus);
+int snd_gf1_suspend(struct snd_gus_card *gus);
+int snd_gf1_resume(struct snd_gus_card *gus);
/* gus_mixer.c */
@@ -572,6 +575,8 @@ int snd_gus_create(struct snd_card *card,
int effect,
struct snd_gus_card ** rgus);
int snd_gus_initialize(struct snd_gus_card * gus);
+int snd_gus_suspend(struct snd_gus_card *gus);
+int snd_gus_resume(struct snd_gus_card *gus);
/* gus_irq.c */
@@ -583,6 +588,8 @@ void snd_gus_irq_profile_init(struct snd_gus_card *gus);
/* gus_uart.c */
int snd_gf1_rawmidi_new(struct snd_gus_card *gus, int device);
+void snd_gf1_uart_suspend(struct snd_gus_card *gus);
+void snd_gf1_uart_resume(struct snd_gus_card *gus);
/* gus_dram.c */
int snd_gus_dram_write(struct snd_gus_card *gus, char __user *ptr,
@@ -593,5 +600,6 @@ int snd_gus_dram_read(struct snd_gus_card *gus, char __user *ptr,
/* gus_timer.c */
void snd_gf1_timers_init(struct snd_gus_card *gus);
void snd_gf1_timers_done(struct snd_gus_card *gus);
+void snd_gf1_timers_resume(struct snd_gus_card *gus);
#endif /* __SOUND_GUS_H */
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 5d9f0ef228af..24581080e26a 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -336,6 +336,17 @@ snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int flags,
return snd_hdac_codec_write(&codec->core, nid, flags, verb, parm);
}
+/* sync after write */
+static inline int
+snd_hda_codec_write_sync(struct hda_codec *codec, hda_nid_t nid, int flags,
+ unsigned int verb, unsigned int parm)
+{
+ /* use snd_hda_codec_read() for writing;
+ * the returned value is usually discarded
+ */
+ return snd_hdac_codec_read(&codec->core, nid, flags, verb, parm);
+}
+
#define snd_hda_param_read(codec, nid, param) \
snd_hdac_read_parm(&(codec)->core, nid, param)
#define snd_hda_get_sub_nodes(codec, nid, start_nid) \
@@ -470,6 +481,10 @@ void snd_hda_unlock_devices(struct hda_bus *bus);
void snd_hda_bus_reset(struct hda_bus *bus);
void snd_hda_bus_reset_codecs(struct hda_bus *bus);
+void snd_hda_codec_set_gpio(struct hda_codec *codec, unsigned int mask,
+ unsigned int dir, unsigned int data,
+ unsigned int delay);
+
int snd_hda_codec_set_name(struct hda_codec *codec, const char *name);
/*
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h
index 006d358acce2..6066954409aa 100644
--- a/include/sound/hda_verbs.h
+++ b/include/sound/hda_verbs.h
@@ -56,7 +56,12 @@ enum {
#define AC_VERB_GET_DIGI_CONVERT_1 0x0f0d
#define AC_VERB_GET_DIGI_CONVERT_2 0x0f0e /* unused */
#define AC_VERB_GET_VOLUME_KNOB_CONTROL 0x0f0f
-/* f10-f1a: GPIO */
+/* f10-f1a: GPI/GPO/GPIO */
+#define AC_VERB_GET_GPI_DATA 0x0f10
+#define AC_VERB_GET_GPI_WAKE_MASK 0x0f11
+#define AC_VERB_GET_GPI_UNSOLICITED_RSP_MASK 0x0f12
+#define AC_VERB_GET_GPI_STICKY_MASK 0x0f13
+#define AC_VERB_GET_GPO_DATA 0x0f14
#define AC_VERB_GET_GPIO_DATA 0x0f15
#define AC_VERB_GET_GPIO_MASK 0x0f16
#define AC_VERB_GET_GPIO_DIRECTION 0x0f17
@@ -99,6 +104,11 @@ enum {
#define AC_VERB_SET_DIGI_CONVERT_2 0x70e
#define AC_VERB_SET_DIGI_CONVERT_3 0x73e
#define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f
+#define AC_VERB_SET_GPI_DATA 0x710
+#define AC_VERB_SET_GPI_WAKE_MASK 0x711
+#define AC_VERB_SET_GPI_UNSOLICITED_RSP_MASK	0x712
+#define AC_VERB_SET_GPI_STICKY_MASK 0x713
+#define AC_VERB_SET_GPO_DATA 0x714
#define AC_VERB_SET_GPIO_DATA 0x715
#define AC_VERB_SET_GPIO_MASK 0x716
#define AC_VERB_SET_GPIO_DIRECTION 0x717
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index a7860c047503..76fc33dce537 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -729,6 +729,10 @@ static inline void __snd_pcm_set_state(struct snd_pcm_runtime *runtime,
runtime->status->state = state; /* copy for mmap */
}
+void snd_pcm_set_state(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state);
+snd_pcm_state_t snd_pcm_get_state(struct snd_pcm_substream *substream);
+
/**
* bytes_to_samples - Unit conversion of the size from bytes to samples
* @runtime: PCM runtime instance
diff --git a/include/sound/sdca_asoc.h b/include/sound/sdca_asoc.h
index aa9124f93218..46a61a52decc 100644
--- a/include/sound/sdca_asoc.h
+++ b/include/sound/sdca_asoc.h
@@ -13,6 +13,8 @@
struct device;
struct regmap;
struct sdca_function_data;
+struct snd_ctl_elem_value;
+struct snd_kcontrol;
struct snd_kcontrol_new;
struct snd_pcm_hw_params;
struct snd_pcm_substream;
@@ -23,6 +25,42 @@ struct snd_soc_dai_ops;
struct snd_soc_dapm_route;
struct snd_soc_dapm_widget;
+/* convenient macro to handle the mono volume in 7.8 fixed format representation */
+#define SDCA_SINGLE_Q78_TLV(xname, xreg, xmin, xmax, xstep, tlv_array) \
+{ \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, \
+ .get = sdca_asoc_q78_get_volsw, \
+ .put = sdca_asoc_q78_put_volsw, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) { \
+ .reg = (xreg), .rreg = (xreg), \
+ .min = (xmin), .max = (xmax), \
+ .shift = (xstep), .rshift = (xstep), \
+ .sign_bit = 15 \
+ } \
+}
+
+/* convenient macro for stereo volume in 7.8 fixed format with separate registers for L/R */
+#define SDCA_DOUBLE_Q78_TLV(xname, xreg_l, xreg_r, xmin, xmax, xstep, tlv_array) \
+{ \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, \
+ .get = sdca_asoc_q78_get_volsw, \
+ .put = sdca_asoc_q78_put_volsw, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) { \
+ .reg = (xreg_l), .rreg = (xreg_r), \
+ .min = (xmin), .max = (xmax), \
+ .shift = (xstep), .rshift = (xstep), \
+ .sign_bit = 15 \
+ } \
+}
+
int sdca_asoc_count_component(struct device *dev, struct sdca_function_data *function,
int *num_widgets, int *num_routes, int *num_controls,
int *num_dais);
@@ -57,5 +95,8 @@ int sdca_asoc_hw_params(struct device *dev, struct regmap *regmap,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai);
-
+int sdca_asoc_q78_put_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int sdca_asoc_q78_get_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
#endif // __SDCA_ASOC_H__
diff --git a/include/sound/sdca_interrupts.h b/include/sound/sdca_interrupts.h
index 9bcb5d8fd592..a515cc3df097 100644
--- a/include/sound/sdca_interrupts.h
+++ b/include/sound/sdca_interrupts.h
@@ -69,6 +69,8 @@ struct sdca_interrupt_info {
int sdca_irq_request(struct device *dev, struct sdca_interrupt_info *interrupt_info,
int sdca_irq, const char *name, irq_handler_t handler,
void *data);
+void sdca_irq_free(struct device *dev, struct sdca_interrupt_info *interrupt_info,
+ int sdca_irq, const char *name, void *data);
int sdca_irq_data_populate(struct device *dev, struct regmap *function_regmap,
struct snd_soc_component *component,
struct sdca_function_data *function,
@@ -81,6 +83,9 @@ int sdca_irq_populate_early(struct device *dev, struct regmap *function_regmap,
int sdca_irq_populate(struct sdca_function_data *function,
struct snd_soc_component *component,
struct sdca_interrupt_info *info);
+void sdca_irq_cleanup(struct device *dev,
+ struct sdca_function_data *function,
+ struct sdca_interrupt_info *info);
struct sdca_interrupt_info *sdca_irq_allocate(struct device *dev,
struct regmap *regmap, int irq);
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 69a9c9c4d0e9..915e6ae5f68d 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -54,6 +54,11 @@ struct prop_nums {
int platforms;
};
+enum simple_util_sysclk_order {
+ SIMPLE_SYSCLK_ORDER_CODEC_FIRST = 0,
+ SIMPLE_SYSCLK_ORDER_CPU_FIRST,
+};
+
struct simple_util_priv {
struct snd_soc_card snd_card;
struct simple_dai_props {
@@ -63,6 +68,7 @@ struct simple_util_priv {
struct snd_soc_codec_conf *codec_conf;
struct prop_nums num;
unsigned int mclk_fs;
+ enum simple_util_sysclk_order sysclk_order;
} *dai_props;
struct simple_util_jack hp_jack;
struct simple_util_jack mic_jack;
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 2a2b74b24a60..7e158d27ae8d 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -86,10 +86,10 @@ struct snd_soc_component_driver {
unsigned int reg, unsigned int val);
/* pcm creation and destruction */
- int (*pcm_construct)(struct snd_soc_component *component,
- struct snd_soc_pcm_runtime *rtd);
- void (*pcm_destruct)(struct snd_soc_component *component,
- struct snd_pcm *pcm);
+ int (*pcm_new)(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+ void (*pcm_free)(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
/* component wide operations */
int (*set_sysclk)(struct snd_soc_component *component,
@@ -224,7 +224,6 @@ struct snd_soc_component {
int num_dai;
struct regmap *regmap;
- int val_bytes;
struct mutex io_mutex;
@@ -327,7 +326,7 @@ int snd_soc_component_stream_event(struct snd_soc_component *component,
int snd_soc_component_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level);
-void snd_soc_component_setup_regmap(struct snd_soc_component *component);
+int snd_soc_component_regmap_val_bytes(struct snd_soc_component *component);
#ifdef CONFIG_REGMAP
void snd_soc_component_init_regmap(struct snd_soc_component *component,
struct regmap *regmap);
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 224396927aef..6a42812bba8c 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -53,6 +53,21 @@ struct snd_compr_stream;
#define SND_SOC_POSSIBLE_DAIFMT_PDM (1 << SND_SOC_DAI_FORMAT_PDM)
/*
+ * DAI TDM slot idle modes
+ *
+ * Describes a CODEC/CPU's behaviour when not actively receiving or
+ * transmitting on a given TDM slot. NONE is undefined behaviour.
+ * Add new modes to the end.
+ */
+#define SND_SOC_DAI_TDM_IDLE_NONE 0
+#define SND_SOC_DAI_TDM_IDLE_OFF 1
+#define SND_SOC_DAI_TDM_IDLE_ZERO 2
+#define SND_SOC_DAI_TDM_IDLE_PULLDOWN 3
+#define SND_SOC_DAI_TDM_IDLE_HIZ 4
+#define SND_SOC_DAI_TDM_IDLE_PULLUP 5
+#define SND_SOC_DAI_TDM_IDLE_DRIVE_HIGH 6
+
+/*
* DAI Clock gating.
*
* DAI bit clocks can be gated (disabled) when the DAI is not
@@ -181,6 +196,10 @@ int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width);
+int snd_soc_dai_set_tdm_idle(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int tx_mode, int rx_mode);
+
int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
unsigned int tx_num, const unsigned int *tx_slot,
unsigned int rx_num, const unsigned int *rx_slot);
@@ -297,6 +316,9 @@ struct snd_soc_dai_ops {
int (*set_tdm_slot)(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width);
+ int (*set_tdm_idle)(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int tx_mode, int rx_mode);
int (*set_channel_map)(struct snd_soc_dai *dai,
unsigned int tx_num, const unsigned int *tx_slot,
unsigned int rx_num, const unsigned int *rx_slot);
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 49f0fe05db01..4f8fb7622a13 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -424,6 +424,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
snd_soc_dapm_output, /* output pin */
snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */
+ snd_soc_dapm_mux_named_ctl, /* mux with named controls */
snd_soc_dapm_demux, /* connects the input to one of multiple outputs */
snd_soc_dapm_mixer, /* mixes several analog signals together */
snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 7d8376c8e1be..5e3eb617d832 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -311,6 +311,12 @@ struct platform_device;
.info = snd_soc_info_bool_ext, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = xdata }
+#define SOC_SINGLE_BOOL_EXT_ACC(xname, xdata, xhandler_get, xhandler_put, xaccess) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = xaccess, \
+ .info = snd_soc_info_bool_ext, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = xdata }
#define SOC_ENUM_EXT(xname, xenum, xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_enum_double, \
@@ -422,11 +428,6 @@ struct snd_soc_jack_pin;
#include <sound/soc-dpcm.h>
#include <sound/soc-topology.h>
-enum snd_soc_pcm_subclass {
- SND_SOC_PCM_CLASS_PCM = 0,
- SND_SOC_PCM_CLASS_BE = 1,
-};
-
int snd_soc_register_card(struct snd_soc_card *card);
void snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
@@ -465,6 +466,7 @@ struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
const char *driver_name);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name);
+struct snd_soc_component *snd_soc_lookup_component_by_name(const char *component_name);
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd);
#ifdef CONFIG_SND_SOC_COMPRESS
@@ -999,7 +1001,6 @@ struct snd_soc_card {
/* Mutex for PCM operations */
struct mutex pcm_mutex;
- enum snd_soc_pcm_subclass pcm_subclass;
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
@@ -1026,8 +1027,6 @@ struct snd_soc_card {
void (*remove_dai_link)(struct snd_soc_card *,
struct snd_soc_dai_link *link);
- long pmdown_time;
-
/* CPU <--> Codec DAI links */
struct snd_soc_dai_link *dai_link; /* predefined links only */
int num_links; /* predefined links only */
@@ -1072,9 +1071,6 @@ struct snd_soc_card {
struct list_head dapm_list;
struct list_head dapm_dirty;
- /* attached dynamic objects */
- struct list_head dobj_list;
-
/* Generic DAPM context for the card */
struct snd_soc_dapm_context *dapm;
struct snd_soc_dapm_stats dapm_stats;
@@ -1239,7 +1235,6 @@ struct soc_mixer_control {
unsigned int sign_bit;
unsigned int invert:1;
unsigned int autodisable:1;
- unsigned int sdca_q78:1;
#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
#endif
@@ -1340,15 +1335,6 @@ void snd_soc_of_parse_node_prefix(struct device_node *np,
struct snd_soc_codec_conf *codec_conf,
struct device_node *of_node,
const char *propname);
-static inline
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
- struct snd_soc_codec_conf *codec_conf,
- struct device_node *of_node,
- const char *propname)
-{
- snd_soc_of_parse_node_prefix(card->dev->of_node,
- codec_conf, of_node, propname);
-}
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
@@ -1412,6 +1398,9 @@ struct snd_soc_dai *snd_soc_find_dai(
struct snd_soc_dai *snd_soc_find_dai_with_mutex(
const struct snd_soc_dai_link_component *dlc);
+void soc_pcm_set_dai_params(struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *params);
+
#include <sound/soc-dai.h>
static inline
@@ -1517,7 +1506,7 @@ static inline void _snd_soc_dapm_mutex_assert_held_d(struct snd_soc_dapm_context
*/
static inline void _snd_soc_dpcm_mutex_lock_c(struct snd_soc_card *card)
{
- mutex_lock_nested(&card->pcm_mutex, card->pcm_subclass);
+ mutex_lock(&card->pcm_mutex);
}
static inline void _snd_soc_dpcm_mutex_unlock_c(struct snd_soc_card *card)
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index 227347c8f0b3..489083183673 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -71,6 +71,7 @@ struct asoc_sdw_aux_info {
};
struct asoc_sdw_codec_info {
+ const int vendor_id;
const int part_id;
const int version_id;
const char *name_prefix;
@@ -82,6 +83,8 @@ struct asoc_sdw_codec_info {
const int dai_num;
struct asoc_sdw_aux_info auxs[SOC_SDW_MAX_AUX_NUM];
const int aux_num;
+ /* Force AMP-style name_prefix handling (append AMP index) even if MIC/Jack DAIs exist */
+ const bool is_amp;
int (*codec_card_late_probe)(struct snd_soc_card *card);
@@ -259,6 +262,8 @@ int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_so
int asoc_sdw_cs42l43_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
int asoc_sdw_cs42l45_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
int asoc_sdw_cs42l45_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs47l47_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs47l47_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
int asoc_sdw_maxim_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
/* TI */
diff --git a/include/sound/tea6330t.h b/include/sound/tea6330t.h
index 1c77b78f6533..3a34033d2aa3 100644
--- a/include/sound/tea6330t.h
+++ b/include/sound/tea6330t.h
@@ -12,5 +12,6 @@
int snd_tea6330t_detect(struct snd_i2c_bus *bus, int equalizer);
int snd_tea6330t_update_mixer(struct snd_card *card, struct snd_i2c_bus *bus,
int equalizer, int fader);
+int snd_tea6330t_restore_mixer(struct snd_i2c_bus *bus);
#endif /* __SOUND_TEA6330T_H */
diff --git a/include/sound/timer.h b/include/sound/timer.h
index 760e132cc0cd..83bafe70cf33 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -102,6 +102,7 @@ struct snd_timer_instance {
unsigned int slave_id;
struct list_head open_list;
struct list_head active_list;
+ struct list_head master_list;
struct list_head ack_list;
struct list_head slave_list_head;
struct list_head slave_active_head;
diff --git a/include/sound/uda1380.h b/include/sound/uda1380.h
deleted file mode 100644
index 2e42ea2d0cfd..000000000000
--- a/include/sound/uda1380.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UDA1380 ALSA SoC Codec driver
- *
- * Copyright 2009 Philipp Zabel
- */
-
-#ifndef __UDA1380_H
-#define __UDA1380_H
-
-struct uda1380_platform_data {
- int gpio_power;
- int gpio_reset;
- int dac_clk;
-#define UDA1380_DAC_CLK_SYSCLK 0
-#define UDA1380_DAC_CLK_WSPLL 1
-};
-
-#endif /* __UDA1380_H */
diff --git a/include/trace/define_remote_events.h b/include/trace/define_remote_events.h
new file mode 100644
index 000000000000..676e803dc144
--- /dev/null
+++ b/include/trace/define_remote_events.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/trace_events.h>
+#include <linux/trace_remote_event.h>
+#include <linux/trace_seq.h>
+#include <linux/stringify.h>
+
+#define REMOTE_EVENT_INCLUDE(__file) __stringify(../../__file)
+
+#ifdef REMOTE_EVENT_SECTION
+# define __REMOTE_EVENT_SECTION(__name) __used __section(REMOTE_EVENT_SECTION"."#__name)
+#else
+# define __REMOTE_EVENT_SECTION(__name)
+#endif
+
+#define REMOTE_PRINTK_COUNT_ARGS(__args...) \
+ __COUNT_ARGS(, ##__args, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0)
+
+#define __remote_printk0() \
+ trace_seq_putc(seq, '\n')
+
+#define __remote_printk1(__fmt) \
+	trace_seq_puts(seq, " " __fmt "\n")
+
+#define __remote_printk2(__fmt, __args...) \
+do { \
+ trace_seq_putc(seq, ' '); \
+ trace_seq_printf(seq, __fmt, __args); \
+ trace_seq_putc(seq, '\n'); \
+} while (0)
+
+/* Apply the appropriate trace_seq sequence according to the number of arguments */
+#define remote_printk(__args...) \
+ CONCATENATE(__remote_printk, REMOTE_PRINTK_COUNT_ARGS(__args))(__args)
+
+#define RE_PRINTK(__args...) __args
+
+#define REMOTE_EVENT(__name, __id, __struct, __printk) \
+ REMOTE_EVENT_FORMAT(__name, __struct); \
+ static void remote_event_print_##__name(void *evt, struct trace_seq *seq) \
+ { \
+ struct remote_event_format_##__name __maybe_unused *__entry = evt; \
+ trace_seq_puts(seq, #__name); \
+ remote_printk(__printk); \
+ }
+#include REMOTE_EVENT_INCLUDE(REMOTE_EVENT_INCLUDE_FILE)
+
+#undef REMOTE_EVENT
+#undef RE_PRINTK
+#undef re_field
+#define re_field(__type, __field) \
+ { \
+ .type = #__type, .name = #__field, \
+ .size = sizeof(__type), .align = __alignof__(__type), \
+ .is_signed = is_signed_type(__type), \
+ },
+#define __entry REC
+#define RE_PRINTK(__fmt, __args...) "\"" __fmt "\", " __stringify(__args)
+#define REMOTE_EVENT(__name, __id, __struct, __printk) \
+ static struct trace_event_fields remote_event_fields_##__name[] = { \
+ __struct \
+ {} \
+ }; \
+ static char remote_event_print_fmt_##__name[] = __printk; \
+ static struct remote_event __REMOTE_EVENT_SECTION(__name) \
+ remote_event_##__name = { \
+ .name = #__name, \
+ .id = __id, \
+ .fields = remote_event_fields_##__name, \
+ .print_fmt = remote_event_print_fmt_##__name, \
+ .print = remote_event_print_##__name, \
+ }
+#include REMOTE_EVENT_INCLUDE(REMOTE_EVENT_INCLUDE_FILE)
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0864700f76e0..8ad7a2d76c1d 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1113,6 +1113,30 @@ TRACE_EVENT(btrfs_cow_block,
__entry->cow_level)
);
+TRACE_EVENT(btrfs_search_slot_restart,
+
+ TP_PROTO(const struct btrfs_root *root, int level,
+ const char *reason),
+
+ TP_ARGS(root, level, reason),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( int, level )
+ __string( reason, reason )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = btrfs_root_id(root);
+ __entry->level = level;
+ __assign_str(reason);
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) level=%d reason=%s",
+ show_root_type(__entry->root_objectid),
+ __entry->level, __get_str(reason))
+);
+
TRACE_EVENT(btrfs_space_reservation,
TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val,
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index a743b2a35ea7..6e3b1424eea4 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -249,10 +249,10 @@ TRACE_EVENT(cachefiles_lookup,
TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
+ __field(u64, dino)
+ __field(u64, ino)
__field(unsigned int, obj)
__field(short, error)
- __field(unsigned long, dino)
- __field(unsigned long, ino)
),
TP_fast_assign(
@@ -263,7 +263,7 @@ TRACE_EVENT(cachefiles_lookup,
__entry->error = IS_ERR(de) ? PTR_ERR(de) : 0;
),
- TP_printk("o=%08x dB=%lx B=%lx e=%d",
+ TP_printk("o=%08x dB=%llx B=%llx e=%d",
__entry->obj, __entry->dino, __entry->ino, __entry->error)
);
@@ -578,8 +578,8 @@ TRACE_EVENT(cachefiles_mark_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
+ __field(u64, inode)
__field(unsigned int, obj)
- __field(ino_t, inode)
),
TP_fast_assign(
@@ -587,7 +587,7 @@ TRACE_EVENT(cachefiles_mark_active,
__entry->inode = inode->i_ino;
),
- TP_printk("o=%08x B=%lx",
+ TP_printk("o=%08x B=%llx",
__entry->obj, __entry->inode)
);
@@ -599,8 +599,8 @@ TRACE_EVENT(cachefiles_mark_failed,
/* Note that obj may be NULL */
TP_STRUCT__entry(
+ __field(u64, inode)
__field(unsigned int, obj)
- __field(ino_t, inode)
),
TP_fast_assign(
@@ -608,7 +608,7 @@ TRACE_EVENT(cachefiles_mark_failed,
__entry->inode = inode->i_ino;
),
- TP_printk("o=%08x B=%lx",
+ TP_printk("o=%08x B=%llx",
__entry->obj, __entry->inode)
);
@@ -620,8 +620,8 @@ TRACE_EVENT(cachefiles_mark_inactive,
/* Note that obj may be NULL */
TP_STRUCT__entry(
+ __field(u64, inode)
__field(unsigned int, obj)
- __field(ino_t, inode)
),
TP_fast_assign(
@@ -629,7 +629,7 @@ TRACE_EVENT(cachefiles_mark_inactive,
__entry->inode = inode->i_ino;
),
- TP_printk("o=%08x B=%lx",
+ TP_printk("o=%08x B=%llx",
__entry->obj, __entry->inode)
);
diff --git a/include/trace/events/coredump.h b/include/trace/events/coredump.h
new file mode 100644
index 000000000000..c7b9c53fc498
--- /dev/null
+++ b/include/trace/events/coredump.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2026 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2026 Breno Leitao <leitao@debian.org>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM coredump
+
+#if !defined(_TRACE_COREDUMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COREDUMP_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+/**
+ * coredump - called when a coredump starts
+ * @sig: signal number that triggered the coredump
+ *
+ * This tracepoint fires at the beginning of a coredump attempt,
+ * providing a stable interface for monitoring coredump events.
+ */
+TRACE_EVENT(coredump,
+
+ TP_PROTO(int sig),
+
+ TP_ARGS(sig),
+
+ TP_STRUCT__entry(
+ __field(int, sig)
+ __array(char, comm, TASK_COMM_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->sig = sig;
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("sig=%d comm=%s",
+ __entry->sig, __entry->comm)
+);
+
+#endif /* _TRACE_COREDUMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index f241e204fe6b..4f8edf77dfbe 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -21,9 +21,9 @@ TRACE_EVENT(devlink_hwmsg,
TP_ARGS(devlink, incoming, type, buf, len),
TP_STRUCT__entry(
- __string(bus_name, devlink_to_dev(devlink)->bus->name)
- __string(dev_name, dev_name(devlink_to_dev(devlink)))
- __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(bus_name, devlink_bus_name(devlink))
+ __string(dev_name, devlink_dev_name(devlink))
+ __string(driver_name, devlink_dev_driver_name(devlink))
__field(bool, incoming)
__field(unsigned long, type)
__dynamic_array(u8, buf, len)
@@ -55,9 +55,9 @@ TRACE_EVENT(devlink_hwerr,
TP_ARGS(devlink, err, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink_to_dev(devlink)->bus->name)
- __string(dev_name, dev_name(devlink_to_dev(devlink)))
- __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(bus_name, devlink_bus_name(devlink))
+ __string(dev_name, devlink_dev_name(devlink))
+ __string(driver_name, devlink_dev_driver_name(devlink))
__field(int, err)
__string(msg, msg)
),
@@ -85,9 +85,9 @@ TRACE_EVENT(devlink_health_report,
TP_ARGS(devlink, reporter_name, msg),
TP_STRUCT__entry(
- __string(bus_name, devlink_to_dev(devlink)->bus->name)
- __string(dev_name, dev_name(devlink_to_dev(devlink)))
- __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(bus_name, devlink_bus_name(devlink))
+ __string(dev_name, devlink_dev_name(devlink))
+ __string(driver_name, devlink_dev_driver_name(devlink))
__string(reporter_name, reporter_name)
__string(msg, msg)
),
@@ -116,9 +116,9 @@ TRACE_EVENT(devlink_health_recover_aborted,
TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover),
TP_STRUCT__entry(
- __string(bus_name, devlink_to_dev(devlink)->bus->name)
- __string(dev_name, dev_name(devlink_to_dev(devlink)))
- __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(bus_name, devlink_bus_name(devlink))
+ __string(dev_name, devlink_dev_name(devlink))
+ __string(driver_name, devlink_dev_driver_name(devlink))
__string(reporter_name, reporter_name)
__field(bool, health_state)
__field(u64, time_since_last_recover)
@@ -150,9 +150,9 @@ TRACE_EVENT(devlink_health_reporter_state_update,
TP_ARGS(devlink, reporter_name, new_state),
TP_STRUCT__entry(
- __string(bus_name, devlink_to_dev(devlink)->bus->name)
- __string(dev_name, dev_name(devlink_to_dev(devlink)))
- __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(bus_name, devlink_bus_name(devlink))
+ __string(dev_name, devlink_dev_name(devlink))
+ __string(driver_name, devlink_dev_driver_name(devlink))
__string(reporter_name, reporter_name)
__field(u8, new_state)
),
@@ -181,9 +181,9 @@ TRACE_EVENT(devlink_trap_report,
TP_ARGS(devlink, skb, metadata),
TP_STRUCT__entry(
- __string(bus_name, devlink_to_dev(devlink)->bus->name)
- __string(dev_name, dev_name(devlink_to_dev(devlink)))
- __string(driver_name, devlink_to_dev(devlink)->driver->name)
+ __string(bus_name, devlink_bus_name(devlink))
+ __string(dev_name, devlink_dev_name(devlink))
+ __string(driver_name, devlink_dev_driver_name(devlink))
__string(trap_name, metadata->trap_name)
__string(trap_group_name, metadata->trap_group_name)
__array(char, input_dev_name, IFNAMSIZ)
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index 63597b004424..31c9ddf72c9d 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -34,7 +34,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
{ DMA_ATTR_MMIO, "MMIO" }, \
{ DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "CACHELINES_OVERLAP" }, \
- { DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" })
+ { DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" }, \
+ { DMA_ATTR_CC_SHARED, "CC_SHARED" })
DECLARE_EVENT_CLASS(dma_map,
TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 4814a65b68dc..3abba45c0601 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -9,37 +9,12 @@
struct dma_fence;
-DECLARE_EVENT_CLASS(dma_fence,
-
- TP_PROTO(struct dma_fence *fence),
-
- TP_ARGS(fence),
-
- TP_STRUCT__entry(
- __string(driver, dma_fence_driver_name(fence))
- __string(timeline, dma_fence_timeline_name(fence))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
- ),
-
- TP_fast_assign(
- __assign_str(driver);
- __assign_str(timeline);
- __entry->context = fence->context;
- __entry->seqno = fence->seqno;
- ),
-
- TP_printk("driver=%s timeline=%s context=%u seqno=%u",
- __get_str(driver), __get_str(timeline), __entry->context,
- __entry->seqno)
-);
-
/*
* Safe only for call sites which are guaranteed to not race with fence
* signaling,holding the fence->lock and having checked for not signaled, or the
* signaling path itself.
*/
-DECLARE_EVENT_CLASS(dma_fence_unsignaled,
+DECLARE_EVENT_CLASS(dma_fence,
TP_PROTO(struct dma_fence *fence),
@@ -64,14 +39,14 @@ DECLARE_EVENT_CLASS(dma_fence_unsignaled,
__entry->seqno)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit,
+DEFINE_EVENT(dma_fence, dma_fence_emit,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init,
+DEFINE_EVENT(dma_fence, dma_fence_init,
TP_PROTO(struct dma_fence *fence),
@@ -85,14 +60,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy,
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal,
+DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled,
+DEFINE_EVENT(dma_fence, dma_fence_signaled,
TP_PROTO(struct dma_fence *fence),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index def20d06507b..cd0e3fd8c23f 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -26,10 +26,9 @@ struct erofs_map_blocks;
#define show_mflags(flags) __print_flags(flags, "", \
{ EROFS_MAP_MAPPED, "M" }, \
{ EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ENCODED, "E" }, \
- { EROFS_MAP_FULL_MAPPED, "F" }, \
- { EROFS_MAP_FRAGMENT, "R" }, \
- { EROFS_MAP_PARTIAL_REF, "P" })
+ { EROFS_MAP_PARTIAL_MAPPED, "T" }, \
+ { EROFS_MAP_PARTIAL_REF, "P" }, \
+ { EROFS_MAP_FRAGMENT, "R" })
TRACE_EVENT(erofs_lookup,
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index a3e8fe414df8..f493642cf121 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -138,14 +138,14 @@ TRACE_DEFINE_ENUM(CR_ANY_FREE);
{ CR_ANY_FREE, "CR_ANY_FREE" })
TRACE_EVENT(ext4_other_inode_update_time,
- TP_PROTO(struct inode *inode, ino_t orig_ino),
+ TP_PROTO(struct inode *inode, u64 orig_ino),
TP_ARGS(inode, orig_ino),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( u64, orig_ino )
__field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, orig_ino )
__field( uid_t, uid )
__field( gid_t, gid )
__field( __u16, mode )
@@ -160,10 +160,10 @@ TRACE_EVENT(ext4_other_inode_update_time,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d orig_ino %lu ino %lu mode 0%o uid %u gid %u",
+ TP_printk("dev %d,%d orig_ino %llu ino %llu mode 0%o uid %u gid %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->orig_ino,
- (unsigned long) __entry->ino, __entry->mode,
+ __entry->orig_ino,
+ __entry->ino, __entry->mode,
__entry->uid, __entry->gid)
);
@@ -173,11 +173,11 @@ TRACE_EVENT(ext4_free_inode,
TP_ARGS(inode),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( __u64, blocks )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( uid_t, uid )
__field( gid_t, gid )
- __field( __u64, blocks )
__field( __u16, mode )
),
@@ -190,9 +190,9 @@ TRACE_EVENT(ext4_free_inode,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
+ TP_printk("dev %d,%d ino %llu mode 0%o uid %u gid %u blocks %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->mode,
+ __entry->ino, __entry->mode,
__entry->uid, __entry->gid, __entry->blocks)
);
@@ -202,8 +202,8 @@ TRACE_EVENT(ext4_request_inode,
TP_ARGS(dir, mode),
TP_STRUCT__entry(
+ __field( u64, dir )
__field( dev_t, dev )
- __field( ino_t, dir )
__field( __u16, mode )
),
@@ -213,9 +213,9 @@ TRACE_EVENT(ext4_request_inode,
__entry->mode = mode;
),
- TP_printk("dev %d,%d dir %lu mode 0%o",
+ TP_printk("dev %d,%d dir %llu mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->dir, __entry->mode)
+ __entry->dir, __entry->mode)
);
TRACE_EVENT(ext4_allocate_inode,
@@ -224,9 +224,9 @@ TRACE_EVENT(ext4_allocate_inode,
TP_ARGS(inode, dir, mode),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( u64, dir )
__field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, dir )
__field( __u16, mode )
),
@@ -237,10 +237,10 @@ TRACE_EVENT(ext4_allocate_inode,
__entry->mode = mode;
),
- TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ TP_printk("dev %d,%d ino %llu dir %llu mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->dir, __entry->mode)
+ __entry->ino,
+ __entry->dir, __entry->mode)
);
TRACE_EVENT(ext4_evict_inode,
@@ -249,8 +249,8 @@ TRACE_EVENT(ext4_evict_inode,
TP_ARGS(inode),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, nlink )
),
@@ -260,9 +260,9 @@ TRACE_EVENT(ext4_evict_inode,
__entry->nlink = inode->i_nlink;
),
- TP_printk("dev %d,%d ino %lu nlink %d",
+ TP_printk("dev %d,%d ino %llu nlink %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->nlink)
+ __entry->ino, __entry->nlink)
);
TRACE_EVENT(ext4_drop_inode,
@@ -271,8 +271,8 @@ TRACE_EVENT(ext4_drop_inode,
TP_ARGS(inode, drop),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, drop )
),
@@ -282,9 +282,9 @@ TRACE_EVENT(ext4_drop_inode,
__entry->drop = drop;
),
- TP_printk("dev %d,%d ino %lu drop %d",
+ TP_printk("dev %d,%d ino %llu drop %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->drop)
+ __entry->ino, __entry->drop)
);
TRACE_EVENT(ext4_nfs_commit_metadata,
@@ -293,8 +293,8 @@ TRACE_EVENT(ext4_nfs_commit_metadata,
TP_ARGS(inode),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
),
TP_fast_assign(
@@ -302,9 +302,9 @@ TRACE_EVENT(ext4_nfs_commit_metadata,
__entry->ino = inode->i_ino;
),
- TP_printk("dev %d,%d ino %lu",
+ TP_printk("dev %d,%d ino %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
+ __entry->ino)
);
TRACE_EVENT(ext4_mark_inode_dirty,
@@ -313,9 +313,9 @@ TRACE_EVENT(ext4_mark_inode_dirty,
TP_ARGS(inode, IP),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field(unsigned long, ip )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -324,9 +324,9 @@ TRACE_EVENT(ext4_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pS",
+ TP_printk("dev %d,%d ino %llu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, (void *)__entry->ip)
+ __entry->ino, (void *)__entry->ip)
);
TRACE_EVENT(ext4_begin_ordered_truncate,
@@ -335,9 +335,9 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
TP_ARGS(inode, new_size),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, new_size )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -346,9 +346,9 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
__entry->new_size = new_size;
),
- TP_printk("dev %d,%d ino %lu new_size %lld",
+ TP_printk("dev %d,%d ino %llu new_size %lld",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->new_size)
);
@@ -359,9 +359,9 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, pos )
+ __field( dev_t, dev )
__field( unsigned int, len )
),
@@ -372,9 +372,9 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
__entry->len = len;
),
- TP_printk("dev %d,%d ino %lu pos %lld len %u",
+ TP_printk("dev %d,%d ino %llu pos %lld len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->pos, __entry->len)
);
@@ -399,9 +399,9 @@ DECLARE_EVENT_CLASS(ext4__write_end,
TP_ARGS(inode, pos, len, copied),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, pos )
+ __field( dev_t, dev )
__field( unsigned int, len )
__field( unsigned int, copied )
),
@@ -414,9 +414,9 @@ DECLARE_EVENT_CLASS(ext4__write_end,
__entry->copied = copied;
),
- TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
+ TP_printk("dev %d,%d ino %llu pos %lld len %u copied %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->pos, __entry->len, __entry->copied)
);
@@ -450,13 +450,13 @@ TRACE_EVENT(ext4_writepages,
TP_ARGS(inode, wbc),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( long, nr_to_write )
- __field( long, pages_skipped )
+ __field( u64, ino )
__field( loff_t, range_start )
__field( loff_t, range_end )
+ __field( long, nr_to_write )
+ __field( long, pages_skipped )
__field( pgoff_t, writeback_index )
+ __field( dev_t, dev )
__field( int, sync_mode )
__field( char, for_kupdate )
__field( char, range_cyclic )
@@ -475,11 +475,11 @@ TRACE_EVENT(ext4_writepages,
__entry->range_cyclic = wbc->range_cyclic;
),
- TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+ TP_printk("dev %d,%d ino %llu nr_to_write %ld pages_skipped %ld "
"range_start %lld range_end %lld sync_mode %d "
"for_kupdate %d range_cyclic %d writeback_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->nr_to_write,
+ __entry->ino, __entry->nr_to_write,
__entry->pages_skipped, __entry->range_start,
__entry->range_end, __entry->sync_mode,
__entry->for_kupdate, __entry->range_cyclic,
@@ -493,11 +493,11 @@ TRACE_EVENT(ext4_da_write_folios_start,
TP_ARGS(inode, start_pos, next_pos, wbc),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, start_pos )
__field( loff_t, next_pos )
__field( long, nr_to_write )
+ __field( dev_t, dev )
__field( int, sync_mode )
),
@@ -510,9 +510,9 @@ TRACE_EVENT(ext4_da_write_folios_start,
__entry->sync_mode = wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d",
+ TP_printk("dev %d,%d ino %llu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
+ __entry->ino, __entry->start_pos, __entry->next_pos,
__entry->nr_to_write, __entry->sync_mode)
);
@@ -523,11 +523,11 @@ TRACE_EVENT(ext4_da_write_folios_end,
TP_ARGS(inode, start_pos, next_pos, wbc, ret),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, start_pos )
__field( loff_t, next_pos )
__field( long, nr_to_write )
+ __field( dev_t, dev )
__field( int, ret )
),
@@ -540,9 +540,9 @@ TRACE_EVENT(ext4_da_write_folios_end,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d",
+ TP_printk("dev %d,%d ino %llu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
+ __entry->ino, __entry->start_pos, __entry->next_pos,
__entry->nr_to_write, __entry->ret)
);
@@ -552,9 +552,9 @@ TRACE_EVENT(ext4_da_write_pages_extent,
TP_ARGS(inode, map),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, lblk )
+ __field( dev_t, dev )
__field( __u32, len )
__field( __u32, flags )
),
@@ -567,9 +567,9 @@ TRACE_EVENT(ext4_da_write_pages_extent,
__entry->flags = map->m_flags;
),
- TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s",
+ TP_printk("dev %d,%d ino %llu lblk %llu len %u flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->lblk, __entry->len,
+ __entry->ino, __entry->lblk, __entry->len,
show_mflags(__entry->flags))
);
@@ -580,12 +580,12 @@ TRACE_EVENT(ext4_writepages_result,
TP_ARGS(inode, wbc, ret, pages_written),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( long, pages_skipped )
+ __field( pgoff_t, writeback_index )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, ret )
__field( int, pages_written )
- __field( long, pages_skipped )
- __field( pgoff_t, writeback_index )
__field( int, sync_mode )
),
@@ -599,10 +599,10 @@ TRACE_EVENT(ext4_writepages_result,
__entry->sync_mode = wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+ TP_printk("dev %d,%d ino %llu ret %d pages_written %d pages_skipped %ld "
"sync_mode %d writeback_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->ret,
+ __entry->ino, __entry->ret,
__entry->pages_written, __entry->pages_skipped,
__entry->sync_mode,
(unsigned long) __entry->writeback_index)
@@ -614,9 +614,9 @@ DECLARE_EVENT_CLASS(ext4__folio_op,
TP_ARGS(inode, folio),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( pgoff_t, index )
+ __field( dev_t, dev )
),
@@ -626,9 +626,9 @@ DECLARE_EVENT_CLASS(ext4__folio_op,
__entry->index = folio->index;
),
- TP_printk("dev %d,%d ino %lu folio_index %lu",
+ TP_printk("dev %d,%d ino %llu folio_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned long) __entry->index)
);
@@ -652,11 +652,11 @@ DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
TP_ARGS(folio, offset, length),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( pgoff_t, index )
__field( size_t, offset )
__field( size_t, length )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -667,9 +667,9 @@ DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
__entry->length = length;
),
- TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu",
+ TP_printk("dev %d,%d ino %llu folio_index %lu offset %zu length %zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned long) __entry->index,
__entry->offset, __entry->length)
);
@@ -717,10 +717,10 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
TP_ARGS(ac, pa),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, pa_pstart )
__field( __u64, pa_lstart )
+ __field( dev_t, dev )
__field( __u32, pa_len )
),
@@ -733,9 +733,9 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
__entry->pa_len = pa->pa_len;
),
- TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
+ TP_printk("dev %d,%d ino %llu pstart %llu len %u lstart %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
);
@@ -762,9 +762,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
TP_ARGS(pa, block, count),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, block )
+ __field( dev_t, dev )
__field( __u32, count )
),
@@ -776,9 +776,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
__entry->count = count;
),
- TP_printk("dev %d,%d ino %lu block %llu count %u",
+ TP_printk("dev %d,%d ino %llu block %llu count %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->block, __entry->count)
);
@@ -811,8 +811,8 @@ TRACE_EVENT(ext4_discard_preallocations,
TP_ARGS(inode, len),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( unsigned int, len )
),
@@ -823,9 +823,9 @@ TRACE_EVENT(ext4_discard_preallocations,
__entry->len = len;
),
- TP_printk("dev %d,%d ino %lu len: %u",
+ TP_printk("dev %d,%d ino %llu len: %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->len)
+ __entry->ino, __entry->len)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -855,15 +855,15 @@ TRACE_EVENT(ext4_request_blocks,
TP_ARGS(ar),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( __u64, goal )
+ __field( __u64, pleft )
+ __field( __u64, pright )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( unsigned int, len )
__field( __u32, logical )
__field( __u32, lleft )
__field( __u32, lright )
- __field( __u64, goal )
- __field( __u64, pleft )
- __field( __u64, pright )
__field( unsigned int, flags )
),
@@ -880,10 +880,10 @@ TRACE_EVENT(ext4_request_blocks,
__entry->flags = ar->flags;
),
- TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu "
+ TP_printk("dev %d,%d ino %llu flags %s len %u lblk %u goal %llu "
"lleft %u lright %u pleft %llu pright %llu ",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
+ __entry->ino, show_mballoc_flags(__entry->flags),
__entry->len, __entry->logical, __entry->goal,
__entry->lleft, __entry->lright, __entry->pleft,
__entry->pright)
@@ -895,16 +895,16 @@ TRACE_EVENT(ext4_allocate_blocks,
TP_ARGS(ar, block),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, block )
+ __field( __u64, goal )
+ __field( __u64, pleft )
+ __field( __u64, pright )
+ __field( dev_t, dev )
__field( unsigned int, len )
__field( __u32, logical )
__field( __u32, lleft )
__field( __u32, lright )
- __field( __u64, goal )
- __field( __u64, pleft )
- __field( __u64, pright )
__field( unsigned int, flags )
),
@@ -922,10 +922,10 @@ TRACE_EVENT(ext4_allocate_blocks,
__entry->flags = ar->flags;
),
- TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u "
+ TP_printk("dev %d,%d ino %llu flags %s len %u block %llu lblk %u "
"goal %llu lleft %u lright %u pleft %llu pright %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
+ __entry->ino, show_mballoc_flags(__entry->flags),
__entry->len, __entry->block, __entry->logical,
__entry->goal, __entry->lleft, __entry->lright,
__entry->pleft, __entry->pright)
@@ -938,10 +938,10 @@ TRACE_EVENT(ext4_free_blocks,
TP_ARGS(inode, block, count, flags),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, block )
__field( unsigned long, count )
+ __field( dev_t, dev )
__field( int, flags )
__field( __u16, mode )
),
@@ -955,9 +955,9 @@ TRACE_EVENT(ext4_free_blocks,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s",
+ TP_printk("dev %d,%d ino %llu mode 0%o block %llu count %lu flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->mode, __entry->block, __entry->count,
show_free_flags(__entry->flags))
);
@@ -968,9 +968,9 @@ TRACE_EVENT(ext4_sync_file_enter,
TP_ARGS(file, datasync),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( u64, parent )
__field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, parent )
__field( int, datasync )
),
@@ -983,10 +983,10 @@ TRACE_EVENT(ext4_sync_file_enter,
__entry->parent = d_inode(dentry->d_parent)->i_ino;
),
- TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
+ TP_printk("dev %d,%d ino %llu parent %llu datasync %d ",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->parent, __entry->datasync)
+ __entry->ino,
+ __entry->parent, __entry->datasync)
);
TRACE_EVENT(ext4_sync_file_exit,
@@ -995,8 +995,8 @@ TRACE_EVENT(ext4_sync_file_exit,
TP_ARGS(inode, ret),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, ret )
),
@@ -1006,9 +1006,9 @@ TRACE_EVENT(ext4_sync_file_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu ret %d",
+ TP_printk("dev %d,%d ino %llu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->ret)
);
@@ -1039,8 +1039,8 @@ TRACE_EVENT(ext4_alloc_da_blocks,
TP_ARGS(inode),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( unsigned int, data_blocks )
),
@@ -1050,9 +1050,9 @@ TRACE_EVENT(ext4_alloc_da_blocks,
__entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
),
- TP_printk("dev %d,%d ino %lu reserved_data_blocks %u",
+ TP_printk("dev %d,%d ino %llu reserved_data_blocks %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->data_blocks)
);
@@ -1062,8 +1062,8 @@ TRACE_EVENT(ext4_mballoc_alloc,
TP_ARGS(ac),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( __u32, orig_logical )
__field( int, orig_start )
__field( __u32, orig_group )
@@ -1107,11 +1107,11 @@ TRACE_EVENT(ext4_mballoc_alloc,
__entry->cr = ac->ac_criteria;
),
- TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+ TP_printk("dev %d,%d inode %llu orig %u/%d/%u@%u goal %u/%d/%u@%u "
"result %u/%d/%u@%u blks %u grps %u cr %s flags %s "
"tail %u broken %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->orig_group, __entry->orig_start,
__entry->orig_len, __entry->orig_logical,
__entry->goal_group, __entry->goal_start,
@@ -1129,8 +1129,8 @@ TRACE_EVENT(ext4_mballoc_prealloc,
TP_ARGS(ac),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( __u32, orig_logical )
__field( int, orig_start )
__field( __u32, orig_group )
@@ -1154,9 +1154,9 @@ TRACE_EVENT(ext4_mballoc_prealloc,
__entry->result_len = ac->ac_b_ex.fe_len;
),
- TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+ TP_printk("dev %d,%d inode %llu orig %u/%d/%u@%u result %u/%d/%u@%u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->orig_group, __entry->orig_start,
__entry->orig_len, __entry->orig_logical,
__entry->result_group, __entry->result_start,
@@ -1173,8 +1173,8 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
TP_ARGS(sb, inode, group, start, len),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
@@ -1188,9 +1188,9 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
__entry->result_len = len;
),
- TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
+ TP_printk("dev %d,%d inode %llu extent %u/%d/%d ",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->result_group, __entry->result_start,
__entry->result_len)
);
@@ -1223,9 +1223,9 @@ TRACE_EVENT(ext4_forget,
TP_ARGS(inode, is_metadata, block),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, block )
+ __field( dev_t, dev )
__field( int, is_metadata )
__field( __u16, mode )
),
@@ -1238,9 +1238,9 @@ TRACE_EVENT(ext4_forget,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
+ TP_printk("dev %d,%d ino %llu mode 0%o is_metadata %d block %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->mode, __entry->is_metadata, __entry->block)
);
@@ -1250,9 +1250,9 @@ TRACE_EVENT(ext4_da_update_reserve_space,
TP_ARGS(inode, used_blocks, quota_claim),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, i_blocks )
+ __field( dev_t, dev )
__field( int, used_blocks )
__field( int, reserved_data_blocks )
__field( int, quota_claim )
@@ -1270,10 +1270,10 @@ TRACE_EVENT(ext4_da_update_reserve_space,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
+ TP_printk("dev %d,%d ino %llu mode 0%o i_blocks %llu used_blocks %d "
"reserved_data_blocks %d quota_claim %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->mode, __entry->i_blocks,
__entry->used_blocks, __entry->reserved_data_blocks,
__entry->quota_claim)
@@ -1285,9 +1285,9 @@ TRACE_EVENT(ext4_da_reserve_space,
TP_ARGS(inode, nr_resv),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, i_blocks )
+ __field( dev_t, dev )
__field( int, reserve_blocks )
__field( int, reserved_data_blocks )
__field( __u16, mode )
@@ -1302,10 +1302,10 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu reserve_blocks %d"
+ TP_printk("dev %d,%d ino %llu mode 0%o i_blocks %llu reserve_blocks %d"
"reserved_data_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->mode, __entry->i_blocks,
__entry->reserve_blocks, __entry->reserved_data_blocks)
);
@@ -1316,9 +1316,9 @@ TRACE_EVENT(ext4_da_release_space,
TP_ARGS(inode, freed_blocks),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, i_blocks )
+ __field( dev_t, dev )
__field( int, freed_blocks )
__field( int, reserved_data_blocks )
__field( __u16, mode )
@@ -1333,10 +1333,10 @@ TRACE_EVENT(ext4_da_release_space,
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
+ TP_printk("dev %d,%d ino %llu mode 0%o i_blocks %llu freed_blocks %d "
"reserved_data_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->mode, __entry->i_blocks,
__entry->freed_blocks, __entry->reserved_data_blocks)
);
@@ -1412,10 +1412,10 @@ DECLARE_EVENT_CLASS(ext4__fallocate_mode,
TP_ARGS(inode, offset, len, mode),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, offset )
__field( loff_t, len )
+ __field( dev_t, dev )
__field( int, mode )
),
@@ -1427,9 +1427,9 @@ DECLARE_EVENT_CLASS(ext4__fallocate_mode,
__entry->mode = mode;
),
- TP_printk("dev %d,%d ino %lu offset %lld len %lld mode %s",
+ TP_printk("dev %d,%d ino %llu offset %lld len %lld mode %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->offset, __entry->len,
show_falloc_mode(__entry->mode))
);
@@ -1462,9 +1462,9 @@ TRACE_EVENT(ext4_fallocate_exit,
TP_ARGS(inode, offset, max_blocks, ret),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, pos )
+ __field( dev_t, dev )
__field( unsigned int, blocks )
__field( int, ret )
),
@@ -1477,9 +1477,9 @@ TRACE_EVENT(ext4_fallocate_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
+ TP_printk("dev %d,%d ino %llu pos %lld blocks %u ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->pos, __entry->blocks,
__entry->ret)
);
@@ -1490,10 +1490,10 @@ TRACE_EVENT(ext4_unlink_enter,
TP_ARGS(parent, dentry),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
- __field( ino_t, parent )
+ __field( u64, ino )
+ __field( u64, parent )
__field( loff_t, size )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -1503,10 +1503,10 @@ TRACE_EVENT(ext4_unlink_enter,
__entry->size = d_inode(dentry)->i_size;
),
- TP_printk("dev %d,%d ino %lu size %lld parent %lu",
+ TP_printk("dev %d,%d ino %llu size %lld parent %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->size,
- (unsigned long) __entry->parent)
+ __entry->ino, __entry->size,
+ __entry->parent)
);
TRACE_EVENT(ext4_unlink_exit,
@@ -1515,8 +1515,8 @@ TRACE_EVENT(ext4_unlink_exit,
TP_ARGS(dentry, ret),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, ret )
),
@@ -1526,9 +1526,9 @@ TRACE_EVENT(ext4_unlink_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu ret %d",
+ TP_printk("dev %d,%d ino %llu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->ret)
);
@@ -1538,9 +1538,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( __u64, blocks )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -1549,9 +1549,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
__entry->blocks = inode->i_blocks;
),
- TP_printk("dev %d,%d ino %lu blocks %llu",
+ TP_printk("dev %d,%d ino %llu blocks %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->blocks)
+ __entry->ino, __entry->blocks)
);
DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
@@ -1576,13 +1576,13 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
TP_ARGS(inode, map, ux),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, u_pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, m_lblk )
__field( unsigned, m_len )
__field( ext4_lblk_t, u_lblk )
__field( unsigned, u_len )
- __field( ext4_fsblk_t, u_pblk )
),
TP_fast_assign(
@@ -1595,10 +1595,10 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
__entry->u_pblk = ext4_ext_pblock(ux);
),
- TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
+ TP_printk("dev %d,%d ino %llu m_lblk %u m_len %u u_lblk %u u_len %u "
"u_pblk %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->m_lblk, __entry->m_len,
__entry->u_lblk, __entry->u_len, __entry->u_pblk)
);
@@ -1614,16 +1614,16 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
TP_ARGS(inode, map, ux, ix),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, u_pblk )
+ __field( ext4_fsblk_t, i_pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, m_lblk )
__field( unsigned, m_len )
__field( ext4_lblk_t, u_lblk )
__field( unsigned, u_len )
- __field( ext4_fsblk_t, u_pblk )
__field( ext4_lblk_t, i_lblk )
__field( unsigned, i_len )
- __field( ext4_fsblk_t, i_pblk )
),
TP_fast_assign(
@@ -1639,11 +1639,11 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
__entry->i_pblk = ext4_ext_pblock(ix);
),
- TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u "
+ TP_printk("dev %d,%d ino %llu m_lblk %u m_len %u "
"u_lblk %u u_len %u u_pblk %llu "
"i_lblk %u i_len %u i_pblk %llu ",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->m_lblk, __entry->m_len,
__entry->u_lblk, __entry->u_len, __entry->u_pblk,
__entry->i_lblk, __entry->i_len, __entry->i_pblk)
@@ -1656,8 +1656,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
TP_ARGS(inode, lblk, len, flags),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( unsigned int, len )
__field( unsigned int, flags )
@@ -1671,9 +1671,9 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
__entry->flags = flags;
),
- TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s",
+ TP_printk("dev %d,%d ino %llu lblk %u len %u flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->lblk, __entry->len, show_map_flags(__entry->flags))
);
@@ -1698,10 +1698,10 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
TP_ARGS(inode, flags, map, ret),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( unsigned int, flags )
- __field( ext4_fsblk_t, pblk )
__field( ext4_lblk_t, lblk )
__field( unsigned int, len )
__field( unsigned int, mflags )
@@ -1719,10 +1719,10 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u "
+ TP_printk("dev %d,%d ino %llu flags %s lblk %u pblk %llu len %u "
"mflags %s ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
show_map_flags(__entry->flags), __entry->lblk, __entry->pblk,
__entry->len, show_mflags(__entry->mflags), __entry->ret)
);
@@ -1747,9 +1747,9 @@ TRACE_EVENT(ext4_ext_load_extent,
TP_ARGS(inode, lblk, pblk),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( ext4_fsblk_t, pblk )
+ __field( dev_t, dev )
__field( ext4_lblk_t, lblk )
),
@@ -1760,9 +1760,9 @@ TRACE_EVENT(ext4_ext_load_extent,
__entry->lblk = lblk;
),
- TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
+ TP_printk("dev %d,%d ino %llu lblk %u pblk %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->lblk, __entry->pblk)
);
@@ -1772,8 +1772,8 @@ TRACE_EVENT(ext4_load_inode,
TP_ARGS(sb, ino),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
),
TP_fast_assign(
@@ -1781,9 +1781,9 @@ TRACE_EVENT(ext4_load_inode,
__entry->ino = ino;
),
- TP_printk("dev %d,%d ino %ld",
+ TP_printk("dev %d,%d ino %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
+ __entry->ino)
);
TRACE_EVENT(ext4_journal_start_sb,
@@ -1823,7 +1823,7 @@ TRACE_EVENT(ext4_journal_start_inode,
TP_ARGS(inode, blocks, rsv_blocks, revoke_creds, type, IP),
TP_STRUCT__entry(
- __field( unsigned long, ino )
+ __field( u64, ino )
__field( dev_t, dev )
__field( unsigned long, ip )
__field( int, blocks )
@@ -1843,10 +1843,10 @@ TRACE_EVENT(ext4_journal_start_inode,
),
TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d,"
- " type %d, ino %lu, caller %pS", MAJOR(__entry->dev),
+ " type %d, ino %llu, caller %pS", MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->blocks, __entry->rsv_blocks,
- __entry->revoke_creds, __entry->type, __entry->ino,
- (void *)__entry->ip)
+ __entry->revoke_creds, __entry->type,
+ __entry->ino, (void *)__entry->ip)
);
TRACE_EVENT(ext4_journal_start_reserved,
@@ -1927,14 +1927,14 @@ TRACE_EVENT(ext4_ext_handle_unwritten_extents,
TP_ARGS(inode, map, flags, allocated, newblock),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, pblk )
+ __field( ext4_fsblk_t, newblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( int, flags )
__field( ext4_lblk_t, lblk )
- __field( ext4_fsblk_t, pblk )
__field( unsigned int, len )
__field( unsigned int, allocated )
- __field( ext4_fsblk_t, newblk )
),
TP_fast_assign(
@@ -1948,10 +1948,10 @@ TRACE_EVENT(ext4_ext_handle_unwritten_extents,
__entry->newblk = newblock;
),
- TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %s "
+ TP_printk("dev %d,%d ino %llu m_lblk %u m_pblk %llu m_len %u flags %s "
"allocated %d newblock %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
__entry->len, show_map_flags(__entry->flags),
(unsigned int) __entry->allocated,
@@ -1994,9 +1994,9 @@ TRACE_EVENT(ext4_ext_show_extent,
TP_ARGS(inode, lblk, pblk, len),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( ext4_fsblk_t, pblk )
+ __field( dev_t, dev )
__field( ext4_lblk_t, lblk )
__field( unsigned short, len )
),
@@ -2009,9 +2009,9 @@ TRACE_EVENT(ext4_ext_show_extent,
__entry->len = len;
),
- TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
+ TP_printk("dev %d,%d ino %llu lblk %u pblk %llu len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned) __entry->lblk,
(unsigned long long) __entry->pblk,
(unsigned short) __entry->len)
@@ -2025,14 +2025,14 @@ TRACE_EVENT(ext4_remove_blocks,
TP_ARGS(inode, ex, from, to, pc),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, ee_pblk )
+ __field( ext4_fsblk_t, pc_pclu )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, from )
__field( ext4_lblk_t, to )
- __field( ext4_fsblk_t, ee_pblk )
__field( ext4_lblk_t, ee_lblk )
__field( unsigned short, ee_len )
- __field( ext4_fsblk_t, pc_pclu )
__field( ext4_lblk_t, pc_lblk )
__field( int, pc_state)
),
@@ -2050,10 +2050,10 @@ TRACE_EVENT(ext4_remove_blocks,
__entry->pc_state = pc->state;
),
- TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
+ TP_printk("dev %d,%d ino %llu extent [%u(%llu), %u]"
"from %u to %u partial [pclu %lld lblk %u state %d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned) __entry->ee_lblk,
(unsigned long long) __entry->ee_pblk,
(unsigned short) __entry->ee_len,
@@ -2072,13 +2072,13 @@ TRACE_EVENT(ext4_ext_rm_leaf,
TP_ARGS(inode, start, ex, pc),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, ee_pblk )
+ __field( ext4_fsblk_t, pc_pclu )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, ee_lblk )
- __field( ext4_fsblk_t, ee_pblk )
__field( short, ee_len )
- __field( ext4_fsblk_t, pc_pclu )
__field( ext4_lblk_t, pc_lblk )
__field( int, pc_state)
),
@@ -2095,10 +2095,10 @@ TRACE_EVENT(ext4_ext_rm_leaf,
__entry->pc_state = pc->state;
),
- TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
+ TP_printk("dev %d,%d ino %llu start_lblk %u last_extent [%u(%llu), %u]"
"partial [pclu %lld lblk %u state %d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->ee_lblk,
(unsigned long long) __entry->ee_pblk,
@@ -2114,9 +2114,9 @@ TRACE_EVENT(ext4_ext_rm_idx,
TP_ARGS(inode, pblk),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( ext4_fsblk_t, pblk )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -2125,9 +2125,9 @@ TRACE_EVENT(ext4_ext_rm_idx,
__entry->pblk = pblk;
),
- TP_printk("dev %d,%d ino %lu index_pblk %llu",
+ TP_printk("dev %d,%d ino %llu index_pblk %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned long long) __entry->pblk)
);
@@ -2138,8 +2138,8 @@ TRACE_EVENT(ext4_ext_remove_space,
TP_ARGS(inode, start, end, depth),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, end )
__field( int, depth )
@@ -2153,9 +2153,9 @@ TRACE_EVENT(ext4_ext_remove_space,
__entry->depth = depth;
),
- TP_printk("dev %d,%d ino %lu since %u end %u depth %d",
+ TP_printk("dev %d,%d ino %llu since %u end %u depth %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->end,
__entry->depth)
@@ -2168,12 +2168,12 @@ TRACE_EVENT(ext4_ext_remove_space_done,
TP_ARGS(inode, start, end, depth, pc, eh_entries),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, pc_pclu )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, end )
__field( int, depth )
- __field( ext4_fsblk_t, pc_pclu )
__field( ext4_lblk_t, pc_lblk )
__field( int, pc_state )
__field( unsigned short, eh_entries )
@@ -2191,11 +2191,11 @@ TRACE_EVENT(ext4_ext_remove_space_done,
__entry->eh_entries = le16_to_cpu(eh_entries);
),
- TP_printk("dev %d,%d ino %lu since %u end %u depth %d "
+ TP_printk("dev %d,%d ino %llu since %u end %u depth %d "
"partial [pclu %lld lblk %u state %d] "
"remaining_entries %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->end,
__entry->depth,
@@ -2211,13 +2211,13 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
TP_ARGS(inode, es),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( u64, seq )
+ __field( ext4_fsblk_t, pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
- __field( ext4_fsblk_t, pblk )
__field( char, status )
- __field( u64, seq )
),
TP_fast_assign(
@@ -2230,9 +2230,9 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s seq %llu",
+ TP_printk("dev %d,%d ino %llu es [%u/%u) mapped %llu status %s seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->lblk, __entry->len,
__entry->pblk, show_extent_status(__entry->status),
__entry->seq)
@@ -2256,11 +2256,11 @@ TRACE_EVENT(ext4_es_remove_extent,
TP_ARGS(inode, lblk, len),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field( loff_t, lblk )
__field( loff_t, len )
__field( u64, seq )
+ __field( dev_t, dev )
),
TP_fast_assign(
@@ -2271,9 +2271,9 @@ TRACE_EVENT(ext4_es_remove_extent,
__entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%lld/%lld) seq %llu",
+ TP_printk("dev %d,%d ino %llu es [%lld/%lld) seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->lblk, __entry->len, __entry->seq)
);
@@ -2283,8 +2283,8 @@ TRACE_EVENT(ext4_es_find_extent_range_enter,
TP_ARGS(inode, lblk),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
),
@@ -2294,9 +2294,9 @@ TRACE_EVENT(ext4_es_find_extent_range_enter,
__entry->lblk = lblk;
),
- TP_printk("dev %d,%d ino %lu lblk %u",
+ TP_printk("dev %d,%d ino %llu lblk %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->lblk)
+ __entry->ino, __entry->lblk)
);
TRACE_EVENT(ext4_es_find_extent_range_exit,
@@ -2305,11 +2305,11 @@ TRACE_EVENT(ext4_es_find_extent_range_exit,
TP_ARGS(inode, es),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
- __field( ext4_fsblk_t, pblk )
__field( char, status )
),
@@ -2322,9 +2322,9 @@ TRACE_EVENT(ext4_es_find_extent_range_exit,
__entry->status = ext4_es_status(es);
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
+ TP_printk("dev %d,%d ino %llu es [%u/%u) mapped %llu status %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->lblk, __entry->len,
__entry->pblk, show_extent_status(__entry->status))
);
@@ -2335,8 +2335,8 @@ TRACE_EVENT(ext4_es_lookup_extent_enter,
TP_ARGS(inode, lblk),
TP_STRUCT__entry(
+ __field( u64, ino )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
),
@@ -2346,9 +2346,9 @@ TRACE_EVENT(ext4_es_lookup_extent_enter,
__entry->lblk = lblk;
),
- TP_printk("dev %d,%d ino %lu lblk %u",
+ TP_printk("dev %d,%d ino %llu lblk %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->lblk)
+ __entry->ino, __entry->lblk)
);
TRACE_EVENT(ext4_es_lookup_extent_exit,
@@ -2358,13 +2358,13 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
TP_ARGS(inode, es, found),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( ext4_fsblk_t, pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
- __field( ext4_fsblk_t, pblk )
- __field( char, status )
__field( int, found )
+ __field( char, status )
),
TP_fast_assign(
@@ -2377,9 +2377,9 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->found = found;
),
- TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s",
+ TP_printk("dev %d,%d ino %llu found %d [%u/%u) %llu %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->found,
+ __entry->ino, __entry->found,
__entry->lblk, __entry->len,
__entry->found ? __entry->pblk : 0,
show_extent_status(__entry->found ? __entry->status : 0))
@@ -2447,10 +2447,10 @@ TRACE_EVENT(ext4_collapse_range,
TP_ARGS(inode, offset, len),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, offset)
__field(loff_t, len)
+ __field(dev_t, dev)
),
TP_fast_assign(
@@ -2460,9 +2460,9 @@ TRACE_EVENT(ext4_collapse_range,
__entry->len = len;
),
- TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+ TP_printk("dev %d,%d ino %llu offset %lld len %lld",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->offset, __entry->len)
);
@@ -2472,10 +2472,10 @@ TRACE_EVENT(ext4_insert_range,
TP_ARGS(inode, offset, len),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, offset)
__field(loff_t, len)
+ __field(dev_t, dev)
),
TP_fast_assign(
@@ -2485,9 +2485,9 @@ TRACE_EVENT(ext4_insert_range,
__entry->len = len;
),
- TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+ TP_printk("dev %d,%d ino %llu offset %lld len %lld",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->offset, __entry->len)
);
@@ -2526,15 +2526,15 @@ TRACE_EVENT(ext4_es_insert_delayed_extent,
TP_ARGS(inode, es, lclu_allocated, end_allocated),
TP_STRUCT__entry(
+ __field( u64, ino )
+ __field( u64, seq )
+ __field( ext4_fsblk_t, pblk )
__field( dev_t, dev )
- __field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
- __field( ext4_fsblk_t, pblk )
__field( char, status )
__field( bool, lclu_allocated )
__field( bool, end_allocated )
- __field( u64, seq )
),
TP_fast_assign(
@@ -2549,9 +2549,9 @@ TRACE_EVENT(ext4_es_insert_delayed_extent,
__entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s allocated %d %d seq %llu",
+ TP_printk("dev %d,%d ino %llu es [%u/%u) mapped %llu status %s allocated %d %d seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
+ __entry->ino,
__entry->lblk, __entry->len,
__entry->pblk, show_extent_status(__entry->status),
__entry->lclu_allocated, __entry->end_allocated,
@@ -2875,9 +2875,9 @@ DECLARE_EVENT_CLASS(ext4_fc_track_dentry,
TP_ARGS(handle, inode, dentry, ret),
TP_STRUCT__entry(
+ __field(u64, i_ino)
__field(dev_t, dev)
__field(tid_t, t_tid)
- __field(ino_t, i_ino)
__field(tid_t, i_sync_tid)
__field(int, error)
),
@@ -2892,7 +2892,7 @@ DECLARE_EVENT_CLASS(ext4_fc_track_dentry,
__entry->error = ret;
),
- TP_printk("dev %d,%d, t_tid %u, ino %lu, i_sync_tid %u, error %d",
+ TP_printk("dev %d,%d, t_tid %u, ino %llu, i_sync_tid %u, error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
__entry->error
@@ -2916,9 +2916,9 @@ TRACE_EVENT(ext4_fc_track_inode,
TP_ARGS(handle, inode, ret),
TP_STRUCT__entry(
+ __field(u64, i_ino)
__field(dev_t, dev)
__field(tid_t, t_tid)
- __field(ino_t, i_ino)
__field(tid_t, i_sync_tid)
__field(int, error)
),
@@ -2933,7 +2933,7 @@ TRACE_EVENT(ext4_fc_track_inode,
__entry->error = ret;
),
- TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d",
+ TP_printk("dev %d:%d, t_tid %u, inode %llu, i_sync_tid %u, error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
__entry->error)
@@ -2946,12 +2946,12 @@ TRACE_EVENT(ext4_fc_track_range,
TP_ARGS(handle, inode, start, end, ret),
TP_STRUCT__entry(
+ __field(u64, i_ino)
+ __field(long, start)
+ __field(long, end)
__field(dev_t, dev)
__field(tid_t, t_tid)
- __field(ino_t, i_ino)
__field(tid_t, i_sync_tid)
- __field(long, start)
- __field(long, end)
__field(int, error)
),
@@ -2967,7 +2967,7 @@ TRACE_EVENT(ext4_fc_track_range,
__entry->error = ret;
),
- TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d, start %ld, end %ld",
+ TP_printk("dev %d:%d, t_tid %u, inode %llu, i_sync_tid %u, error %d, start %ld, end %ld",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
__entry->error, __entry->start, __entry->end)
@@ -3029,11 +3029,11 @@ TRACE_EVENT(ext4_move_extent_enter,
TP_ARGS(orig_inode, orig_map, donor_inode, donor_lblk),
TP_STRUCT__entry(
+ __field(u64, orig_ino)
+ __field(u64, donor_ino)
__field(dev_t, dev)
- __field(ino_t, orig_ino)
__field(ext4_lblk_t, orig_lblk)
__field(unsigned int, orig_flags)
- __field(ino_t, donor_ino)
__field(ext4_lblk_t, donor_lblk)
__field(unsigned int, len)
),
@@ -3048,11 +3048,11 @@ TRACE_EVENT(ext4_move_extent_enter,
__entry->len = orig_map->m_len;
),
- TP_printk("dev %d,%d origin ino %lu lblk %u flags %s donor ino %lu lblk %u len %u",
+ TP_printk("dev %d,%d origin ino %llu lblk %u flags %s donor ino %llu lblk %u len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->orig_ino, __entry->orig_lblk,
+ __entry->orig_ino, __entry->orig_lblk,
show_mflags(__entry->orig_flags),
- (unsigned long) __entry->donor_ino, __entry->donor_lblk,
+ __entry->donor_ino, __entry->donor_lblk,
__entry->len)
);
@@ -3065,13 +3065,13 @@ TRACE_EVENT(ext4_move_extent_exit,
move_len, move_type, ret),
TP_STRUCT__entry(
+ __field(u64, orig_ino)
+ __field(u64, donor_ino)
+ __field(u64, move_len)
__field(dev_t, dev)
- __field(ino_t, orig_ino)
__field(ext4_lblk_t, orig_lblk)
- __field(ino_t, donor_ino)
__field(ext4_lblk_t, donor_lblk)
__field(unsigned int, m_len)
- __field(u64, move_len)
__field(int, move_type)
__field(int, ret)
),
@@ -3088,10 +3088,10 @@ TRACE_EVENT(ext4_move_extent_exit,
__entry->ret = ret;
),
- TP_printk("dev %d,%d origin ino %lu lblk %u donor ino %lu lblk %u m_len %u, move_len %llu type %d ret %d",
+ TP_printk("dev %d,%d origin ino %llu lblk %u donor ino %llu lblk %u m_len %u, move_len %llu type %d ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->orig_ino, __entry->orig_lblk,
- (unsigned long) __entry->donor_ino, __entry->donor_lblk,
+ __entry->orig_ino, __entry->orig_lblk,
+ __entry->donor_ino, __entry->donor_lblk,
__entry->m_len, __entry->move_len, __entry->move_type,
__entry->ret)
);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 9364e6775562..05a46908acd9 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -9,7 +9,7 @@
#include <uapi/linux/f2fs.h>
#define show_dev(dev) MAJOR(dev), MINOR(dev)
-#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino
+#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long long)entry->ino
TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
@@ -206,13 +206,13 @@ DECLARE_EVENT_CLASS(f2fs__inode,
TP_ARGS(inode),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(u64, pino)
+ __field(loff_t, size)
+ __field(blkcnt_t, blocks)
__field(dev_t, dev)
- __field(ino_t, ino)
- __field(ino_t, pino)
__field(umode_t, mode)
- __field(loff_t, size)
__field(unsigned int, nlink)
- __field(blkcnt_t, blocks)
__field(__u8, advise)
),
@@ -227,10 +227,10 @@ DECLARE_EVENT_CLASS(f2fs__inode,
__entry->advise = F2FS_I(inode)->i_advise;
),
- TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, "
+ TP_printk("dev = (%d,%d), ino = %llu, pino = %llu, i_mode = 0x%hx, "
"i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x",
show_dev_ino(__entry),
- (unsigned long)__entry->pino,
+ __entry->pino,
__entry->mode,
__entry->size,
(unsigned int)__entry->nlink,
@@ -245,8 +245,8 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit,
TP_ARGS(inode, ret),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(umode_t, mode)
__field(int, ret)
),
@@ -258,7 +258,7 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d",
+ TP_printk("dev = (%d,%d), ino = %llu, type: %s, mode = 0%o, ret = %d",
show_dev_ino(__entry),
show_inode_type(__entry->mode & S_IFMT),
__entry->mode & S_ALL_PERM,
@@ -279,8 +279,8 @@ TRACE_EVENT(f2fs_sync_file_exit,
TP_ARGS(inode, cp_reason, datasync, ret),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(int, cp_reason)
__field(int, datasync)
__field(int, ret)
@@ -294,7 +294,7 @@ TRACE_EVENT(f2fs_sync_file_exit,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, cp_reason: %s, "
+ TP_printk("dev = (%d,%d), ino = %llu, cp_reason: %s, "
"datasync = %d, ret = %d",
show_dev_ino(__entry),
show_fsync_cpreason(__entry->cp_reason),
@@ -361,10 +361,10 @@ TRACE_EVENT(f2fs_unlink_enter,
TP_ARGS(dir, dentry),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, size)
__field(blkcnt_t, blocks)
+ __field(dev_t, dev)
__string(name, dentry->d_name.name)
),
@@ -376,7 +376,7 @@ TRACE_EVENT(f2fs_unlink_enter,
__assign_str(name);
),
- TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
+ TP_printk("dev = (%d,%d), dir ino = %llu, i_size = %lld, "
"i_blocks = %llu, name = %s",
show_dev_ino(__entry),
__entry->size,
@@ -412,8 +412,8 @@ TRACE_EVENT(f2fs_truncate_data_blocks_range,
TP_ARGS(inode, nid, ofs, free),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(nid_t, nid)
__field(unsigned int, ofs)
__field(int, free)
@@ -427,7 +427,7 @@ TRACE_EVENT(f2fs_truncate_data_blocks_range,
__entry->free = free;
),
- TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d",
+ TP_printk("dev = (%d,%d), ino = %llu, nid = %u, offset = %u, freed = %d",
show_dev_ino(__entry),
(unsigned int)__entry->nid,
__entry->ofs,
@@ -441,11 +441,11 @@ DECLARE_EVENT_CLASS(f2fs__truncate_op,
TP_ARGS(inode, from),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, size)
__field(blkcnt_t, blocks)
__field(u64, from)
+ __field(dev_t, dev)
),
TP_fast_assign(
@@ -456,7 +456,7 @@ DECLARE_EVENT_CLASS(f2fs__truncate_op,
__entry->from = from;
),
- TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, "
+ TP_printk("dev = (%d,%d), ino = %llu, i_size = %lld, i_blocks = %llu, "
"start file offset = %llu",
show_dev_ino(__entry),
__entry->size,
@@ -499,8 +499,8 @@ DECLARE_EVENT_CLASS(f2fs__truncate_node,
TP_ARGS(inode, nid, blk_addr),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(nid_t, nid)
__field(block_t, blk_addr)
),
@@ -512,7 +512,7 @@ DECLARE_EVENT_CLASS(f2fs__truncate_node,
__entry->blk_addr = blk_addr;
),
- TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx",
+ TP_printk("dev = (%d,%d), ino = %llu, nid = %u, block_address = 0x%llx",
show_dev_ino(__entry),
(unsigned int)__entry->nid,
(unsigned long long)__entry->blk_addr)
@@ -546,8 +546,8 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TP_ARGS(inode, nid, depth, err),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__array(nid_t, nid, 3)
__field(int, depth)
__field(int, err)
@@ -563,7 +563,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
__entry->err = err;
),
- TP_printk("dev = (%d,%d), ino = %lu, "
+ TP_printk("dev = (%d,%d), ino = %llu, "
"nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d",
show_dev_ino(__entry),
(unsigned int)__entry->nid[0],
@@ -581,11 +581,11 @@ TRACE_EVENT(f2fs_file_write_iter,
TP_ARGS(inode, offset, length, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, offset)
__field(size_t, length)
__field(ssize_t, ret)
+ __field(dev_t, dev)
),
TP_fast_assign(
@@ -596,7 +596,7 @@ TRACE_EVENT(f2fs_file_write_iter,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, "
+ TP_printk("dev = (%d,%d), ino = %llu, "
"offset = %lld, length = %zu, written(err) = %zd",
show_dev_ino(__entry),
__entry->offset,
@@ -611,11 +611,11 @@ TRACE_EVENT(f2fs_fadvise,
TP_ARGS(inode, offset, len, advice),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, size)
__field(loff_t, offset)
__field(loff_t, len)
+ __field(dev_t, dev)
__field(int, advice)
),
@@ -628,7 +628,7 @@ TRACE_EVENT(f2fs_fadvise,
__entry->advice = advice;
),
- TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld offset:%llu, len:%llu, advise:%d",
+ TP_printk("dev = (%d,%d), ino = %llu, i_size = %lld offset:%llu, len:%llu, advise:%d",
show_dev_ino(__entry),
(unsigned long long)__entry->size,
__entry->offset,
@@ -643,8 +643,8 @@ TRACE_EVENT(f2fs_map_blocks,
TP_ARGS(inode, map, flag, ret),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(block_t, m_lblk)
__field(block_t, m_pblk)
__field(unsigned int, m_len)
@@ -670,7 +670,7 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
+ TP_printk("dev = (%d,%d), ino = %llu, file offset = %llu, "
"start blkaddr = 0x%llx, len = 0x%llx, flags = %u, "
"seg_type = %d, may_create = %d, multidevice = %d, "
"flag = %d, err = %d",
@@ -885,8 +885,8 @@ TRACE_EVENT(f2fs_lookup_start,
TP_ARGS(dir, dentry, flags),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__string(name, dentry->d_name.name)
__field(unsigned int, flags)
),
@@ -898,7 +898,7 @@ TRACE_EVENT(f2fs_lookup_start,
__entry->flags = flags;
),
- TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
+ TP_printk("dev = (%d,%d), pino = %llu, name:%s, flags:%u",
show_dev_ino(__entry),
__get_str(name),
__entry->flags)
@@ -912,8 +912,8 @@ TRACE_EVENT(f2fs_lookup_end,
TP_ARGS(dir, dentry, ino, err),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__string(name, dentry->d_name.name)
__field(nid_t, cino)
__field(int, err)
@@ -927,7 +927,7 @@ TRACE_EVENT(f2fs_lookup_end,
__entry->err = err;
),
- TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
+ TP_printk("dev = (%d,%d), pino = %llu, name:%s, ino:%u, err:%d",
show_dev_ino(__entry),
__get_str(name),
__entry->cino,
@@ -943,10 +943,10 @@ TRACE_EVENT(f2fs_rename_start,
TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(u64, new_pino)
__field(dev_t, dev)
- __field(ino_t, ino)
__string(old_name, old_dentry->d_name.name)
- __field(ino_t, new_pino)
__string(new_name, new_dentry->d_name.name)
__field(unsigned int, flags)
),
@@ -960,8 +960,8 @@ TRACE_EVENT(f2fs_rename_start,
__entry->flags = flags;
),
- TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, "
- "new_dir = %lu, new_name: %s, flags = %u",
+ TP_printk("dev = (%d,%d), old_dir = %llu, old_name: %s, "
+ "new_dir = %llu, new_name: %s, flags = %u",
show_dev_ino(__entry),
__get_str(old_name),
__entry->new_pino,
@@ -977,8 +977,8 @@ TRACE_EVENT(f2fs_rename_end,
TP_ARGS(old_dentry, new_dentry, flags, ret),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__string(old_name, old_dentry->d_name.name)
__string(new_name, new_dentry->d_name.name)
__field(unsigned int, flags)
@@ -994,7 +994,7 @@ TRACE_EVENT(f2fs_rename_end,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, "
+ TP_printk("dev = (%d,%d), ino = %llu, old_name: %s, "
"new_name: %s, flags = %u, ret = %d",
show_dev_ino(__entry),
__get_str(old_name),
@@ -1010,10 +1010,10 @@ TRACE_EVENT(f2fs_readdir,
TP_ARGS(dir, start_pos, end_pos, err),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, start)
__field(loff_t, end)
+ __field(dev_t, dev)
__field(int, err)
),
@@ -1025,7 +1025,7 @@ TRACE_EVENT(f2fs_readdir,
__entry->err = err;
),
- TP_printk("dev = (%d,%d), ino = %lu, start_pos:%llu, end_pos:%llu, err:%d",
+ TP_printk("dev = (%d,%d), ino = %llu, start_pos:%llu, end_pos:%llu, err:%d",
show_dev_ino(__entry),
__entry->start,
__entry->end,
@@ -1040,13 +1040,13 @@ TRACE_EVENT(f2fs_fallocate,
TP_ARGS(inode, mode, offset, len, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(int, mode)
+ __field(u64, ino)
__field(loff_t, offset)
__field(loff_t, len)
__field(loff_t, size)
__field(blkcnt_t, blocks)
+ __field(dev_t, dev)
+ __field(int, mode)
__field(int, ret)
),
@@ -1061,7 +1061,7 @@ TRACE_EVENT(f2fs_fallocate,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, "
+ TP_printk("dev = (%d,%d), ino = %llu, mode = %x, offset = %lld, "
"len = %lld, i_size = %lld, i_blocks = %llu, ret = %d",
show_dev_ino(__entry),
__entry->mode,
@@ -1079,12 +1079,12 @@ TRACE_EVENT(f2fs_direct_IO_enter,
TP_ARGS(inode, iocb, len, rw),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, ki_pos)
+ __field(unsigned long, len)
+ __field(dev_t, dev)
__field(int, ki_flags)
__field(u16, ki_ioprio)
- __field(unsigned long, len)
__field(int, rw)
),
@@ -1098,7 +1098,7 @@ TRACE_EVENT(f2fs_direct_IO_enter,
__entry->rw = rw;
),
- TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
+ TP_printk("dev = (%d,%d), ino = %llu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
show_dev_ino(__entry),
__entry->ki_pos,
__entry->len,
@@ -1115,10 +1115,10 @@ TRACE_EVENT(f2fs_direct_IO_exit,
TP_ARGS(inode, offset, len, rw, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, pos)
__field(unsigned long, len)
+ __field(dev_t, dev)
__field(int, rw)
__field(int, ret)
),
@@ -1132,7 +1132,7 @@ TRACE_EVENT(f2fs_direct_IO_exit,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu "
+ TP_printk("dev = (%d,%d), ino = %llu pos = %lld len = %lu "
"rw = %d ret = %d",
show_dev_ino(__entry),
__entry->pos,
@@ -1176,9 +1176,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_folio_bio,
TP_ARGS(folio, fio),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, index)
+ __field(dev_t, dev)
__field(block_t, old_blkaddr)
__field(block_t, new_blkaddr)
__field(enum req_op, op)
@@ -1199,7 +1199,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_folio_bio,
__entry->type = fio->type;
),
- TP_printk("dev = (%d,%d), ino = %lu, folio_index = 0x%lx, "
+ TP_printk("dev = (%d,%d), ino = %llu, folio_index = 0x%lx, "
"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
@@ -1306,9 +1306,9 @@ TRACE_EVENT(f2fs_write_begin,
TP_ARGS(inode, pos, len),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, pos)
+ __field(dev_t, dev)
__field(unsigned int, len)
),
@@ -1319,7 +1319,7 @@ TRACE_EVENT(f2fs_write_begin,
__entry->len = len;
),
- TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u",
+ TP_printk("dev = (%d,%d), ino = %llu, pos = %llu, len = %u",
show_dev_ino(__entry),
(unsigned long long)__entry->pos,
__entry->len)
@@ -1333,9 +1333,9 @@ TRACE_EVENT(f2fs_write_end,
TP_ARGS(inode, pos, len, copied),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, pos)
+ __field(dev_t, dev)
__field(unsigned int, len)
__field(unsigned int, copied)
),
@@ -1348,7 +1348,7 @@ TRACE_EVENT(f2fs_write_end,
__entry->copied = copied;
),
- TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, copied = %u",
+ TP_printk("dev = (%d,%d), ino = %llu, pos = %llu, len = %u, copied = %u",
show_dev_ino(__entry),
(unsigned long long)__entry->pos,
__entry->len,
@@ -1362,12 +1362,12 @@ DECLARE_EVENT_CLASS(f2fs__folio,
TP_ARGS(folio, type),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(pgoff_t, index)
+ __field(pgoff_t, nrpages)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(int, type)
__field(int, dir)
- __field(pgoff_t, index)
- __field(pgoff_t, nrpages)
__field(int, dirty)
__field(int, uptodate)
),
@@ -1383,7 +1383,7 @@ DECLARE_EVENT_CLASS(f2fs__folio,
__entry->uptodate = folio_test_uptodate(folio);
),
- TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, nr_pages = %lu, "
+ TP_printk("dev = (%d,%d), ino = %llu, %s, %s, index = %lu, nr_pages = %lu, "
"dirty = %d, uptodate = %d",
show_dev_ino(__entry),
show_block_type(__entry->type),
@@ -1437,10 +1437,10 @@ TRACE_EVENT(f2fs_replace_atomic_write_block,
TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(ino_t, cow_ino)
+ __field(u64, ino)
+ __field(u64, cow_ino)
__field(pgoff_t, index)
+ __field(dev_t, dev)
__field(block_t, old_addr)
__field(block_t, new_addr)
__field(bool, recovery)
@@ -1456,7 +1456,7 @@ TRACE_EVENT(f2fs_replace_atomic_write_block,
__entry->recovery = recovery;
),
- TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, "
+ TP_printk("dev = (%d,%d), ino = %llu, cow_ino = %llu, index = %lu, "
"old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d",
show_dev_ino(__entry),
__entry->cow_ino,
@@ -1474,10 +1474,10 @@ DECLARE_EVENT_CLASS(f2fs_mmap,
TP_ARGS(inode, index, flags, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, index)
__field(vm_flags_t, flags)
+ __field(dev_t, dev)
__field(vm_fault_t, ret)
),
@@ -1489,7 +1489,7 @@ DECLARE_EVENT_CLASS(f2fs_mmap,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s",
+ TP_printk("dev = (%d,%d), ino = %llu, index = %lu, flags: %s, ret: %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
__print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
@@ -1519,15 +1519,15 @@ TRACE_EVENT(f2fs_writepages,
TP_ARGS(inode, wbc, type),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(int, type)
- __field(int, dir)
- __field(long, nr_to_write)
- __field(long, pages_skipped)
+ __field(u64, ino)
__field(loff_t, range_start)
__field(loff_t, range_end)
+ __field(long, nr_to_write)
+ __field(long, pages_skipped)
__field(pgoff_t, writeback_index)
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(int, dir)
__field(int, sync_mode)
__field(char, for_kupdate)
__field(char, for_background)
@@ -1554,7 +1554,7 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_sync = wbc->for_sync;
),
- TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, "
+ TP_printk("dev = (%d,%d), ino = %llu, %s, %s, nr_to_write %ld, "
"skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, "
"kupdate %u background %u tagged %u cyclic %u sync %u",
show_dev_ino(__entry),
@@ -1580,9 +1580,9 @@ TRACE_EVENT(f2fs_readpages,
TP_ARGS(inode, start, nrpage),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, start)
+ __field(dev_t, dev)
__field(unsigned int, nrpage)
),
@@ -1593,7 +1593,7 @@ TRACE_EVENT(f2fs_readpages,
__entry->nrpage = nrpage;
),
- TP_printk("dev = (%d,%d), ino = %lu, start = %lu nrpage = %u",
+ TP_printk("dev = (%d,%d), ino = %llu, start = %lu nrpage = %u",
show_dev_ino(__entry),
(unsigned long)__entry->start,
__entry->nrpage)
@@ -1738,8 +1738,8 @@ TRACE_EVENT(f2fs_lookup_extent_tree_start,
TP_ARGS(inode, pgofs, type),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, pgofs)
__field(enum extent_type, type)
),
@@ -1751,7 +1751,7 @@ TRACE_EVENT(f2fs_lookup_extent_tree_start,
__entry->type = type;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s",
+ TP_printk("dev = (%d,%d), ino = %llu, pgofs = %u, type = %s",
show_dev_ino(__entry),
__entry->pgofs,
show_extent_type(__entry->type))
@@ -1767,8 +1767,8 @@ TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end,
TP_CONDITION(ei),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, pgofs)
__field(unsigned int, fofs)
__field(unsigned int, len)
@@ -1784,7 +1784,7 @@ TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end,
__entry->blk = ei->blk;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ TP_printk("dev = (%d,%d), ino = %llu, pgofs = %u, "
"read_ext_info(fofs: %u, len: %u, blk: %u)",
show_dev_ino(__entry),
__entry->pgofs,
@@ -1803,13 +1803,13 @@ TRACE_EVENT_CONDITION(f2fs_lookup_age_extent_tree_end,
TP_CONDITION(ei),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(unsigned long long, age)
+ __field(unsigned long long, blocks)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, pgofs)
__field(unsigned int, fofs)
__field(unsigned int, len)
- __field(unsigned long long, age)
- __field(unsigned long long, blocks)
),
TP_fast_assign(
@@ -1822,7 +1822,7 @@ TRACE_EVENT_CONDITION(f2fs_lookup_age_extent_tree_end,
__entry->blocks = ei->last_blocks;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ TP_printk("dev = (%d,%d), ino = %llu, pgofs = %u, "
"age_ext_info(fofs: %u, len: %u, age: %llu, blocks: %llu)",
show_dev_ino(__entry),
__entry->pgofs,
@@ -1841,8 +1841,8 @@ TRACE_EVENT(f2fs_update_read_extent_tree_range,
TP_ARGS(inode, pgofs, len, blkaddr, c_len),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, pgofs)
__field(u32, blk)
__field(unsigned int, len)
@@ -1858,7 +1858,7 @@ TRACE_EVENT(f2fs_update_read_extent_tree_range,
__entry->c_len = c_len;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ TP_printk("dev = (%d,%d), ino = %llu, pgofs = %u, "
"len = %u, blkaddr = %u, c_len = %u",
show_dev_ino(__entry),
__entry->pgofs,
@@ -1876,12 +1876,12 @@ TRACE_EVENT(f2fs_update_age_extent_tree_range,
TP_ARGS(inode, pgofs, len, age, last_blks),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(unsigned long long, age)
+ __field(unsigned long long, blocks)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, pgofs)
__field(unsigned int, len)
- __field(unsigned long long, age)
- __field(unsigned long long, blocks)
),
TP_fast_assign(
@@ -1893,7 +1893,7 @@ TRACE_EVENT(f2fs_update_age_extent_tree_range,
__entry->blocks = last_blks;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ TP_printk("dev = (%d,%d), ino = %llu, pgofs = %u, "
"len = %u, age = %llu, blocks = %llu",
show_dev_ino(__entry),
__entry->pgofs,
@@ -1938,8 +1938,8 @@ TRACE_EVENT(f2fs_destroy_extent_tree,
TP_ARGS(inode, node_cnt, type),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, node_cnt)
__field(enum extent_type, type)
),
@@ -1951,7 +1951,7 @@ TRACE_EVENT(f2fs_destroy_extent_tree,
__entry->type = type;
),
- TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s",
+ TP_printk("dev = (%d,%d), ino = %llu, destroyed: node_cnt = %u, type = %s",
show_dev_ino(__entry),
__entry->node_cnt,
show_extent_type(__entry->type))
@@ -2027,9 +2027,9 @@ DECLARE_EVENT_CLASS(f2fs_zip_start,
TP_ARGS(inode, cluster_idx, cluster_size, algtype),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, idx)
+ __field(dev_t, dev)
__field(unsigned int, size)
__field(unsigned int, algtype)
),
@@ -2042,7 +2042,7 @@ DECLARE_EVENT_CLASS(f2fs_zip_start,
__entry->algtype = algtype;
),
- TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+ TP_printk("dev = (%d,%d), ino = %llu, cluster_idx:%lu, "
"cluster_size = %u, algorithm = %s",
show_dev_ino(__entry),
__entry->idx,
@@ -2058,9 +2058,9 @@ DECLARE_EVENT_CLASS(f2fs_zip_end,
TP_ARGS(inode, cluster_idx, compressed_size, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, idx)
+ __field(dev_t, dev)
__field(unsigned int, size)
__field(unsigned int, ret)
),
@@ -2073,7 +2073,7 @@ DECLARE_EVENT_CLASS(f2fs_zip_end,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+ TP_printk("dev = (%d,%d), ino = %llu, cluster_idx:%lu, "
"compressed_size = %u, ret = %d",
show_dev_ino(__entry),
__entry->idx,
@@ -2311,10 +2311,10 @@ TRACE_EVENT(f2fs_bmap,
TP_ARGS(inode, lblock, pblock),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(sector_t, lblock)
__field(sector_t, pblock)
+ __field(dev_t, dev)
),
TP_fast_assign(
@@ -2324,7 +2324,7 @@ TRACE_EVENT(f2fs_bmap,
__entry->pblock = pblock;
),
- TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld",
+ TP_printk("dev = (%d,%d), ino = %llu, lblock:%lld, pblock:%lld",
show_dev_ino(__entry),
(unsigned long long)__entry->lblock,
(unsigned long long)__entry->pblock)
@@ -2338,11 +2338,11 @@ TRACE_EVENT(f2fs_fiemap,
TP_ARGS(inode, lblock, pblock, len, flags, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(sector_t, lblock)
__field(sector_t, pblock)
__field(unsigned long long, len)
+ __field(dev_t, dev)
__field(unsigned int, flags)
__field(int, ret)
),
@@ -2357,7 +2357,7 @@ TRACE_EVENT(f2fs_fiemap,
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, "
+ TP_printk("dev = (%d,%d), ino = %llu, lblock:%lld, pblock:%lld, "
"len:%llu, flags:%u, ret:%d",
show_dev_ino(__entry),
(unsigned long long)__entry->lblock,
@@ -2375,13 +2375,13 @@ DECLARE_EVENT_CLASS(f2fs__rw_start,
TP_ARGS(inode, offset, bytes, pid, pathname, command),
TP_STRUCT__entry(
- __string(pathbuf, pathname)
+ __field(u64, ino)
__field(loff_t, offset)
- __field(int, bytes)
__field(loff_t, i_size)
+ __string(pathbuf, pathname)
__string(cmdline, command)
__field(pid_t, pid)
- __field(ino_t, ino)
+ __field(int, bytes)
),
TP_fast_assign(
@@ -2402,10 +2402,10 @@ DECLARE_EVENT_CLASS(f2fs__rw_start,
),
TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
- " pid %d, i_size %llu, ino %lu",
+ " pid %d, i_size %llu, ino %llu",
__get_str(pathbuf), __entry->offset, __entry->bytes,
__get_str(cmdline), __entry->pid, __entry->i_size,
- (unsigned long) __entry->ino)
+ __entry->ino)
);
DECLARE_EVENT_CLASS(f2fs__rw_end,
@@ -2415,7 +2415,7 @@ DECLARE_EVENT_CLASS(f2fs__rw_end,
TP_ARGS(inode, offset, bytes),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(loff_t, offset)
__field(int, bytes)
),
@@ -2426,8 +2426,8 @@ DECLARE_EVENT_CLASS(f2fs__rw_end,
__entry->bytes = bytes;
),
- TP_printk("ino %lu, offset %llu, bytes %d",
- (unsigned long) __entry->ino,
+ TP_printk("ino %llu, offset %llu, bytes %d",
+ __entry->ino,
__entry->offset, __entry->bytes)
);
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 370016c38a5b..116774886244 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -42,10 +42,10 @@ TRACE_EVENT(locks_get_lock_context,
TP_ARGS(inode, type, ctx),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
+ __field(struct file_lock_context *, ctx)
__field(dev_t, s_dev)
__field(unsigned char, type)
- __field(struct file_lock_context *, ctx)
),
TP_fast_assign(
@@ -55,7 +55,7 @@ TRACE_EVENT(locks_get_lock_context,
__entry->ctx = ctx;
),
- TP_printk("dev=0x%x:0x%x ino=0x%lx type=%s ctx=%p",
+ TP_printk("dev=0x%x:0x%x ino=0x%llx type=%s ctx=%p",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, show_fl_type(__entry->type), __entry->ctx)
);
@@ -66,16 +66,16 @@ DECLARE_EVENT_CLASS(filelock_lock,
TP_ARGS(inode, fl, ret),
TP_STRUCT__entry(
+ __field(u64, i_ino)
+ __field(loff_t, fl_start)
+ __field(loff_t, fl_end)
__field(struct file_lock *, fl)
- __field(unsigned long, i_ino)
- __field(dev_t, s_dev)
__field(struct file_lock_core *, blocker)
__field(fl_owner_t, owner)
+ __field(dev_t, s_dev)
__field(unsigned int, pid)
__field(unsigned int, flags)
__field(unsigned char, type)
- __field(loff_t, fl_start)
- __field(loff_t, fl_end)
__field(int, ret)
),
@@ -93,7 +93,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->ret = ret;
),
- TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
+ TP_printk("fl=%p dev=0x%x:0x%x ino=0x%llx fl_blocker=%p fl_owner=%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->blocker, __entry->owner,
__entry->pid, show_fl_flags(__entry->flags),
@@ -123,15 +123,15 @@ DECLARE_EVENT_CLASS(filelock_lease,
TP_ARGS(inode, fl),
TP_STRUCT__entry(
+ __field(u64, i_ino)
__field(struct file_lease *, fl)
- __field(unsigned long, i_ino)
- __field(dev_t, s_dev)
__field(struct file_lock_core *, blocker)
__field(fl_owner_t, owner)
- __field(unsigned int, flags)
- __field(unsigned char, type)
__field(unsigned long, break_time)
__field(unsigned long, downgrade_time)
+ __field(dev_t, s_dev)
+ __field(unsigned int, flags)
+ __field(unsigned char, type)
),
TP_fast_assign(
@@ -146,7 +146,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
- TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
+ TP_printk("fl=%p dev=0x%x:0x%x ino=0x%llx fl_blocker=%p fl_owner=%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->blocker, __entry->owner,
show_fl_flags(__entry->flags),
@@ -175,12 +175,12 @@ TRACE_EVENT(generic_add_lease,
TP_ARGS(inode, fl),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
+ __field(fl_owner_t, owner)
+ __field(dev_t, s_dev)
__field(int, wcount)
__field(int, rcount)
__field(int, icount)
- __field(dev_t, s_dev)
- __field(fl_owner_t, owner)
__field(unsigned int, flags)
__field(unsigned char, type)
),
@@ -196,7 +196,7 @@ TRACE_EVENT(generic_add_lease,
__entry->type = fl->c.flc_type;
),
- TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s",
+ TP_printk("dev=0x%x:0x%x ino=0x%llx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->wcount, __entry->rcount,
__entry->icount, __entry->owner,
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index f48fe637bfd2..4dcf8e9e2e0d 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -20,8 +20,8 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_ARGS(folio),
TP_STRUCT__entry(
+ __field(u64, i_ino)
__field(unsigned long, pfn)
- __field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
__field(unsigned char, order)
@@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
__entry->order = folio_order(folio);
),
- TP_printk("dev %d:%d ino %lx pfn=0x%lx ofs=%lu order=%u",
+ TP_printk("dev %d:%d ino %llx pfn=0x%lx ofs=%lu order=%u",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
__entry->pfn,
@@ -67,7 +67,7 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
TP_ARGS(mapping, index, last_index),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
__field(dev_t, s_dev)
__field(unsigned long, index)
__field(unsigned long, last_index)
@@ -85,7 +85,7 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
),
TP_printk(
- "dev=%d:%d ino=%lx ofs=%lld-%lld",
+ "dev=%d:%d ino=%llx ofs=%lld-%lld",
MAJOR(__entry->s_dev),
MINOR(__entry->s_dev), __entry->i_ino,
((loff_t)__entry->index) << PAGE_SHIFT,
@@ -117,7 +117,7 @@ TRACE_EVENT(mm_filemap_fault,
TP_ARGS(mapping, index),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
__field(dev_t, s_dev)
__field(unsigned long, index)
),
@@ -133,7 +133,7 @@ TRACE_EVENT(mm_filemap_fault,
),
TP_printk(
- "dev=%d:%d ino=%lx ofs=%lld",
+ "dev=%d:%d ino=%llx ofs=%lld",
MAJOR(__entry->s_dev),
MINOR(__entry->s_dev), __entry->i_ino,
((loff_t)__entry->index) << PAGE_SHIFT
@@ -146,7 +146,7 @@ TRACE_EVENT(filemap_set_wb_err,
TP_ARGS(mapping, eseq),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
__field(dev_t, s_dev)
__field(errseq_t, errseq)
),
@@ -160,7 +160,7 @@ TRACE_EVENT(filemap_set_wb_err,
__entry->s_dev = mapping->host->i_rdev;
),
- TP_printk("dev=%d:%d ino=0x%lx errseq=0x%x",
+ TP_printk("dev=%d:%d ino=0x%llx errseq=0x%x",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->errseq)
);
@@ -171,8 +171,8 @@ TRACE_EVENT(file_check_and_advance_wb_err,
TP_ARGS(file, old),
TP_STRUCT__entry(
+ __field(u64, i_ino)
__field(struct file *, file)
- __field(unsigned long, i_ino)
__field(dev_t, s_dev)
__field(errseq_t, old)
__field(errseq_t, new)
@@ -191,7 +191,7 @@ TRACE_EVENT(file_check_and_advance_wb_err,
__entry->new = file->f_wb_err;
),
- TP_printk("file=%p dev=%d:%d ino=0x%lx old=0x%x new=0x%x",
+ TP_printk("file=%p dev=%d:%d ino=0x%llx old=0x%x new=0x%x",
__entry->file, MAJOR(__entry->s_dev),
MINOR(__entry->s_dev), __entry->i_ino, __entry->old,
__entry->new)
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 50ebc1290ab0..11121baa8ece 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -12,7 +12,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
pgoff_t max_pgoff, int result),
TP_ARGS(inode, vmf, max_pgoff, result),
TP_STRUCT__entry(
- __field(unsigned long, ino)
+ __field(u64, ino)
__field(unsigned long, vm_start)
__field(unsigned long, vm_end)
__field(vm_flags_t, vm_flags)
@@ -35,7 +35,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
__entry->max_pgoff = max_pgoff;
__entry->result = result;
),
- TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start "
+ TP_printk("dev %d:%d ino %#llx %s %s address %#lx vm_start "
"%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s",
MAJOR(__entry->dev),
MINOR(__entry->dev),
@@ -66,7 +66,7 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
void *radix_entry),
TP_ARGS(inode, vmf, zero_folio, radix_entry),
TP_STRUCT__entry(
- __field(unsigned long, ino)
+ __field(u64, ino)
__field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(struct folio *, zero_folio)
@@ -81,7 +81,7 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
__entry->zero_folio = zero_folio;
__entry->radix_entry = radix_entry;
),
- TP_printk("dev %d:%d ino %#lx %s address %#lx zero_folio %p "
+ TP_printk("dev %d:%d ino %#llx %s address %#lx zero_folio %p "
"radix_entry %#lx",
MAJOR(__entry->dev),
MINOR(__entry->dev),
@@ -106,7 +106,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class,
TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
TP_ARGS(inode, vmf, result),
TP_STRUCT__entry(
- __field(unsigned long, ino)
+ __field(u64, ino)
__field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
@@ -123,7 +123,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class,
__entry->pgoff = vmf->pgoff;
__entry->result = result;
),
- TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
+ TP_printk("dev %d:%d ino %#llx %s %s address %#lx pgoff %#lx %s",
MAJOR(__entry->dev),
MINOR(__entry->dev),
__entry->ino,
@@ -150,7 +150,7 @@ DECLARE_EVENT_CLASS(dax_writeback_range_class,
TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
TP_ARGS(inode, start_index, end_index),
TP_STRUCT__entry(
- __field(unsigned long, ino)
+ __field(u64, ino)
__field(pgoff_t, start_index)
__field(pgoff_t, end_index)
__field(dev_t, dev)
@@ -161,7 +161,7 @@ DECLARE_EVENT_CLASS(dax_writeback_range_class,
__entry->start_index = start_index;
__entry->end_index = end_index;
),
- TP_printk("dev %d:%d ino %#lx pgoff %#lx-%#lx",
+ TP_printk("dev %d:%d ino %#llx pgoff %#lx-%#lx",
MAJOR(__entry->dev),
MINOR(__entry->dev),
__entry->ino,
@@ -182,7 +182,7 @@ TRACE_EVENT(dax_writeback_one,
TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
TP_ARGS(inode, pgoff, pglen),
TP_STRUCT__entry(
- __field(unsigned long, ino)
+ __field(u64, ino)
__field(pgoff_t, pgoff)
__field(pgoff_t, pglen)
__field(dev_t, dev)
@@ -193,7 +193,7 @@ TRACE_EVENT(dax_writeback_one,
__entry->pgoff = pgoff;
__entry->pglen = pglen;
),
- TP_printk("dev %d:%d ino %#lx pgoff %#lx pglen %#lx",
+ TP_printk("dev %d:%d ino %#llx pgoff %#lx pglen %#lx",
MAJOR(__entry->dev),
MINOR(__entry->dev),
__entry->ino,
diff --git a/include/trace/events/fsverity.h b/include/trace/events/fsverity.h
index a8c52f21cbd5..4477c17e0574 100644
--- a/include/trace/events/fsverity.h
+++ b/include/trace/events/fsverity.h
@@ -16,7 +16,7 @@ TRACE_EVENT(fsverity_enable,
const struct merkle_tree_params *params),
TP_ARGS(inode, params),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(u64, data_size)
__field(u64, tree_size)
__field(unsigned int, merkle_block)
@@ -29,8 +29,8 @@ TRACE_EVENT(fsverity_enable,
__entry->merkle_block = params->block_size;
__entry->num_levels = params->num_levels;
),
- TP_printk("ino %lu data_size %llu tree_size %llu merkle_block %u levels %u",
- (unsigned long) __entry->ino,
+ TP_printk("ino %llu data_size %llu tree_size %llu merkle_block %u levels %u",
+ __entry->ino,
__entry->data_size,
__entry->tree_size,
__entry->merkle_block,
@@ -42,7 +42,7 @@ TRACE_EVENT(fsverity_tree_done,
const struct merkle_tree_params *params),
TP_ARGS(inode, vi, params),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(u64, data_size)
__field(u64, tree_size)
__field(unsigned int, merkle_block)
@@ -59,8 +59,8 @@ TRACE_EVENT(fsverity_tree_done,
memcpy(__get_dynamic_array(root_hash), vi->root_hash, __get_dynamic_array_len(root_hash));
memcpy(__get_dynamic_array(file_digest), vi->file_digest, __get_dynamic_array_len(file_digest));
),
- TP_printk("ino %lu data_size %llu tree_size %lld merkle_block %u levels %u root_hash %s digest %s",
- (unsigned long) __entry->ino,
+ TP_printk("ino %llu data_size %llu tree_size %lld merkle_block %u levels %u root_hash %s digest %s",
+ __entry->ino,
__entry->data_size,
__entry->tree_size,
__entry->merkle_block,
@@ -75,7 +75,7 @@ TRACE_EVENT(fsverity_verify_data_block,
u64 data_pos),
TP_ARGS(inode, params, data_pos),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(u64, data_pos)
__field(unsigned int, merkle_block)
),
@@ -84,8 +84,8 @@ TRACE_EVENT(fsverity_verify_data_block,
__entry->data_pos = data_pos;
__entry->merkle_block = params->block_size;
),
- TP_printk("ino %lu data_pos %llu merkle_block %u",
- (unsigned long) __entry->ino,
+ TP_printk("ino %llu data_pos %llu merkle_block %u",
+ __entry->ino,
__entry->data_pos,
__entry->merkle_block)
);
@@ -96,7 +96,7 @@ TRACE_EVENT(fsverity_merkle_hit,
unsigned int hidx),
TP_ARGS(inode, data_pos, hblock_idx, level, hidx),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(u64, data_pos)
__field(unsigned long, hblock_idx)
__field(unsigned int, level)
@@ -109,8 +109,8 @@ TRACE_EVENT(fsverity_merkle_hit,
__entry->level = level;
__entry->hidx = hidx;
),
- TP_printk("ino %lu data_pos %llu hblock_idx %lu level %u hidx %u",
- (unsigned long) __entry->ino,
+ TP_printk("ino %llu data_pos %llu hblock_idx %lu level %u hidx %u",
+ __entry->ino,
__entry->data_pos,
__entry->hblock_idx,
__entry->level,
@@ -122,7 +122,7 @@ TRACE_EVENT(fsverity_verify_merkle_block,
unsigned int level, unsigned int hidx),
TP_ARGS(inode, hblock_idx, level, hidx),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(unsigned long, hblock_idx)
__field(unsigned int, level)
__field(unsigned int, hidx)
@@ -133,8 +133,8 @@ TRACE_EVENT(fsverity_verify_merkle_block,
__entry->level = level;
__entry->hidx = hidx;
),
- TP_printk("ino %lu hblock_idx %lu level %u hidx %u",
- (unsigned long) __entry->ino,
+ TP_printk("ino %llu hblock_idx %lu level %u hidx %u",
+ __entry->ino,
__entry->hblock_idx,
__entry->level,
__entry->hidx)
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4e41bff31888..bcdc57eea270 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -25,6 +25,7 @@
EM( SCAN_PAGE_LRU, "page_not_in_lru") \
EM( SCAN_PAGE_LOCK, "page_locked") \
EM( SCAN_PAGE_ANON, "page_not_anon") \
+ EM( SCAN_PAGE_LAZYFREE, "page_lazyfree") \
EM( SCAN_PAGE_COMPOUND, "page_compound") \
EM( SCAN_ANY_PROCESS, "no_process_for_page") \
EM( SCAN_VMA_NULL, "vma_null") \
@@ -237,5 +238,30 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
__print_symbolic(__entry->result, SCAN_STATUS))
);
+TRACE_EVENT(mm_khugepaged_scan,
+
+ TP_PROTO(struct mm_struct *mm, unsigned int progress,
+ bool full_scan_finished),
+
+ TP_ARGS(mm, progress, full_scan_finished),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned int, progress)
+ __field(bool, full_scan_finished)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->progress = progress;
+ __entry->full_scan_finished = full_scan_finished;
+ ),
+
+ TP_printk("mm=%p, progress=%u, full_scan_finished=%d",
+ __entry->mm,
+ __entry->progress,
+ __entry->full_scan_finished)
+);
+
#endif /* __HUGE_MEMORY_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/hugetlbfs.h b/include/trace/events/hugetlbfs.h
index 59605dfaeeb4..8ba72c1d4f4d 100644
--- a/include/trace/events/hugetlbfs.h
+++ b/include/trace/events/hugetlbfs.h
@@ -14,9 +14,9 @@ TRACE_EVENT(hugetlbfs_alloc_inode,
TP_ARGS(inode, dir, mode),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(u64, dir)
__field(dev_t, dev)
- __field(ino_t, ino)
- __field(ino_t, dir)
__field(__u16, mode)
),
@@ -27,10 +27,10 @@ TRACE_EVENT(hugetlbfs_alloc_inode,
__entry->mode = mode;
),
- TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ TP_printk("dev %d,%d ino %llu dir %llu mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino,
- (unsigned long) __entry->dir, __entry->mode)
+ __entry->ino,
+ __entry->dir, __entry->mode)
);
DECLARE_EVENT_CLASS(hugetlbfs__inode,
@@ -40,13 +40,13 @@ DECLARE_EVENT_CLASS(hugetlbfs__inode,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(__u16, mode)
+ __field(u64, ino)
__field(loff_t, size)
+ __field(blkcnt_t, blocks)
+ __field(dev_t, dev)
__field(unsigned int, nlink)
__field(unsigned int, seals)
- __field(blkcnt_t, blocks)
+ __field(__u16, mode)
),
TP_fast_assign(
@@ -59,8 +59,8 @@ DECLARE_EVENT_CLASS(hugetlbfs__inode,
__entry->blocks = inode->i_blocks;
),
- TP_printk("dev %d,%d ino %lu mode 0%o size %lld nlink %u seals %u blocks %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %llu mode 0%o size %lld nlink %u seals %u blocks %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
__entry->mode, __entry->size, __entry->nlink, __entry->seals,
(unsigned long long)__entry->blocks)
);
@@ -87,14 +87,14 @@ TRACE_EVENT(hugetlbfs_setattr,
TP_ARGS(inode, dentry, attr),
TP_STRUCT__entry(
+ __field(u64, ino)
+ __field(loff_t, old_size)
+ __field(loff_t, ia_size)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(unsigned int, d_len)
__string(d_name, dentry->d_name.name)
__field(unsigned int, ia_valid)
__field(unsigned int, ia_mode)
- __field(loff_t, old_size)
- __field(loff_t, ia_size)
),
TP_fast_assign(
@@ -108,8 +108,8 @@ TRACE_EVENT(hugetlbfs_setattr,
__entry->ia_size = attr->ia_size;
),
- TP_printk("dev %d,%d ino %lu name %.*s valid %#x mode 0%o old_size %lld size %lld",
- MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino,
+ TP_printk("dev %d,%d ino %llu name %.*s valid %#x mode 0%o old_size %lld size %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
__entry->d_len, __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
__entry->old_size, __entry->ia_size)
);
@@ -122,12 +122,12 @@ TRACE_EVENT(hugetlbfs_fallocate,
TP_ARGS(inode, mode, offset, len, ret),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(int, mode)
+ __field(u64, ino)
__field(loff_t, offset)
__field(loff_t, len)
__field(loff_t, size)
+ __field(dev_t, dev)
+ __field(int, mode)
__field(int, ret)
),
@@ -141,9 +141,9 @@ TRACE_EVENT(hugetlbfs_fallocate,
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu mode 0%o offset %lld len %lld size %lld ret %d",
+ TP_printk("dev %d,%d ino %llu mode 0%o offset %lld len %lld size %lld ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long)__entry->ino, __entry->mode,
+ __entry->ino, __entry->mode,
(unsigned long long)__entry->offset,
(unsigned long long)__entry->len,
(unsigned long long)__entry->size,
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 9912f0ded81d..fae4f8eac411 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -68,16 +68,16 @@ TRACE_EVENT(ipi_raise,
TP_ARGS(mask, reason),
TP_STRUCT__entry(
- __bitmask(target_cpus, nr_cpumask_bits)
+ __cpumask(target_cpus)
__field(const char *, reason)
),
TP_fast_assign(
- __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
+ __assign_cpumask(target_cpus, cpumask_bits(mask));
__entry->reason = reason;
),
- TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
+ TP_printk("target_mask=%s (%s)", __get_cpumask(target_cpus), __entry->reason)
);
DECLARE_EVENT_CLASS(ipi_handler,
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 269d949b2025..04521acba483 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -219,7 +219,7 @@ TRACE_EVENT(mptcp_rcvbuf_grow,
__be32 *p32;
__entry->time = time;
- __entry->rtt_us = msk->rcvq_space.rtt_us >> 3;
+ __entry->rtt_us = mptcp_rtt_us_est(msk) >> 3;
__entry->copied = msk->rcvq_space.copied;
__entry->inq = mptcp_inq_hint(sk);
__entry->space = msk->rcvq_space.space;
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index cbe28211106c..8c936fc575d5 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -300,7 +300,7 @@ TRACE_EVENT(netfs_read,
__field(loff_t, start)
__field(size_t, len)
__field(enum netfs_read_trace, what)
- __field(unsigned int, netfs_inode)
+ __field(u64, netfs_inode)
),
TP_fast_assign(
@@ -313,7 +313,7 @@ TRACE_EVENT(netfs_read,
__entry->netfs_inode = rreq->inode->i_ino;
),
- TP_printk("R=%08x %s c=%08x ni=%x s=%llx l=%zx sz=%llx",
+ TP_printk("R=%08x %s c=%08x ni=%llx s=%llx l=%zx sz=%llx",
__entry->rreq,
__print_symbolic(__entry->what, netfs_read_traces),
__entry->cookie,
@@ -486,7 +486,7 @@ TRACE_EVENT(netfs_folio,
TP_ARGS(folio, why),
TP_STRUCT__entry(
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, index)
__field(unsigned int, nr)
__field(enum netfs_folio_trace, why)
@@ -500,7 +500,7 @@ TRACE_EVENT(netfs_folio,
__entry->nr = folio_nr_pages(folio);
),
- TP_printk("i=%05lx ix=%05lx-%05lx %s",
+ TP_printk("i=%05llx ix=%05lx-%05lx %s",
__entry->ino, __entry->index, __entry->index + __entry->nr - 1,
__print_symbolic(__entry->why, netfs_folio_traces))
);
diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h
index 8880c11733dd..49efbd209585 100644
--- a/include/trace/events/nilfs2.h
+++ b/include/trace/events/nilfs2.h
@@ -165,14 +165,14 @@ TRACE_EVENT(nilfs2_segment_usage_freed,
TRACE_EVENT(nilfs2_mdt_insert_new_block,
TP_PROTO(struct inode *inode,
- unsigned long ino,
+ u64 ino,
unsigned long block),
TP_ARGS(inode, ino, block),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(struct inode *, inode)
- __field(unsigned long, ino)
__field(unsigned long, block)
),
@@ -182,7 +182,7 @@ TRACE_EVENT(nilfs2_mdt_insert_new_block,
__entry->block = block;
),
- TP_printk("inode = %p ino = %lu block = %lu",
+ TP_printk("inode = %p ino = %llu block = %lu",
__entry->inode,
__entry->ino,
__entry->block)
@@ -190,15 +190,15 @@ TRACE_EVENT(nilfs2_mdt_insert_new_block,
TRACE_EVENT(nilfs2_mdt_submit_block,
TP_PROTO(struct inode *inode,
- unsigned long ino,
+ u64 ino,
unsigned long blkoff,
enum req_op mode),
TP_ARGS(inode, ino, blkoff, mode),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(struct inode *, inode)
- __field(unsigned long, ino)
__field(unsigned long, blkoff)
/*
* Use field_struct() to avoid is_signed_type() on the
@@ -214,7 +214,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
__entry->mode = mode;
),
- TP_printk("inode = %p ino = %lu blkoff = %lu mode = %x",
+ TP_printk("inode = %p ino = %llu blkoff = %lu mode = %x",
__entry->inode,
__entry->ino,
__entry->blkoff,
diff --git a/include/trace/events/pci_controller.h b/include/trace/events/pci_controller.h
new file mode 100644
index 000000000000..a4b387cf52a6
--- /dev/null
+++ b/include/trace/events/pci_controller.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pci_controller
+
+#if !defined(_TRACE_HW_EVENT_PCI_CONTROLLER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HW_EVENT_PCI_CONTROLLER_H
+
+#include <uapi/linux/pci_regs.h>
+#include <linux/tracepoint.h>
+
+#define RATE \
+ EM(PCIE_SPEED_2_5GT, "2.5 GT/s") \
+ EM(PCIE_SPEED_5_0GT, "5.0 GT/s") \
+ EM(PCIE_SPEED_8_0GT, "8.0 GT/s") \
+ EM(PCIE_SPEED_16_0GT, "16.0 GT/s") \
+ EM(PCIE_SPEED_32_0GT, "32.0 GT/s") \
+ EM(PCIE_SPEED_64_0GT, "64.0 GT/s") \
+ EMe(PCI_SPEED_UNKNOWN, "Unknown")
+
+
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+RATE
+
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+TRACE_EVENT(pcie_ltssm_state_transition,
+ TP_PROTO(const char *dev_name, const char *state, u32 rate),
+ TP_ARGS(dev_name, state, rate),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __string(state, state)
+ __field(u32, rate)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(state);
+ __entry->rate = rate;
+ ),
+
+ TP_printk("dev: %s state: %s rate: %s",
+ __get_str(dev_name), __get_str(state),
+ __print_symbolic(__entry->rate, RATE)
+ )
+);
+
+#endif /* _TRACE_HW_EVENT_PCI_CONTROLLER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index ff33f41a9db7..d8a5c2677470 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -74,6 +74,57 @@ TRACE_EVENT(qdisc_enqueue,
__entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
);
+#undef FN
+#undef FNe
+#define FN(reason) TRACE_DEFINE_ENUM(QDISC_DROP_##reason);
+#define FNe(reason) TRACE_DEFINE_ENUM(QDISC_DROP_##reason);
+DEFINE_QDISC_DROP_REASON(FN, FNe)
+
+#undef FN
+#undef FNe
+#define FN(reason) { QDISC_DROP_##reason, #reason },
+#define FNe(reason) { QDISC_DROP_##reason, #reason }
+
+TRACE_EVENT(qdisc_drop,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
+ struct net_device *dev, struct sk_buff *skb,
+ enum qdisc_drop_reason reason),
+
+ TP_ARGS(qdisc, txq, dev, skb, reason),
+
+ TP_STRUCT__entry(
+ __field(struct Qdisc *, qdisc)
+ __field(const struct netdev_queue *, txq)
+ __field(void *, skbaddr)
+ __field(int, ifindex)
+ __field(u32, handle)
+ __field(u32, parent)
+ __field(enum qdisc_drop_reason, reason)
+ __string(kind, qdisc->ops->id)
+ ),
+
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->txq = txq;
+ __entry->skbaddr = skb;
+ __entry->ifindex = dev ? dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ __entry->reason = reason;
+ __assign_str(kind);
+ ),
+
+ TP_printk("drop ifindex=%d kind=%s handle=0x%X parent=0x%X skbaddr=%p reason=%s",
+ __entry->ifindex, __get_str(kind), __entry->handle,
+ __entry->parent, __entry->skbaddr,
+ __print_symbolic(__entry->reason,
+ DEFINE_QDISC_DROP_REASON(FN, FNe)))
+);
+
+#undef FN
+#undef FNe
+
TRACE_EVENT(qdisc_reset,
TP_PROTO(struct Qdisc *q),
diff --git a/include/trace/events/readahead.h b/include/trace/events/readahead.h
index 0997ac5eceab..087f171e2b02 100644
--- a/include/trace/events/readahead.h
+++ b/include/trace/events/readahead.h
@@ -18,7 +18,7 @@ TRACE_EVENT(page_cache_ra_unbounded,
TP_ARGS(inode, index, nr_to_read, lookahead_size),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
__field(dev_t, s_dev)
__field(pgoff_t, index)
__field(unsigned long, nr_to_read)
@@ -34,7 +34,7 @@ TRACE_EVENT(page_cache_ra_unbounded,
),
TP_printk(
- "dev=%d:%d ino=%lx index=%lu nr_to_read=%lu lookahead_size=%lu",
+ "dev=%d:%d ino=%llx index=%lu nr_to_read=%lu lookahead_size=%lu",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
__entry->index, __entry->nr_to_read, __entry->lookahead_size
)
@@ -46,7 +46,7 @@ TRACE_EVENT(page_cache_ra_order,
TP_ARGS(inode, index, ra),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
+ __field(u64, i_ino)
__field(dev_t, s_dev)
__field(pgoff_t, index)
__field(unsigned int, order)
@@ -66,7 +66,7 @@ TRACE_EVENT(page_cache_ra_order,
),
TP_printk(
- "dev=%d:%d ino=%lx index=%lu order=%u size=%u async_size=%u ra_pages=%u",
+ "dev=%d:%d ino=%llx index=%lu order=%u size=%u async_size=%u ra_pages=%u",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
__entry->index, __entry->order, __entry->size,
__entry->async_size, __entry->ra_pages
@@ -80,16 +80,16 @@ DECLARE_EVENT_CLASS(page_cache_ra_op,
TP_ARGS(inode, index, ra, req_count),
TP_STRUCT__entry(
- __field(unsigned long, i_ino)
- __field(dev_t, s_dev)
+ __field(u64, i_ino)
+ __field(loff_t, prev_pos)
__field(pgoff_t, index)
+ __field(unsigned long, req_count)
+ __field(dev_t, s_dev)
__field(unsigned int, order)
__field(unsigned int, size)
__field(unsigned int, async_size)
__field(unsigned int, ra_pages)
__field(unsigned int, mmap_miss)
- __field(loff_t, prev_pos)
- __field(unsigned long, req_count)
),
TP_fast_assign(
@@ -106,7 +106,7 @@ DECLARE_EVENT_CLASS(page_cache_ra_op,
),
TP_printk(
- "dev=%d:%d ino=%lx index=%lu req_count=%lu order=%u size=%u async_size=%u ra_pages=%u mmap_miss=%u prev_pos=%lld",
+ "dev=%d:%d ino=%llx index=%lu req_count=%lu order=%u size=%u async_size=%u ra_pages=%u mmap_miss=%u prev_pos=%lld",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
__entry->index, __entry->req_count, __entry->order,
__entry->size, __entry->async_size, __entry->ra_pages,
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 869f97c9bf73..578b8038b211 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -185,6 +185,7 @@
EM(rxrpc_skb_put_input, "PUT input ") \
EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
EM(rxrpc_skb_put_oob, "PUT oob ") \
+ EM(rxrpc_skb_put_old_response, "PUT old-resp ") \
EM(rxrpc_skb_put_purge, "PUT purge ") \
EM(rxrpc_skb_put_purge_oob, "PUT purge-oob") \
EM(rxrpc_skb_put_response, "PUT response ") \
@@ -347,7 +348,7 @@
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
- E_(rxrpc_call_see_zap, "SEE zap ")
+ E_(rxrpc_call_see_still_live, "SEE !still-l")
#define rxrpc_txqueue_traces \
EM(rxrpc_txqueue_await_reply, "AWR") \
@@ -520,6 +521,7 @@
#define rxrpc_req_ack_traces \
EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \
EM(rxrpc_reqack_app_stall, "APP-STALL ") \
+ EM(rxrpc_reqack_jumbo_win, "JUMBO-WIN ") \
EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \
EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \
EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b50e78..535860581f15 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -896,6 +896,32 @@ DECLARE_TRACE(sched_set_need_resched,
TP_PROTO(struct task_struct *tsk, int cpu, int tif),
TP_ARGS(tsk, cpu, tif));
+#define DL_OTHER 0
+#define DL_TASK 1
+#define DL_SERVER_FAIR 2
+#define DL_SERVER_EXT 3
+
+DECLARE_TRACE(sched_dl_throttle,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_replenish,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+/* Invoked from update_curr_dl_se() when neither a throttle nor a replenish occurs */
+DECLARE_TRACE(sched_dl_update,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_server_start,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_server_stop,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 1641ae3e6ca0..07cbb9836b91 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -218,12 +218,13 @@ TRACE_EVENT(hrtimer_setup,
* hrtimer_start - called when the hrtimer is started
* @hrtimer: pointer to struct hrtimer
* @mode: the hrtimers mode
+ * @was_armed: True if the timer was already armed when hrtimer_start*() was invoked
*/
TRACE_EVENT(hrtimer_start,
- TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
+ TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode, bool was_armed),
- TP_ARGS(hrtimer, mode),
+ TP_ARGS(hrtimer, mode, was_armed),
TP_STRUCT__entry(
__field( void *, hrtimer )
@@ -231,6 +232,7 @@ TRACE_EVENT(hrtimer_start,
__field( s64, expires )
__field( s64, softexpires )
__field( enum hrtimer_mode, mode )
+ __field( bool, was_armed )
),
TP_fast_assign(
@@ -239,26 +241,26 @@ TRACE_EVENT(hrtimer_start,
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
__entry->mode = mode;
+ __entry->was_armed = was_armed;
),
TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu "
- "mode=%s", __entry->hrtimer, __entry->function,
+ "mode=%s was_armed=%d", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
(unsigned long long) __entry->softexpires,
- decode_hrtimer_mode(__entry->mode))
+ decode_hrtimer_mode(__entry->mode), __entry->was_armed)
);
/**
* hrtimer_expire_entry - called immediately before the hrtimer callback
* @hrtimer: pointer to struct hrtimer
- * @now: pointer to variable which contains current time of the
- * timers base.
+ * @now: the current time of the timer's base.
*
* Allows to determine the timer latency.
*/
TRACE_EVENT(hrtimer_expire_entry,
- TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
+ TP_PROTO(struct hrtimer *hrtimer, ktime_t now),
TP_ARGS(hrtimer, now),
@@ -270,7 +272,7 @@ TRACE_EVENT(hrtimer_expire_entry,
TP_fast_assign(
__entry->hrtimer = hrtimer;
- __entry->now = *now;
+ __entry->now = now;
__entry->function = ACCESS_PRIVATE(hrtimer, function);
),
@@ -322,6 +324,30 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
);
/**
+ * hrtimer_rearm - Invoked when the clockevent device is rearmed
+ * @next_event: The next expiry time (CLOCK_MONOTONIC)
+ */
+TRACE_EVENT(hrtimer_rearm,
+
+ TP_PROTO(ktime_t next_event, bool deferred),
+
+ TP_ARGS(next_event, deferred),
+
+ TP_STRUCT__entry(
+ __field( s64, next_event )
+ __field( bool, deferred )
+ ),
+
+ TP_fast_assign(
+ __entry->next_event = next_event;
+ __entry->deferred = deferred;
+ ),
+
+ TP_printk("next_event=%llu deferred=%d",
+ (unsigned long long) __entry->next_event, __entry->deferred)
+);
+
+/**
* itimer_state - called when itimer is started or canceled
* @which: name of the interval timer
* @value: the itimers value, itimer is canceled if value->it_value is
diff --git a/include/trace/events/timestamp.h b/include/trace/events/timestamp.h
index c9e5ec930054..d6503612dddf 100644
--- a/include/trace/events/timestamp.h
+++ b/include/trace/events/timestamp.h
@@ -18,9 +18,9 @@ DECLARE_EVENT_CLASS(ctime,
TP_ARGS(inode, ctime),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(time64_t, ctime_s)
+ __field(dev_t, dev)
__field(u32, ctime_ns)
__field(u32, gen)
),
@@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(ctime,
__entry->ctime_ns = ctime->tv_nsec;
),
- TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u",
+ TP_printk("ino=%d:%d:%llu:%u ctime=%lld.%u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
__entry->ctime_s, __entry->ctime_ns
)
@@ -58,8 +58,8 @@ TRACE_EVENT(ctime_ns_xchg,
TP_ARGS(inode, old, new, cur),
TP_STRUCT__entry(
+ __field(u64, ino)
__field(dev_t, dev)
- __field(ino_t, ino)
__field(u32, gen)
__field(u32, old)
__field(u32, new)
@@ -75,7 +75,7 @@ TRACE_EVENT(ctime_ns_xchg,
__entry->cur = cur;
),
- TP_printk("ino=%d:%d:%ld:%u old=%u:%s new=%u cur=%u:%s",
+ TP_printk("ino=%d:%d:%llu:%u old=%u:%s new=%u cur=%u:%s",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
__entry->old & ~I_CTIME_QUERIED,
__print_flags(__entry->old & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS),
@@ -93,10 +93,10 @@ TRACE_EVENT(fill_mg_cmtime,
TP_ARGS(inode, ctime, mtime),
TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(time64_t, ctime_s)
__field(time64_t, mtime_s)
+ __field(dev_t, dev)
__field(u32, ctime_ns)
__field(u32, mtime_ns)
__field(u32, gen)
@@ -112,7 +112,7 @@ TRACE_EVENT(fill_mg_cmtime,
__entry->mtime_ns = mtime->tv_nsec;
),
- TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u mtime=%lld.%u",
+ TP_printk("ino=%d:%d:%llu:%u ctime=%lld.%u mtime=%lld.%u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
__entry->ctime_s, __entry->ctime_ns,
__entry->mtime_s, __entry->mtime_ns
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ea58e4656abf..4445a8d9218d 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -124,85 +124,96 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
- TP_PROTO(int order, gfp_t gfp_flags),
+ TP_PROTO(gfp_t gfp_flags, int order, struct mem_cgroup *memcg),
- TP_ARGS(order, gfp_flags),
+ TP_ARGS(gfp_flags, order, memcg),
TP_STRUCT__entry(
- __field( int, order )
__field( unsigned long, gfp_flags )
+ __field( u64, memcg_id )
+ __field( int, order )
),
TP_fast_assign(
- __entry->order = order;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
+ __entry->order = order;
+ __entry->memcg_id = mem_cgroup_id(memcg);
),
- TP_printk("order=%d gfp_flags=%s",
+ TP_printk("order=%d gfp_flags=%s pid=%d memcg_id=%llu %s",
__entry->order,
- show_gfp_flags(__entry->gfp_flags))
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->ent.pid,
+ __entry->memcg_id,
+ __event_in_irq() ? "(in-irq)" : "")
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
- TP_PROTO(int order, gfp_t gfp_flags),
+ TP_PROTO(gfp_t gfp_flags, int order, struct mem_cgroup *memcg),
- TP_ARGS(order, gfp_flags)
+ TP_ARGS(gfp_flags, order, memcg)
);
#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
- TP_PROTO(int order, gfp_t gfp_flags),
+ TP_PROTO(gfp_t gfp_flags, int order, struct mem_cgroup *memcg),
- TP_ARGS(order, gfp_flags)
+ TP_ARGS(gfp_flags, order, memcg)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
- TP_PROTO(int order, gfp_t gfp_flags),
+ TP_PROTO(gfp_t gfp_flags, int order, struct mem_cgroup *memcg),
- TP_ARGS(order, gfp_flags)
+ TP_ARGS(gfp_flags, order, memcg)
);
#endif /* CONFIG_MEMCG */
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
- TP_PROTO(unsigned long nr_reclaimed),
+ TP_PROTO(unsigned long nr_reclaimed, struct mem_cgroup *memcg),
- TP_ARGS(nr_reclaimed),
+ TP_ARGS(nr_reclaimed, memcg),
TP_STRUCT__entry(
__field( unsigned long, nr_reclaimed )
+ __field( u64, memcg_id )
),
TP_fast_assign(
__entry->nr_reclaimed = nr_reclaimed;
+ __entry->memcg_id = mem_cgroup_id(memcg);
),
- TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed)
+ TP_printk("nr_reclaimed=%lu pid=%d memcg_id=%llu %s",
+ __entry->nr_reclaimed,
+ __entry->ent.pid,
+ __entry->memcg_id,
+ __event_in_irq() ? "(in-irq)" : "")
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end,
- TP_PROTO(unsigned long nr_reclaimed),
+ TP_PROTO(unsigned long nr_reclaimed, struct mem_cgroup *memcg),
- TP_ARGS(nr_reclaimed)
+ TP_ARGS(nr_reclaimed, memcg)
);
#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
- TP_PROTO(unsigned long nr_reclaimed),
+ TP_PROTO(unsigned long nr_reclaimed, struct mem_cgroup *memcg),
- TP_ARGS(nr_reclaimed)
+ TP_ARGS(nr_reclaimed, memcg)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
- TP_PROTO(unsigned long nr_reclaimed),
+ TP_PROTO(unsigned long nr_reclaimed, struct mem_cgroup *memcg),
- TP_ARGS(nr_reclaimed)
+ TP_ARGS(nr_reclaimed, memcg)
);
#endif /* CONFIG_MEMCG */
@@ -210,82 +221,92 @@ TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
long nr_objects_to_shrink, unsigned long cache_items,
unsigned long long delta, unsigned long total_scan,
- int priority),
+ int priority, struct mem_cgroup *memcg),
TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan,
- priority),
+ priority, memcg),
TP_STRUCT__entry(
__field(struct shrinker *, shr)
__field(void *, shrink)
- __field(int, nid)
__field(long, nr_objects_to_shrink)
__field(unsigned long, gfp_flags)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
__field(int, priority)
+ __field(int, nid)
+ __field(u64, memcg_id)
),
TP_fast_assign(
__entry->shr = shr;
__entry->shrink = shr->scan_objects;
- __entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
__entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
__entry->priority = priority;
+ __entry->nid = sc->nid;
+ __entry->memcg_id = mem_cgroup_id(memcg);
),
- TP_printk("%pS %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
+ TP_printk("%pS %p: nid: %d pid: %d memcg_id: %llu objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d %s",
__entry->shrink,
__entry->shr,
__entry->nid,
+ __entry->ent.pid,
+ __entry->memcg_id,
__entry->nr_objects_to_shrink,
show_gfp_flags(__entry->gfp_flags),
__entry->cache_items,
__entry->delta,
__entry->total_scan,
- __entry->priority)
+ __entry->priority,
+ __event_in_irq() ? "(in-irq)" : "")
);
TRACE_EVENT(mm_shrink_slab_end,
TP_PROTO(struct shrinker *shr, int nid, int shrinker_retval,
- long unused_scan_cnt, long new_scan_cnt, long total_scan),
+ long unused_scan_cnt, long new_scan_cnt, long total_scan, struct mem_cgroup *memcg),
TP_ARGS(shr, nid, shrinker_retval, unused_scan_cnt, new_scan_cnt,
- total_scan),
+ total_scan, memcg),
TP_STRUCT__entry(
__field(struct shrinker *, shr)
- __field(int, nid)
__field(void *, shrink)
__field(long, unused_scan)
__field(long, new_scan)
- __field(int, retval)
__field(long, total_scan)
+ __field(int, nid)
+ __field(int, retval)
+ __field(u64, memcg_id)
),
TP_fast_assign(
__entry->shr = shr;
- __entry->nid = nid;
__entry->shrink = shr->scan_objects;
__entry->unused_scan = unused_scan_cnt;
__entry->new_scan = new_scan_cnt;
- __entry->retval = shrinker_retval;
__entry->total_scan = total_scan;
+ __entry->nid = nid;
+ __entry->retval = shrinker_retval;
+ __entry->memcg_id = mem_cgroup_id(memcg);
),
- TP_printk("%pS %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
+ TP_printk("%pS %p: nid: %d pid: %d memcg_id: %llu unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d %s",
__entry->shrink,
__entry->shr,
__entry->nid,
+ __entry->ent.pid,
+ __entry->memcg_id,
__entry->unused_scan,
__entry->new_scan,
__entry->total_scan,
- __entry->retval)
+ __entry->retval,
+ __event_in_irq() ? "(in-irq)" : "")
);
TRACE_EVENT(mm_vmscan_lru_isolate,
@@ -514,9 +535,9 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_node_reclaim_end,
- TP_PROTO(unsigned long nr_reclaimed),
+ TP_PROTO(unsigned long nr_reclaimed, struct mem_cgroup *memcg),
- TP_ARGS(nr_reclaimed)
+ TP_ARGS(nr_reclaimed, memcg)
);
TRACE_EVENT(mm_vmscan_throttled,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 4d3d8c8f3a1b..e5cd2b80fd29 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -67,7 +67,7 @@ DECLARE_EVENT_CLASS(writeback_folio_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(pgoff_t, index)
),
@@ -79,9 +79,9 @@ DECLARE_EVENT_CLASS(writeback_folio_template,
__entry->index = folio->index;
),
- TP_printk("bdi %s: ino=%lu index=%lu",
+ TP_printk("bdi %s: ino=%llu index=%lu",
__entry->name,
- (unsigned long)__entry->ino,
+ __entry->ino,
__entry->index
)
);
@@ -108,7 +108,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(ino_t, ino)
+ __field(u64, ino)
__field(unsigned long, state)
__field(unsigned long, flags)
),
@@ -123,9 +123,9 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
__entry->flags = flags;
),
- TP_printk("bdi %s: ino=%lu state=%s flags=%s",
+ TP_printk("bdi %s: ino=%llu state=%s flags=%s",
__entry->name,
- (unsigned long)__entry->ino,
+ __entry->ino,
show_inode_state(__entry->state),
show_inode_state(__entry->flags)
)
@@ -155,12 +155,12 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK
-static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+static inline u64 __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
return cgroup_ino(wb->memcg_css->cgroup);
}
-static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+static inline u64 __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
if (wbc->wb)
return __trace_wb_assign_cgroup(wbc->wb);
@@ -169,12 +169,12 @@ static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
}
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+static inline u64 __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
return 1;
}
-static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+static inline u64 __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
return 1;
}
@@ -192,8 +192,8 @@ TRACE_EVENT(inode_foreign_history,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, ino)
- __field(ino_t, cgroup_ino)
+ __field(u64, ino)
+ __field(u64, cgroup_ino)
__field(unsigned int, history)
),
@@ -204,10 +204,10 @@ TRACE_EVENT(inode_foreign_history,
__entry->history = history;
),
- TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
+ TP_printk("bdi %s: ino=%llu cgroup_ino=%llu history=0x%x",
__entry->name,
- (unsigned long)__entry->ino,
- (unsigned long)__entry->cgroup_ino,
+ __entry->ino,
+ __entry->cgroup_ino,
__entry->history
)
);
@@ -221,8 +221,8 @@ TRACE_EVENT(inode_switch_wbs_queue,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, old_cgroup_ino)
- __field(ino_t, new_cgroup_ino)
+ __field(u64, old_cgroup_ino)
+ __field(u64, new_cgroup_ino)
__field(unsigned int, count)
),
@@ -233,10 +233,10 @@ TRACE_EVENT(inode_switch_wbs_queue,
__entry->count = count;
),
- TP_printk("bdi %s: old_cgroup_ino=%lu new_cgroup_ino=%lu count=%u",
+ TP_printk("bdi %s: old_cgroup_ino=%llu new_cgroup_ino=%llu count=%u",
__entry->name,
- (unsigned long)__entry->old_cgroup_ino,
- (unsigned long)__entry->new_cgroup_ino,
+ __entry->old_cgroup_ino,
+ __entry->new_cgroup_ino,
__entry->count
)
);
@@ -250,9 +250,9 @@ TRACE_EVENT(inode_switch_wbs,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, ino)
- __field(ino_t, old_cgroup_ino)
- __field(ino_t, new_cgroup_ino)
+ __field(u64, ino)
+ __field(u64, old_cgroup_ino)
+ __field(u64, new_cgroup_ino)
),
TP_fast_assign(
@@ -262,11 +262,11 @@ TRACE_EVENT(inode_switch_wbs,
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
),
- TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
+ TP_printk("bdi %s: ino=%llu old_cgroup_ino=%llu new_cgroup_ino=%llu",
__entry->name,
- (unsigned long)__entry->ino,
- (unsigned long)__entry->old_cgroup_ino,
- (unsigned long)__entry->new_cgroup_ino
+ __entry->ino,
+ __entry->old_cgroup_ino,
+ __entry->new_cgroup_ino
)
);
@@ -279,10 +279,10 @@ TRACE_EVENT(track_foreign_dirty,
TP_STRUCT__entry(
__array(char, name, 32)
__field(u64, bdi_id)
- __field(ino_t, ino)
+ __field(u64, ino)
+ __field(u64, cgroup_ino)
+ __field(u64, page_cgroup_ino)
__field(unsigned int, memcg_id)
- __field(ino_t, cgroup_ino)
- __field(ino_t, page_cgroup_ino)
),
TP_fast_assign(
@@ -297,13 +297,13 @@ TRACE_EVENT(track_foreign_dirty,
__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
),
- TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
+ TP_printk("bdi %s[%llu]: ino=%llu memcg_id=%u cgroup_ino=%llu page_cgroup_ino=%llu",
__entry->name,
__entry->bdi_id,
- (unsigned long)__entry->ino,
+ __entry->ino,
__entry->memcg_id,
- (unsigned long)__entry->cgroup_ino,
- (unsigned long)__entry->page_cgroup_ino
+ __entry->cgroup_ino,
+ __entry->page_cgroup_ino
)
);
@@ -316,7 +316,7 @@ TRACE_EVENT(flush_foreign,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, cgroup_ino)
+ __field(u64, cgroup_ino)
__field(unsigned int, frn_bdi_id)
__field(unsigned int, frn_memcg_id)
),
@@ -328,9 +328,9 @@ TRACE_EVENT(flush_foreign,
__entry->frn_memcg_id = frn_memcg_id;
),
- TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
+ TP_printk("bdi %s: cgroup_ino=%llu frn_bdi_id=%u frn_memcg_id=%u",
__entry->name,
- (unsigned long)__entry->cgroup_ino,
+ __entry->cgroup_ino,
__entry->frn_bdi_id,
__entry->frn_memcg_id
)
@@ -345,9 +345,9 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(ino_t, ino)
+ __field(u64, ino)
+ __field(u64, cgroup_ino)
__field(int, sync_mode)
- __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -358,11 +358,11 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
+ TP_printk("bdi %s: ino=%llu sync_mode=%d cgroup_ino=%llu",
__entry->name,
- (unsigned long)__entry->ino,
+ __entry->ino,
__entry->sync_mode,
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
@@ -385,6 +385,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
TP_ARGS(wb, work),
TP_STRUCT__entry(
__array(char, name, 32)
+ __field(u64, cgroup_ino)
__field(long, nr_pages)
__field(dev_t, sb_dev)
__field(int, sync_mode)
@@ -392,7 +393,6 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
- __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
@@ -406,7 +406,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%llu",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -415,7 +415,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic,
__entry->for_background,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -445,15 +445,15 @@ DECLARE_EVENT_CLASS(writeback_class,
TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, cgroup_ino)
+ __field(u64, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: cgroup_ino=%lu",
+ TP_printk("bdi %s: cgroup_ino=%llu",
__entry->name,
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
@@ -482,15 +482,15 @@ DECLARE_EVENT_CLASS(wbc_class,
TP_ARGS(wbc, bdi),
TP_STRUCT__entry(
__array(char, name, 32)
+ __field(u64, cgroup_ino)
__field(long, nr_to_write)
__field(long, pages_skipped)
+ __field(long, range_start)
+ __field(long, range_end)
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, for_background)
__field(int, range_cyclic)
- __field(long, range_start)
- __field(long, range_end)
- __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -507,7 +507,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
- "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
+ "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%llu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -517,7 +517,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
)
@@ -535,11 +535,11 @@ TRACE_EVENT(writeback_queue_io,
TP_ARGS(wb, work, dirtied_before, moved),
TP_STRUCT__entry(
__array(char, name, 32)
+ __field(u64, cgroup_ino)
__field(unsigned long, older)
__field(long, age)
__field(int, moved)
__field(int, reason)
- __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
@@ -549,13 +549,13 @@ TRACE_EVENT(writeback_queue_io,
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%llu",
__entry->name,
__entry->older, /* dirtied_before in jiffies */
__entry->age, /* dirtied_before in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
@@ -614,13 +614,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TP_STRUCT__entry(
__array(char, bdi, 32)
+ __field(u64, cgroup_ino)
__field(unsigned long, write_bw)
__field(unsigned long, avg_write_bw)
__field(unsigned long, dirty_rate)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
- __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -638,7 +638,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
+ "balanced_dirty_ratelimit=%lu cgroup_ino=%llu",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
@@ -646,7 +646,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
__entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
@@ -667,6 +667,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_STRUCT__entry(
__array( char, bdi, 32)
+ __field(u64, cgroup_ino)
__field(unsigned long, limit)
__field(unsigned long, setpoint)
__field(unsigned long, dirty)
@@ -674,13 +675,12 @@ TRACE_EVENT(balance_dirty_pages,
__field(unsigned long, wb_dirty)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
- __field(unsigned int, dirtied)
- __field(unsigned int, dirtied_pause)
__field(unsigned long, paused)
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
- __field(ino_t, cgroup_ino)
+ __field(unsigned int, dirtied)
+ __field(unsigned int, dirtied_pause)
),
TP_fast_assign(
@@ -711,7 +711,7 @@ TRACE_EVENT(balance_dirty_pages,
"wb_setpoint=%lu wb_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%llu",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -726,7 +726,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->pause, /* ms */
__entry->period, /* ms */
__entry->think, /* ms */
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
@@ -737,10 +737,10 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, ino)
+ __field(u64, ino)
+ __field(u64, cgroup_ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
- __field(ino_t, cgroup_ino)
),
TP_fast_assign(
@@ -752,13 +752,13 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
+ TP_printk("bdi %s: ino=%llu state=%s dirtied_when=%lu age=%lu cgroup_ino=%llu",
__entry->name,
- (unsigned long)__entry->ino,
+ __entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
@@ -773,13 +773,13 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(ino_t, ino)
+ __field(u64, ino)
+ __field(u64, cgroup_ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
__field(unsigned long, writeback_index)
- __field(long, nr_to_write)
__field(unsigned long, wrote)
- __field(ino_t, cgroup_ino)
+ __field(long, nr_to_write)
),
TP_fast_assign(
@@ -794,17 +794,17 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
+ TP_printk("bdi %s: ino=%llu state=%s dirtied_when=%lu age=%lu "
+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%llu",
__entry->name,
- (unsigned long)__entry->ino,
+ __entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote,
- (unsigned long)__entry->cgroup_ino
+ __entry->cgroup_ino
)
);
@@ -828,11 +828,11 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( u64, ino )
__field(unsigned long, state )
- __field( __u16, mode )
__field(unsigned long, dirtied_when )
+ __field( dev_t, dev )
+ __field( __u16, mode )
),
TP_fast_assign(
@@ -843,9 +843,9 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
__entry->dirtied_when = inode->dirtied_when;
),
- TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
+ TP_printk("dev %d,%d ino %llu dirtied %lu state %s mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long)__entry->ino, __entry->dirtied_when,
+ __entry->ino, __entry->dirtied_when,
show_inode_state(__entry->state), __entry->mode)
);
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
index fce85ea2df1c..181b81335781 100644
--- a/include/trace/stages/stage3_trace_output.h
+++ b/include/trace/stages/stage3_trace_output.h
@@ -64,36 +64,36 @@
#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
#undef __print_flags
-#define __print_flags(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags __flags[] = \
- { flag_array, { -1, NULL }}; \
- trace_print_flags_seq(p, delim, flag, __flags); \
+#define __print_flags(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags __flags[] = \
+ { flag_array }; \
+ trace_print_flags_seq(p, delim, flag, __flags, ARRAY_SIZE(__flags)); \
})
#undef __print_symbolic
-#define __print_symbolic(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags symbols[] = \
- { symbol_array, { -1, NULL }}; \
- trace_print_symbols_seq(p, value, symbols); \
+#define __print_symbolic(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags symbols[] = \
+ { symbol_array }; \
+ trace_print_symbols_seq(p, value, symbols, ARRAY_SIZE(symbols)); \
})
#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
-#define __print_flags_u64(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags_u64 __flags[] = \
- { flag_array, { -1, NULL } }; \
- trace_print_flags_seq_u64(p, delim, flag, __flags); \
+#define __print_flags_u64(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 __flags[] = \
+ { flag_array }; \
+ trace_print_flags_seq_u64(p, delim, flag, __flags, ARRAY_SIZE(__flags)); \
})
-#define __print_symbolic_u64(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags_u64 symbols[] = \
- { symbol_array, { -1, NULL } }; \
- trace_print_symbols_seq_u64(p, value, symbols); \
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array }; \
+ trace_print_symbols_seq_u64(p, value, symbols, ARRAY_SIZE(symbols)); \
})
#else
#define __print_flags_u64(flag, delim, flag_array...) \
@@ -150,3 +150,11 @@
#undef __get_buf
#define __get_buf(len) trace_seq_acquire(p, (len))
+
+#undef __event_in_hardirq
+#undef __event_in_softirq
+#undef __event_in_irq
+
+#define __event_in_hardirq() (__entry->ent.flags & TRACE_FLAG_HARDIRQ)
+#define __event_in_softirq() (__entry->ent.flags & TRACE_FLAG_SOFTIRQ)
+#define __event_in_irq() (__entry->ent.flags & (TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ))
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
index fcd564a590f4..47008897a795 100644
--- a/include/trace/stages/stage7_class_define.h
+++ b/include/trace/stages/stage7_class_define.h
@@ -26,6 +26,25 @@
#undef __print_hex_dump
#undef __get_buf
+#undef __event_in_hardirq
+#undef __event_in_softirq
+#undef __event_in_irq
+
+/*
+ * The TRACE_FLAG_* are enums. Instead of using TRACE_DEFINE_ENUM(),
+ * use their hardcoded values. These values are parsed by user space
+ * tooling elsewhere so they will never change.
+ *
+ * See "enum trace_flag_type" in linux/trace_events.h:
+ * TRACE_FLAG_HARDIRQ
+ * TRACE_FLAG_SOFTIRQ
+ */
+
+/* This is what is displayed in the format files */
+#define __event_in_hardirq() (REC->common_flags & 0x8)
+#define __event_in_softirq() (REC->common_flags & 0x10)
+#define __event_in_irq() (REC->common_flags & 0x18)
+
/*
* The below is not executed in the kernel. It is only what is
* displayed in the print format for userspace to parse.
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index ebbd861ef0bc..9f3090db2f16 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -479,7 +479,9 @@ struct drm_amdgpu_userq_signal {
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
- __u64 num_syncobj_handles;
+ __u16 num_syncobj_handles;
+ __u16 pad0;
+ __u32 pad1;
/**
* @bo_read_handles: The list of BO handles that the submitted user queue job
* is using for read only. This will update BO fences in the kernel.
@@ -563,7 +565,8 @@ struct drm_amdgpu_userq_wait {
* @num_syncobj_handles: A count that represents the number of syncobj handles in
* @syncobj_handles.
*/
- __u32 num_syncobj_handles;
+ __u16 num_syncobj_handles;
+ __u16 pad0;
/**
* @num_bo_read_handles: A count that represents the number of read BO handles in
* @bo_read_handles.
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index 9c44db2b3dcd..61d3686fa3b1 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -156,10 +156,11 @@ struct amdxdna_drm_config_hwctx {
enum amdxdna_bo_type {
AMDXDNA_BO_INVALID = 0,
- AMDXDNA_BO_SHMEM,
- AMDXDNA_BO_DEV_HEAP,
- AMDXDNA_BO_DEV,
- AMDXDNA_BO_CMD,
+ AMDXDNA_BO_SHMEM = 1, /* Be compatible with legacy application code. */
+ AMDXDNA_BO_SHARE = 1,
+ AMDXDNA_BO_DEV_HEAP = 2,
+ AMDXDNA_BO_DEV = 3,
+ AMDXDNA_BO_CMD = 4,
};
/**
@@ -353,7 +354,8 @@ struct amdxdna_drm_query_clock_metadata {
};
enum amdxdna_sensor_type {
- AMDXDNA_SENSOR_TYPE_POWER
+ AMDXDNA_SENSOR_TYPE_POWER,
+ AMDXDNA_SENSOR_TYPE_COLUMN_UTILIZATION
};
/**
@@ -589,8 +591,37 @@ struct amdxdna_async_error {
__u64 ex_err_code;
};
+/**
+ * struct amdxdna_drm_bo_usage - all types of BO usage
+ * BOs managed by XRT/SHIM/driver are counted as internal.
+ * Others are counted as external which are managed by applications.
+ *
+ * Among all types of BOs:
+ * AMDXDNA_BO_DEV_HEAP - is counted for internal.
+ * AMDXDNA_BO_SHARE - is counted for external.
+ * AMDXDNA_BO_CMD - is counted for internal.
+ * AMDXDNA_BO_DEV - is counted by heap_usage only, not internal
+ * or external. It does not add to the total memory
+ * footprint since its mem comes from heap which is
+ * already counted as internal.
+ */
+struct amdxdna_drm_bo_usage {
+ /** @pid: The ID of the process to query from. */
+ __s64 pid;
+ /** @total_usage: Total BO size used by process. */
+ __u64 total_usage;
+ /** @internal_usage: Total internal BO size used by process. */
+ __u64 internal_usage;
+ /** @heap_usage: Total device BO size used by process. */
+ __u64 heap_usage;
+};
+
+/*
+ * Supported params in struct amdxdna_drm_get_array
+ */
#define DRM_AMDXDNA_HW_CONTEXT_ALL 0
#define DRM_AMDXDNA_HW_LAST_ASYNC_ERR 2
+#define DRM_AMDXDNA_BO_USAGE 6
/**
* struct amdxdna_drm_get_array - Get information array.
@@ -603,6 +634,12 @@ struct amdxdna_drm_get_array {
*
* %DRM_AMDXDNA_HW_CONTEXT_ALL:
* Returns all created hardware contexts.
+ *
+ * %DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
+ * Returns last async error.
+ *
+ * %DRM_AMDXDNA_BO_USAGE:
+ * Returns usage of heap/internal/external BOs.
*/
__u32 param;
/**
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index c89aede3cb12..ac66fa93b5a3 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -1423,6 +1423,22 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
/*
+ * ARM 64k interleaved modifier
+ *
+ * This is used by ARM Mali v10+ GPUs. With this modifier, the plane is divided
+ * into 64k byte 1:1 or 2:1 -sided tiles. The 64k tiles are laid out linearly.
+ * Each 64k tile is divided into blocks of 16x16 texel blocks, which are
+ * themselves laid out linearly within a 64k tile. Then within each 16x16
+ * block, texel blocks are laid out according to U order, similar to
+ * 16X16_BLOCK_U_INTERLEAVED.
+ *
+ * Note that unlike 16X16_BLOCK_U_INTERLEAVED, the layout does not change
+ * depending on whether a format is compressed or not.
+ */
+#define DRM_FORMAT_MOD_ARM_INTERLEAVED_64K \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 2ULL)
+
+/*
* Allwinner tiled modifier
*
* This tiling mode is implemented by the VPU found on all Allwinner platforms,
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index cbbbfc1dfe2b..a4bdc4bd11bc 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -27,6 +27,9 @@
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
+#include <linux/bits.h>
+#include <linux/const.h>
+
#include "drm.h"
#if defined(__cplusplus)
@@ -166,6 +169,10 @@ extern "C" {
#define DRM_MODE_LINK_STATUS_GOOD 0
#define DRM_MODE_LINK_STATUS_BAD 1
+/* Panel type property */
+#define DRM_MODE_PANEL_TYPE_UNKNOWN 0
+#define DRM_MODE_PANEL_TYPE_OLED 1
+
/*
* DRM_MODE_ROTATE_<degrees>
*
@@ -1545,6 +1552,83 @@ struct drm_mode_closefb {
__u32 pad;
};
+/*
+ * Put 16-bit ARGB values into a standard 64-bit representation that can be
+ * used for ioctl parameters, inter-driver communication, etc.
+ *
+ * If the component values being provided contain less than 16 bits of
+ * precision, use a conversion ratio to get a better color approximation.
+ * The ratio is computed as (2^16 - 1) / (2^bpc - 1), where bpc and 16 are
+ * the input and output precision, respectively.
+ * Also note bpc must be greater than 0.
+ */
+#define __DRM_ARGB64_PREP(c, shift) \
+ (((__u64)(c) & __GENMASK(15, 0)) << (shift))
+
+#define __DRM_ARGB64_PREP_BPC(c, shift, bpc) \
+({ \
+ __u16 mask = __GENMASK((bpc) - 1, 0); \
+ __u16 conv = __KERNEL_DIV_ROUND_CLOSEST((mask & (c)) * \
+ __GENMASK(15, 0), mask);\
+ __DRM_ARGB64_PREP(conv, shift); \
+})
+
+#define DRM_ARGB64_PREP(alpha, red, green, blue) \
+( \
+ __DRM_ARGB64_PREP(alpha, 48) | \
+ __DRM_ARGB64_PREP(red, 32) | \
+ __DRM_ARGB64_PREP(green, 16) | \
+ __DRM_ARGB64_PREP(blue, 0) \
+)
+
+#define DRM_ARGB64_PREP_BPC(alpha, red, green, blue, bpc) \
+({ \
+ __typeof__(bpc) __bpc = bpc; \
+ __DRM_ARGB64_PREP_BPC(alpha, 48, __bpc) | \
+ __DRM_ARGB64_PREP_BPC(red, 32, __bpc) | \
+ __DRM_ARGB64_PREP_BPC(green, 16, __bpc) | \
+ __DRM_ARGB64_PREP_BPC(blue, 0, __bpc); \
+})
+
+/*
+ * Extract the specified color component from a standard 64-bit ARGB value.
+ *
+ * If the requested precision is less than 16 bits, make use of a conversion
+ * ratio calculated as (2^bpc - 1) / (2^16 - 1), where bpc and 16 are the
+ * output and input precision, respectively.
+ *
+ * If speed is more important than accuracy, use DRM_ARGB64_GET*_BPCS()
+ * instead of DRM_ARGB64_GET*_BPC() in order to replace the expensive
+ * division with a simple bit right-shift operation.
+ */
+#define __DRM_ARGB64_GET(c, shift) \
+ ((__u16)(((__u64)(c) >> (shift)) & __GENMASK(15, 0)))
+
+#define __DRM_ARGB64_GET_BPC(c, shift, bpc) \
+({ \
+ __u16 comp = __DRM_ARGB64_GET(c, shift); \
+ __KERNEL_DIV_ROUND_CLOSEST(comp * __GENMASK((bpc) - 1, 0), \
+ __GENMASK(15, 0)); \
+})
+
+#define __DRM_ARGB64_GET_BPCS(c, shift, bpc) \
+ (__DRM_ARGB64_GET(c, shift) >> (16 - (bpc)))
+
+#define DRM_ARGB64_GETA(c) __DRM_ARGB64_GET(c, 48)
+#define DRM_ARGB64_GETR(c) __DRM_ARGB64_GET(c, 32)
+#define DRM_ARGB64_GETG(c) __DRM_ARGB64_GET(c, 16)
+#define DRM_ARGB64_GETB(c) __DRM_ARGB64_GET(c, 0)
+
+#define DRM_ARGB64_GETA_BPC(c, bpc) __DRM_ARGB64_GET_BPC(c, 48, bpc)
+#define DRM_ARGB64_GETR_BPC(c, bpc) __DRM_ARGB64_GET_BPC(c, 32, bpc)
+#define DRM_ARGB64_GETG_BPC(c, bpc) __DRM_ARGB64_GET_BPC(c, 16, bpc)
+#define DRM_ARGB64_GETB_BPC(c, bpc) __DRM_ARGB64_GET_BPC(c, 0, bpc)
+
+#define DRM_ARGB64_GETA_BPCS(c, bpc) __DRM_ARGB64_GET_BPCS(c, 48, bpc)
+#define DRM_ARGB64_GETR_BPCS(c, bpc) __DRM_ARGB64_GET_BPCS(c, 32, bpc)
+#define DRM_ARGB64_GETG_BPCS(c, bpc) __DRM_ARGB64_GET_BPCS(c, 16, bpc)
+#define DRM_ARGB64_GETB_BPCS(c, bpc) __DRM_ARGB64_GET_BPCS(c, 0, bpc)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/drm_ras.h b/include/uapi/drm/drm_ras.h
new file mode 100644
index 000000000000..5f40fa5b869d
--- /dev/null
+++ b/include/uapi/drm/drm_ras.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/drm_ras.yaml */
+/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _UAPI_LINUX_DRM_RAS_H
+#define _UAPI_LINUX_DRM_RAS_H
+
+#define DRM_RAS_FAMILY_NAME "drm-ras"
+#define DRM_RAS_FAMILY_VERSION 1
+
+/*
+ * Type of the node. Currently, only error-counter nodes are supported, which
+ * expose reliability counters for a hardware/software component.
+ */
+enum drm_ras_node_type {
+ DRM_RAS_NODE_TYPE_ERROR_COUNTER = 1,
+};
+
+enum {
+ DRM_RAS_A_NODE_ATTRS_NODE_ID = 1,
+ DRM_RAS_A_NODE_ATTRS_DEVICE_NAME,
+ DRM_RAS_A_NODE_ATTRS_NODE_NAME,
+ DRM_RAS_A_NODE_ATTRS_NODE_TYPE,
+
+ __DRM_RAS_A_NODE_ATTRS_MAX,
+ DRM_RAS_A_NODE_ATTRS_MAX = (__DRM_RAS_A_NODE_ATTRS_MAX - 1)
+};
+
+enum {
+ DRM_RAS_A_ERROR_COUNTER_ATTRS_NODE_ID = 1,
+ DRM_RAS_A_ERROR_COUNTER_ATTRS_ERROR_ID,
+ DRM_RAS_A_ERROR_COUNTER_ATTRS_ERROR_NAME,
+ DRM_RAS_A_ERROR_COUNTER_ATTRS_ERROR_VALUE,
+
+ __DRM_RAS_A_ERROR_COUNTER_ATTRS_MAX,
+ DRM_RAS_A_ERROR_COUNTER_ATTRS_MAX = (__DRM_RAS_A_ERROR_COUNTER_ATTRS_MAX - 1)
+};
+
+enum {
+ DRM_RAS_CMD_LIST_NODES = 1,
+ DRM_RAS_CMD_GET_ERROR_COUNTER,
+
+ __DRM_RAS_CMD_MAX,
+ DRM_RAS_CMD_MAX = (__DRM_RAS_CMD_MAX - 1)
+};
+
+#endif /* _UAPI_LINUX_DRM_RAS_H */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 5c67294edc95..b99098792371 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -117,6 +117,7 @@ struct drm_msm_timespec {
* ioctl will throw -EPIPE.
*/
#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
+#define MSM_PARAM_AQE 0x17 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index dd87f8f30793..1fa82fa6af38 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -432,6 +432,69 @@ struct drm_nouveau_exec {
__u64 push_ptr;
};
+struct drm_nouveau_get_zcull_info {
+ /**
+ * @width_align_pixels: required alignment for region widths, in pixels
+ * (typically #TPC's * 16).
+ */
+ __u32 width_align_pixels;
+ /**
+ * @height_align_pixels: required alignment for region heights, in
+ * pixels (typically 32).
+ */
+ __u32 height_align_pixels;
+ /**
+ * @pixel_squares_by_aliquots: the pixel area covered by an aliquot
+ * (typically #Zcull_banks * 16 * 16).
+ */
+ __u32 pixel_squares_by_aliquots;
+ /**
+ * @aliquot_total: the total aliquot pool available in hardware
+ */
+ __u32 aliquot_total;
+ /**
+ * @zcull_region_byte_multiplier: the size of an aliquot in bytes, which
+ * is used for save/restore operations on a region
+ */
+ __u32 zcull_region_byte_multiplier;
+ /**
+ * @zcull_region_header_size: the region header size in bytes, which is
+ * used for save/restore operations on a region
+ */
+ __u32 zcull_region_header_size;
+ /**
+ * @zcull_subregion_header_size: the subregion header size in bytes,
+ * which is used for save/restore operations on a region
+ */
+ __u32 zcull_subregion_header_size;
+ /**
+ * @subregion_count: the total number of subregions the hardware
+ * supports
+ */
+ __u32 subregion_count;
+ /**
+ * @subregion_width_align_pixels: required alignment for subregion
+ * widths, in pixels (typically #TPC's * 16).
+ */
+ __u32 subregion_width_align_pixels;
+ /**
+ * @subregion_height_align_pixels: required alignment for subregion
+ * heights, in pixels
+ */
+ __u32 subregion_height_align_pixels;
+
+ /**
+ * @ctxsw_size: the size, in bytes, of a zcull context switching region.
+ * Will be zero if the kernel does not support zcull context switching.
+ */
+ __u32 ctxsw_size;
+ /**
+ * @ctxsw_align: the alignment, in bytes, of a zcull context switching
+ * region
+ */
+ __u32 ctxsw_align;
+};
+
#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
@@ -445,6 +508,7 @@ struct drm_nouveau_exec {
#define DRM_NOUVEAU_VM_INIT 0x10
#define DRM_NOUVEAU_VM_BIND 0x11
#define DRM_NOUVEAU_EXEC 0x12
+#define DRM_NOUVEAU_GET_ZCULL_INFO 0x13
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -513,6 +577,8 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
+
+#define DRM_IOCTL_NOUVEAU_GET_ZCULL_INFO DRM_IOR (DRM_COMMAND_BASE + DRM_NOUVEAU_GET_ZCULL_INFO, struct drm_nouveau_get_zcull_info)
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index b401ac585d6a..0e455d91e77d 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -410,6 +410,38 @@ struct drm_panthor_csif_info {
};
/**
+ * enum drm_panthor_timestamp_info_flags - drm_panthor_timestamp_info.flags
+ */
+enum drm_panthor_timestamp_info_flags {
+ /** @DRM_PANTHOR_TIMESTAMP_GPU: Query GPU time. */
+ DRM_PANTHOR_TIMESTAMP_GPU = 1 << 0,
+
+ /** @DRM_PANTHOR_TIMESTAMP_CPU_NONE: Don't query CPU time. */
+ DRM_PANTHOR_TIMESTAMP_CPU_NONE = 0 << 1,
+
+ /** @DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC: Query CPU time using CLOCK_MONOTONIC. */
+ DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC = 1 << 1,
+
+ /** @DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC_RAW: Query CPU time using CLOCK_MONOTONIC_RAW. */
+ DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC_RAW = 2 << 1,
+
+ /** @DRM_PANTHOR_TIMESTAMP_CPU_TYPE_MASK: Space reserved for CPU clock type. */
+ DRM_PANTHOR_TIMESTAMP_CPU_TYPE_MASK = 7 << 1,
+
+ /** @DRM_PANTHOR_TIMESTAMP_GPU_OFFSET: Query GPU offset. */
+ DRM_PANTHOR_TIMESTAMP_GPU_OFFSET = 1 << 4,
+
+ /** @DRM_PANTHOR_TIMESTAMP_GPU_CYCLE_COUNT: Query GPU cycle count. */
+ DRM_PANTHOR_TIMESTAMP_GPU_CYCLE_COUNT = 1 << 5,
+
+ /** @DRM_PANTHOR_TIMESTAMP_FREQ: Query timestamp frequency. */
+ DRM_PANTHOR_TIMESTAMP_FREQ = 1 << 6,
+
+ /** @DRM_PANTHOR_TIMESTAMP_DURATION: Return duration of time query. */
+ DRM_PANTHOR_TIMESTAMP_DURATION = 1 << 7,
+};
+
+/**
* struct drm_panthor_timestamp_info - Timestamp information
*
* Structure grouping all queryable information relating to the GPU timestamp.
@@ -421,11 +453,38 @@ struct drm_panthor_timestamp_info {
*/
__u64 timestamp_frequency;
- /** @current_timestamp: The current timestamp. */
+ /** @current_timestamp: The current GPU timestamp. */
__u64 current_timestamp;
- /** @timestamp_offset: The offset of the timestamp timer. */
+ /** @timestamp_offset: The offset of the GPU timestamp timer. */
__u64 timestamp_offset;
+
+ /**
+ * @flags: Bitmask of drm_panthor_timestamp_info_flags.
+ *
+ * If set to 0, then it is interpreted as:
+ * DRM_PANTHOR_TIMESTAMP_GPU |
+ * DRM_PANTHOR_TIMESTAMP_GPU_OFFSET |
+ * DRM_PANTHOR_TIMESTAMP_FREQ
+ *
+ * Note: these flags are exclusive to each other (only one can be used):
+ * - DRM_PANTHOR_TIMESTAMP_CPU_NONE
+ * - DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC
+ * - DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC_RAW
+ */
+ __u32 flags;
+
+ /** @duration_nsec: Duration of time query. */
+ __u32 duration_nsec;
+
+ /** @cycle_count: Value of GPU_CYCLE_COUNT. */
+ __u64 cycle_count;
+
+ /** @cpu_timestamp_sec: Seconds part of CPU timestamp. */
+ __u64 cpu_timestamp_sec;
+
+ /** @cpu_timestamp_nsec: Nanoseconds part of CPU timestamp. */
+ __u64 cpu_timestamp_nsec;
};
/**
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 077e66a682e2..ae2fda23ce7c 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -83,6 +83,7 @@ extern "C" {
* - &DRM_IOCTL_XE_OBSERVATION
* - &DRM_IOCTL_XE_MADVISE
* - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ * - &DRM_IOCTL_XE_VM_GET_PROPERTY
*/
/*
@@ -107,6 +108,7 @@ extern "C" {
#define DRM_XE_MADVISE 0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0e
+#define DRM_XE_VM_GET_PROPERTY 0x0f
/* Must be kept compact -- no holes */
@@ -125,6 +127,7 @@ extern "C" {
#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
+#define DRM_IOCTL_XE_VM_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_GET_PROPERTY, struct drm_xe_vm_get_property)
/**
* DOC: Xe IOCTL Extensions
@@ -335,10 +338,6 @@ struct drm_xe_mem_region {
__u64 total_size;
/**
* @used: Estimate of the memory used in bytes for this region.
- *
- * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
- * accounting. Without this the value here will always equal
- * zero.
*/
__u64 used;
/**
@@ -363,9 +362,7 @@ struct drm_xe_mem_region {
* @cpu_visible_used: Estimate of CPU visible memory used, in
* bytes.
*
- * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
- * accounting. Without this the value here will always equal
- * zero. Note this is only currently tracked for
+ * Note this is only currently tracked for
* DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
* here will always be zero).
*/
@@ -412,6 +409,9 @@ struct drm_xe_query_mem_regions {
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT - Flag is set if the
* device supports the userspace hint %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION.
* This is exposed only on Xe2+.
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_DISABLE_STATE_CACHE_PERF_FIX - Flag is set
+ * if a queue can be created with
+ * %DRM_XE_EXEC_QUEUE_SET_DISABLE_STATE_CACHE_PERF_FIX
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -431,6 +431,8 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT (1 << 3)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_DISABLE_STATE_CACHE_PERF_FIX (1 << 4)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_PURGING_SUPPORT (1 << 5)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -975,6 +977,11 @@ struct drm_xe_gem_mmap_offset {
* demand when accessed, and also allows per-VM overcommit of memory.
* The xe driver internally uses recoverable pagefaults to implement
* this.
+ * - %DRM_XE_VM_CREATE_FLAG_NO_VM_OVERCOMMIT - Requires also
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE. This disallows per-VM overcommit
+ * but only during a &DRM_IOCTL_XE_VM_BIND operation with the
+ * %DRM_XE_VM_BIND_FLAG_IMMEDIATE flag set. This may be useful for
+ * user-space naively probing the amount of available memory.
*/
struct drm_xe_vm_create {
/** @extensions: Pointer to the first extension struct, if any */
@@ -983,6 +990,7 @@ struct drm_xe_vm_create {
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+#define DRM_XE_VM_CREATE_FLAG_NO_VM_OVERCOMMIT (1 << 3)
/** @flags: Flags */
__u32 flags;
@@ -1053,6 +1061,13 @@ struct drm_xe_vm_destroy {
* not invoke autoreset. Neither will stack variables going out of scope.
* Therefore it's recommended to always explicitly reset the madvises when
* freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
+ * - %DRM_XE_VM_BIND_FLAG_DECOMPRESS - Request on-device decompression for a MAP.
+ * When set on a MAP bind operation, request the driver schedule an on-device
+ * in-place decompression (via the migrate/resolve path) for the GPU mapping
+ * created by this bind. Only valid for DRM_XE_VM_BIND_OP_MAP; usage on
+ * other ops is rejected. The bind's pat_index must select the device's
+ * "no-compression" PAT. Only meaningful for VRAM-backed BOs on devices that
+ * support Flat CCS and the required HW generation XE2+.
*
* The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
* - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
@@ -1103,7 +1118,9 @@ struct drm_xe_vm_bind_op {
* incoherent GT access is possible.
*
* Note: For userptr and externally imported dma-buf the kernel expects
- * either 1WAY or 2WAY for the @pat_index.
+ * either 1WAY or 2WAY for the @pat_index. Starting from NVL-P, for
+ * userptr, svm, madvise and externally imported dma-buf the kernel expects
+ * either 2WAY or 1WAY and XA @pat_index.
*
* For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
* on the @pat_index. For such mappings there is no actual memory being
@@ -1160,6 +1177,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6)
+#define DRM_XE_VM_BIND_FLAG_DECOMPRESS (1 << 7)
/** @flags: Bind flags */
__u32 flags;
@@ -1249,6 +1267,89 @@ struct drm_xe_vm_bind {
__u64 reserved[2];
};
+/** struct xe_vm_fault - Describes faults for %DRM_XE_VM_GET_PROPERTY_FAULTS */
+struct xe_vm_fault {
+ /** @address: Canonical address of the fault */
+ __u64 address;
+ /** @address_precision: Precision of faulted address */
+ __u32 address_precision;
+ /** @access_type: Type of address access that resulted in fault */
+#define FAULT_ACCESS_TYPE_READ 0
+#define FAULT_ACCESS_TYPE_WRITE 1
+#define FAULT_ACCESS_TYPE_ATOMIC 2
+ __u8 access_type;
+ /** @fault_type: Type of fault reported */
+#define FAULT_TYPE_NOT_PRESENT 0
+#define FAULT_TYPE_WRITE_ACCESS 1
+#define FAULT_TYPE_ATOMIC_ACCESS 2
+ __u8 fault_type;
+ /** @fault_level: fault level of the fault */
+#define FAULT_LEVEL_PTE 0
+#define FAULT_LEVEL_PDE 1
+#define FAULT_LEVEL_PDP 2
+#define FAULT_LEVEL_PML4 3
+#define FAULT_LEVEL_PML5 4
+ __u8 fault_level;
+ /** @pad: MBZ */
+ __u8 pad;
+ /** @reserved: MBZ */
+ __u64 reserved[4];
+};
+
+/**
+ * struct drm_xe_vm_get_property - Input of &DRM_IOCTL_XE_VM_GET_PROPERTY
+ *
+ * The user provides a VM and a property to query among DRM_XE_VM_GET_PROPERTY_*,
+ * and sets the values in the vm_id and property members, respectively. This
+ * determines both the VM to get the property of, as well as the property to
+ * report.
+ *
+ * If size is set to 0, the driver fills it with the required size for the
+ * requested property. The user is expected here to allocate memory for the
+ * property structure and to provide a pointer to the allocated memory using the
+ * data member. For some properties, this may be zero, in which case, the
+ * value of the property will be saved to the value member and size will remain
+ * zero on return.
+ *
+ * If size is not zero, then the IOCTL will attempt to copy the requested
+ * property into the data member.
+ *
+ * The IOCTL will return -ENOENT if the VM could not be identified from the
+ * provided VM ID, or -EINVAL if the IOCTL fails for any other reason, such as
+ * providing an invalid size for the given property or if the property data
+ * could not be copied to the memory allocated to the data member.
+ *
+ * The property member can be:
+ * - %DRM_XE_VM_GET_PROPERTY_FAULTS
+ */
+struct drm_xe_vm_get_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: The ID of the VM to query the properties of */
+ __u32 vm_id;
+
+#define DRM_XE_VM_GET_PROPERTY_FAULTS 0
+ /** @property: property to get */
+ __u32 property;
+
+ /** @size: Size to allocate for @data */
+ __u32 size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ union {
+ /** @data: Pointer to user-defined array of flexible size and type */
+ __u64 data;
+ /** @value: Return value for scalar queries */
+ __u64 value;
+ };
+
+ /** @reserved: MBZ */
+ __u64 reserved[3];
+};
+
/**
* struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
*
@@ -1285,6 +1386,9 @@ struct drm_xe_vm_bind {
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
* priority within the multi-queue group. Current valid priority values are 0–2
* (default is 1), with higher values indicating higher priority.
+ * - %DRM_XE_EXEC_QUEUE_SET_DISABLE_STATE_CACHE_PERF_FIX - Set the queue to
+ * enable render color cache keying on BTP+BTI instead of just BTI
+ * (only valid for render queues).
*
* The example below shows how to use @drm_xe_exec_queue_create to create
* a simple exec_queue (no parallel submission) of class
@@ -1329,6 +1433,7 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
+#define DRM_XE_EXEC_QUEUE_SET_DISABLE_STATE_CACHE_PERF_FIX 6
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -2067,6 +2172,7 @@ struct drm_xe_query_eu_stall {
* - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
* - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
* - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ * - DRM_XE_VMA_ATTR_PURGEABLE_STATE: Set purgeable state for BOs.
*
* Example:
*
@@ -2099,6 +2205,7 @@ struct drm_xe_madvise {
#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
/** @type: type of attribute */
__u32 type;
@@ -2189,6 +2296,72 @@ struct drm_xe_madvise {
/** @pat_index.reserved: Reserved */
__u64 reserved;
} pat_index;
+
+ /**
+ * @purge_state_val: Purgeable state configuration
+ *
+ * Used when @type == DRM_XE_VMA_ATTR_PURGEABLE_STATE.
+ *
+ * Configures the purgeable state of buffer objects in the specified
+ * virtual address range. This allows applications to hint to the kernel
+ * about bo's usage patterns for better memory management.
+ *
+ * By default all VMAs are in WILLNEED state.
+ *
+ * Supported values for @purge_state_val.val:
+ * - DRM_XE_VMA_PURGEABLE_STATE_WILLNEED (0): Marks BO as needed.
+ * If the BO was previously purged, the kernel sets the __u32 at
+ * @retained_ptr to 0 (backing store lost) so the application knows
+ * it must recreate the BO.
+ *
+ * - DRM_XE_VMA_PURGEABLE_STATE_DONTNEED (1): Marks BO as not currently
+ * needed. Kernel may purge it under memory pressure to reclaim memory.
+ * Only applies to non-shared BOs. The kernel sets the __u32 at
+ * @retained_ptr to 1 if the backing store still exists (not yet purged),
+ * or 0 if it was already purged.
+ *
+ * Important: Once marked as DONTNEED, touching the BO's memory
+ * is undefined behavior. It may succeed temporarily (before the
+ * kernel purges the backing store) but will suddenly fail once
+ * the BO transitions to PURGED state.
+ *
+ * To transition back: use WILLNEED and check @retained_ptr —
+ * if 0, backing store was lost and the BO must be recreated.
+ *
+ * The following operations are blocked in DONTNEED state to
+ * prevent the BO from being re-mapped after madvise:
+ * - New mmap() calls: Fail with -EBUSY
+ * - VM_BIND operations: Fail with -EBUSY
+ * - New dma-buf exports: Fail with -EBUSY
+ * - CPU page faults (existing mmap): Fail with SIGBUS
+ * - GPU page faults (fault-mode VMs): Fail with -EACCES
+ */
+ struct {
+#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
+#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
+ /** @purge_state_val.val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
+ __u32 val;
+
+ /** @purge_state_val.pad: MBZ */
+ __u32 pad;
+ /**
+ * @purge_state_val.retained_ptr: Pointer to a __u32 output
+ * field for backing store status.
+ *
+ * Userspace must initialize the __u32 value at this address
+ * to 0 before the ioctl. Kernel writes a __u32 after the
+ * operation:
+ * - 1 if backing store exists (not purged)
+ * - 0 if backing store was purged
+ *
+ * If userspace fails to initialize to 0, ioctl returns -EINVAL.
+ * This ensures a safe default (0 = assume purged) if kernel
+ * cannot write the result.
+ *
+ * Similar to i915's drm_i915_gem_madvise.retained field.
+ */
+ __u64 retained_ptr;
+ } purge_state_val;
};
/** @reserved: Reserved */
@@ -2357,6 +2530,85 @@ struct drm_xe_exec_queue_set_property {
__u64 reserved[2];
};
+/**
+ * DOC: Xe DRM RAS
+ *
+ * The enums and strings defined below map to the attributes of the DRM RAS Netlink Interface.
+ * Refer to Documentation/netlink/specs/drm_ras.yaml for complete interface specification.
+ *
+ * Node Registration
+ * =================
+ *
+ * The driver registers DRM RAS nodes for each error severity level.
+ * enum drm_xe_ras_error_severity defines the node-id, while DRM_XE_RAS_ERROR_SEVERITY_NAMES maps
+ * node-id to node-name.
+ *
+ * Error Classification
+ * ====================
+ *
+ * Each node contains a list of error counters. Each error is identified by an error-id and
+ * an error-name. enum drm_xe_ras_error_component defines the error-id, while
+ * DRM_XE_RAS_ERROR_COMPONENT_NAMES maps error-id to error-name.
+ *
+ * User Interface
+ * ==============
+ *
+ * To retrieve error values of an error counter, userspace applications should
+ * follow the below steps:
+ *
+ * 1. Use command LIST_NODES to enumerate all available nodes
+ * 2. Select node by node-id or node-name
+ * 3. Use command GET_ERROR_COUNTERS to list errors of specific node
+ * 4. Query specific error values using either error-id or error-name
+ *
+ * .. code-block:: C
+ *
+ * // Lookup tables for ID-to-name resolution
+ * static const char *nodes[] = DRM_XE_RAS_ERROR_SEVERITY_NAMES;
+ * static const char *errors[] = DRM_XE_RAS_ERROR_COMPONENT_NAMES;
+ *
+ */
+
+/**
+ * enum drm_xe_ras_error_severity - DRM RAS error severity.
+ */
+enum drm_xe_ras_error_severity {
+ /** @DRM_XE_RAS_ERR_SEV_CORRECTABLE: Correctable Error */
+ DRM_XE_RAS_ERR_SEV_CORRECTABLE = 0,
+ /** @DRM_XE_RAS_ERR_SEV_UNCORRECTABLE: Uncorrectable Error */
+ DRM_XE_RAS_ERR_SEV_UNCORRECTABLE,
+ /** @DRM_XE_RAS_ERR_SEV_MAX: Max severity */
+ DRM_XE_RAS_ERR_SEV_MAX /* non-ABI */
+};
+
+/**
+ * enum drm_xe_ras_error_component - DRM RAS error component.
+ */
+enum drm_xe_ras_error_component {
+ /** @DRM_XE_RAS_ERR_COMP_CORE_COMPUTE: Core Compute Error */
+ DRM_XE_RAS_ERR_COMP_CORE_COMPUTE = 1,
+ /** @DRM_XE_RAS_ERR_COMP_SOC_INTERNAL: SoC Internal Error */
+ DRM_XE_RAS_ERR_COMP_SOC_INTERNAL,
+ /** @DRM_XE_RAS_ERR_COMP_MAX: Max Error */
+ DRM_XE_RAS_ERR_COMP_MAX /* non-ABI */
+};
+
+/*
+ * Error severity to name mapping.
+ */
+#define DRM_XE_RAS_ERROR_SEVERITY_NAMES { \
+ [DRM_XE_RAS_ERR_SEV_CORRECTABLE] = "correctable-errors", \
+ [DRM_XE_RAS_ERR_SEV_UNCORRECTABLE] = "uncorrectable-errors", \
+}
+
+/*
+ * Error component to name mapping.
+ */
+#define DRM_XE_RAS_ERROR_COMPONENT_NAMES { \
+ [DRM_XE_RAS_ERR_COMP_CORE_COMPUTE] = "core-compute", \
+ [DRM_XE_RAS_ERR_COMP_SOC_INTERNAL] = "soc-internal" \
+}
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/fwctl/bnxt.h b/include/uapi/fwctl/bnxt.h
new file mode 100644
index 000000000000..32e0bfb9a836
--- /dev/null
+++ b/include/uapi/fwctl/bnxt.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2026, Broadcom Inc
+ */
+
+#ifndef _UAPI_FWCTL_BNXT_H_
+#define _UAPI_FWCTL_BNXT_H_
+
+#include <linux/types.h>
+
+enum fwctl_bnxt_commands {
+ FWCTL_BNXT_INLINE_COMMANDS = 0,
+ FWCTL_BNXT_QUERY_COMMANDS,
+ FWCTL_BNXT_SEND_COMMANDS,
+};
+
+/**
+ * struct fwctl_info_bnxt - ioctl(FWCTL_INFO) out_device_data
+ * @uctx_caps: The command capabilities driver accepts.
+ *
+ * Return basic information about the FW interface available.
+ */
+struct fwctl_info_bnxt {
+ __u32 uctx_caps;
+};
+#endif
diff --git a/include/uapi/fwctl/fwctl.h b/include/uapi/fwctl/fwctl.h
index 716ac0eee42d..2d6d4049c205 100644
--- a/include/uapi/fwctl/fwctl.h
+++ b/include/uapi/fwctl/fwctl.h
@@ -44,6 +44,7 @@ enum fwctl_device_type {
FWCTL_DEVICE_TYPE_ERROR = 0,
FWCTL_DEVICE_TYPE_MLX5 = 1,
FWCTL_DEVICE_TYPE_CXL = 2,
+ FWCTL_DEVICE_TYPE_BNXT = 3,
FWCTL_DEVICE_TYPE_PDS = 4,
};
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 14a1c1fe013a..e8f5ce677df7 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -350,7 +350,7 @@ enum {
#define AUDIT_STATUS_ENABLED 0x0001
#define AUDIT_STATUS_FAILURE 0x0002
#define AUDIT_STATUS_PID 0x0004
-#define AUDIT_STATUS_RATE_LIMIT 0x0008
+#define AUDIT_STATUS_RATE_LIMIT 0x0008
#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
#define AUDIT_STATUS_LOST 0x0040
@@ -386,8 +386,8 @@ enum {
* These bits disambiguate different calling conventions that share an
* ELF machine type, bitness, and endianness
*/
-#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
-#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
+#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
+#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
/* distinguish syscall tables */
#define __AUDIT_ARCH_64BIT 0x80000000
@@ -508,7 +508,7 @@ struct audit_tty_status {
__u32 log_passwd; /* 1 = enabled, 0 = disabled */
};
-#define AUDIT_UID_UNSET (unsigned int)-1
+#define AUDIT_UID_UNSET ((unsigned int)-1)
#define AUDIT_SID_UNSET ((unsigned int)-1)
/* audit_rule_data supports filter rules with both integer and string
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c8d400b7680a..552bc5d9afbd 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4645,7 +4645,9 @@ union bpf_attr {
* Description
* Discard reserved ring buffer sample, pointed to by *data*.
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
- * of new data availability is sent.
+ * of new data availability is sent. Discarded records remain in
+ * the ring buffer until consumed by user space, so a later submit
+ * using adaptive wakeup might not wake up the consumer.
* If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
* of new data availability is sent unconditionally.
* If **0** is specified in *flags*, an adaptive notification
diff --git a/include/uapi/linux/bsg.h b/include/uapi/linux/bsg.h
index cd6302def5ed..6cff77f5b857 100644
--- a/include/uapi/linux/bsg.h
+++ b/include/uapi/linux/bsg.h
@@ -2,6 +2,9 @@
#ifndef _UAPIBSG_H
#define _UAPIBSG_H
+#ifdef __KERNEL__
+#include <linux/build_bug.h>
+#endif /* __KERNEL__ */
#include <linux/types.h>
#define BSG_PROTOCOL_SCSI 0
@@ -63,5 +66,77 @@ struct sg_io_v4 {
__u32 padding;
};
+struct bsg_uring_cmd {
+ __u64 request; /* [i], [*i] command descriptor address */
+ __u32 request_len; /* [i] command descriptor length in bytes */
+ __u32 protocol; /* [i] protocol type (BSG_PROTOCOL_*) */
+ __u32 subprotocol; /* [i] subprotocol type (BSG_SUB_PROTOCOL_*) */
+ __u32 max_response_len; /* [i] response buffer size in bytes */
+
+ __u64 response; /* [i], [*o] response data address */
+ __u64 dout_xferp; /* [i], [*i] */
+ __u32 dout_xfer_len; /* [i] bytes to be transferred to device */
+ __u32 dout_iovec_count; /* [i] 0 -> "flat" dout transfer else
+ * dout_xferp points to array of iovec
+ */
+ __u64 din_xferp; /* [i], [*o] */
+ __u32 din_xfer_len; /* [i] bytes to be transferred from device */
+ __u32 din_iovec_count; /* [i] 0 -> "flat" din transfer */
+
+ __u32 timeout_ms; /* [i] timeout in milliseconds */
+ __u8 reserved[12]; /* reserved for future extension */
+};
+
+#ifdef __KERNEL__
+/* Must match IORING_OP_URING_CMD payload size (e.g. SQE128). */
+static_assert(sizeof(struct bsg_uring_cmd) == 80);
+#endif /* __KERNEL__ */
+
+
+/*
+ * SCSI BSG io_uring completion (res2, 64-bit)
+ *
+ * When using BSG_PROTOCOL_SCSI + BSG_SUB_PROTOCOL_SCSI_CMD with
+ * IORING_OP_URING_CMD, the completion queue entry (CQE) contains:
+ * - result: errno (0 on success)
+ * - res2: packed SCSI status
+ *
+ * res2 bit layout:
+ * [0..7] device_status (SCSI status byte, e.g. CHECK_CONDITION)
+ * [8..15] driver_status (e.g. DRIVER_SENSE when sense data is valid)
+ * [16..23] host_status (e.g. DID_OK, DID_TIME_OUT)
+ * [24..31] sense_len_wr (bytes of sense data written to response buffer)
+ * [32..63] resid_len (residual transfer length)
+ */
+static inline __u8 bsg_scsi_res2_device_status(__u64 res2)
+{
+ return res2 & 0xff;
+}
+static inline __u8 bsg_scsi_res2_driver_status(__u64 res2)
+{
+ return res2 >> 8;
+}
+static inline __u8 bsg_scsi_res2_host_status(__u64 res2)
+{
+ return res2 >> 16;
+}
+static inline __u8 bsg_scsi_res2_sense_len(__u64 res2)
+{
+ return res2 >> 24;
+}
+static inline __u32 bsg_scsi_res2_resid_len(__u64 res2)
+{
+ return res2 >> 32;
+}
+static inline __u64 bsg_scsi_res2_build(__u8 device_status, __u8 driver_status,
+ __u8 host_status, __u8 sense_len_wr,
+ __u32 resid_len)
+{
+ return ((__u64)(__u32)(resid_len) << 32) |
+ ((__u64)sense_len_wr << 24) |
+ ((__u64)host_status << 16) |
+ ((__u64)driver_status << 8) |
+ (__u64)device_status;
+}
#endif /* _UAPIBSG_H */
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 266d4ffa6c07..638615ebddc2 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -8,6 +8,16 @@
#define BTF_MAGIC 0xeB9F
#define BTF_VERSION 1
+/*
+ * BTF layout section consists of a struct btf_layout for each known
+ * kind at BTF encoding time.
+ */
+struct btf_layout {
+ __u8 info_sz; /* size of singular element after btf_type */
+ __u8 elem_sz; /* size of each of btf_vlen(t) elements */
+ __u16 flags; /* currently unused */
+};
+
struct btf_header {
__u16 magic;
__u8 version;
@@ -19,6 +29,8 @@ struct btf_header {
__u32 type_len; /* length of type section */
__u32 str_off; /* offset of string section */
__u32 str_len; /* length of string section */
+ __u32 layout_off; /* offset of layout section */
+ __u32 layout_len; /* length of layout section */
};
/* Max # of type identifier */
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index f7843e6bb978..cc3b9f7dccaf 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -1245,7 +1245,8 @@ struct btrfs_free_space_info {
__le32 flags;
} __attribute__ ((__packed__));
-#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)
+#define BTRFS_FREE_SPACE_USING_BITMAPS (1UL << 0)
+#define BTRFS_FREE_SPACE_FLAGS_MASK (BTRFS_FREE_SPACE_USING_BITMAPS)
#define BTRFS_QGROUP_LEVEL_SHIFT 48
static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index b8f629ef135f..565f309b9df8 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -50,4 +50,22 @@
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+/*
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if the dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
+ */
+#define __KERNEL_DIV_ROUND_CLOSEST(x, divisor) \
+({ \
+ __typeof__(x) __x = x; \
+ __typeof__(divisor) __d = divisor; \
+ \
+ (((__typeof__(x))-1) > 0 || \
+ ((__typeof__(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
+})
+
#endif /* _UAPI_LINUX_CONST_H */
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index e7d6b6d13470..0b165eac7619 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -19,6 +19,8 @@
#define DEVLINK_GENL_VERSION 0x1
#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config"
+#define DEVLINK_INDEX_BUS_NAME "devlink_index"
+
enum devlink_command {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_CMD_UNSPEC,
@@ -642,6 +644,9 @@ enum devlink_attr {
DEVLINK_ATTR_PARAM_VALUE_DEFAULT, /* dynamic */
DEVLINK_ATTR_PARAM_RESET_DEFAULT, /* flag */
+ DEVLINK_ATTR_INDEX, /* uint */
+ DEVLINK_ATTR_RESOURCE_SCOPE_MASK, /* u32 */
+
/* Add new attributes above here, update the spec in
* Documentation/netlink/specs/devlink.yaml and re-generate
* net/devlink/netlink_gen.c.
@@ -700,6 +705,16 @@ enum devlink_resource_unit {
DEVLINK_RESOURCE_UNIT_ENTRY,
};
+enum devlink_resource_scope {
+ DEVLINK_RESOURCE_SCOPE_DEV_BIT,
+ DEVLINK_RESOURCE_SCOPE_PORT_BIT,
+};
+
+#define DEVLINK_RESOURCE_SCOPE_DEV \
+ _BITUL(DEVLINK_RESOURCE_SCOPE_DEV_BIT)
+#define DEVLINK_RESOURCE_SCOPE_PORT \
+ _BITUL(DEVLINK_RESOURCE_SCOPE_PORT_BIT)
+
enum devlink_port_fn_attr_cap {
DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT,
DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT,
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index de0005f28e5c..871685f7c353 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -191,7 +191,8 @@ enum dpll_pin_capabilities {
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4,
};
-#define DPLL_PHASE_OFFSET_DIVIDER 1000
+#define DPLL_PHASE_OFFSET_DIVIDER 1000
+#define DPLL_PIN_MEASURED_FREQUENCY_DIVIDER 1000
/**
* enum dpll_feature_state - Allow control (enable/disable) and status checking
@@ -218,6 +219,7 @@ enum dpll_a {
DPLL_A_CLOCK_QUALITY_LEVEL,
DPLL_A_PHASE_OFFSET_MONITOR,
DPLL_A_PHASE_OFFSET_AVG_FACTOR,
+ DPLL_A_FREQUENCY_MONITOR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
@@ -254,6 +256,7 @@ enum dpll_a_pin {
DPLL_A_PIN_REFERENCE_SYNC,
DPLL_A_PIN_PHASE_ADJUST_GRAN,
DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET_PPT,
+ DPLL_A_PIN_MEASURED_FREQUENCY,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b74b80508553..1cdfb8341df2 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -225,7 +225,7 @@ enum tunable_id {
ETHTOOL_ID_UNSPEC,
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
- ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ ETHTOOL_PFC_PREVENTION_TOUT, /* both pause and pfc, see man ethtool */
ETHTOOL_TX_COPYBREAK_BUF_SIZE,
/*
* Add your fresh new tunable attribute above and remember to update
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index 556a0c834df5..8134baf7860f 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -371,6 +371,8 @@ enum {
ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS,
ETHTOOL_A_COALESCE_RX_PROFILE,
ETHTOOL_A_COALESCE_TX_PROFILE,
+ ETHTOOL_A_COALESCE_RX_CQE_FRAMES,
+ ETHTOOL_A_COALESCE_RX_CQE_NSECS,
__ETHTOOL_A_COALESCE_CNT,
ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1)
@@ -381,6 +383,7 @@ enum {
ETHTOOL_A_PAUSE_STAT_PAD,
ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
+ ETHTOOL_A_PAUSE_STAT_TX_PAUSE_STORM_EVENTS,
__ETHTOOL_A_PAUSE_STAT_CNT,
ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 70b2b661f42c..13f71202845e 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -657,4 +657,16 @@ struct procmap_query {
__u64 build_id_addr; /* in */
};
+/*
+ * Shutdown the filesystem.
+ */
+#define FS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+/*
+ * Flags for FS_IOC_SHUTDOWN
+ */
+#define FS_SHUTDOWN_FLAGS_DEFAULT 0x0
+#define FS_SHUTDOWN_FLAGS_LOGFLUSH	0x1	/* flush log but not data */
+#define FS_SHUTDOWN_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
+
#endif /* _UAPI_LINUX_FS_H */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index e9b5f79e1ee1..79ce4bc24cba 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -744,6 +744,11 @@ enum in6_addr_gen_mode {
* @IFLA_BR_FDB_MAX_LEARNED
* Set the number of max dynamically learned FDB entries for the current
* bridge.
+ *
+ * @IFLA_BR_STP_MODE
+ * Set the STP mode for the bridge, which controls how the bridge
+ * selects between userspace and kernel STP. The valid values are
+ * documented below in the ``BR_STP_MODE_*`` constants.
*/
enum {
IFLA_BR_UNSPEC,
@@ -796,11 +801,45 @@ enum {
IFLA_BR_MCAST_QUERIER_STATE,
IFLA_BR_FDB_N_LEARNED,
IFLA_BR_FDB_MAX_LEARNED,
+ IFLA_BR_STP_MODE,
__IFLA_BR_MAX,
};
#define IFLA_BR_MAX (__IFLA_BR_MAX - 1)
+/**
+ * DOC: Bridge STP mode values
+ *
+ * @BR_STP_MODE_AUTO
+ * Default. The kernel invokes the ``/sbin/bridge-stp`` helper to hand
+ * the bridge to a userspace STP daemon (e.g. mstpd). Only attempted in
+ * the initial network namespace; in other namespaces this falls back to
+ * kernel STP.
+ *
+ * @BR_STP_MODE_USER
+ * Directly enable userspace STP (``BR_USER_STP``) without invoking the
+ * ``/sbin/bridge-stp`` helper. Works in any network namespace.
+ * Userspace is responsible for ensuring an STP daemon manages the
+ * bridge.
+ *
+ * @BR_STP_MODE_KERNEL
+ * Directly enable kernel STP (``BR_KERNEL_STP``) without invoking the
+ * helper.
+ *
+ * The mode controls how the bridge selects between userspace and kernel
+ * STP when STP is enabled via ``IFLA_BR_STP_STATE``. It can only be
+ * changed while STP is disabled (``IFLA_BR_STP_STATE`` == 0), returns
+ * ``-EBUSY`` otherwise. The default value is ``BR_STP_MODE_AUTO``.
+ */
+enum br_stp_mode {
+ BR_STP_MODE_AUTO,
+ BR_STP_MODE_USER,
+ BR_STP_MODE_KERNEL,
+ __BR_STP_MODE_MAX
+};
+
+#define BR_STP_MODE_MAX (__BR_STP_MODE_MAX - 1)
+
struct ifla_bridge_id {
__u8 prio[2];
__u8 addr[6]; /* ETH_ALEN */
@@ -1296,6 +1335,11 @@ enum netkit_mode {
NETKIT_L3,
};
+enum netkit_pairing {
+ NETKIT_DEVICE_PAIR,
+ NETKIT_DEVICE_SINGLE,
+};
+
/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to
* the BPF program if attached. This also means the latter can
* consume the two fields if they were populated earlier.
@@ -1320,6 +1364,7 @@ enum {
IFLA_NETKIT_PEER_SCRUB,
IFLA_NETKIT_HEADROOM,
IFLA_NETKIT_TAILROOM,
+ IFLA_NETKIT_PAIRING,
__IFLA_NETKIT_MAX,
};
#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
@@ -1568,6 +1613,8 @@ enum {
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
IFLA_BOND_SLAVE_PRIO,
IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
+ IFLA_BOND_SLAVE_AD_CHURN_ACTOR_STATE,
+ IFLA_BOND_SLAVE_AD_CHURN_PARTNER_STATE,
__IFLA_BOND_SLAVE_MAX,
};
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index 9abd80dcc46f..7ae044d71fb7 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -103,16 +103,6 @@ struct sockaddr_pppol2tpv3in6 {
struct pppol2tpv3in6_addr pppol2tp;
} __packed;
-/*********************************************************************
- *
- * ioctl interface for defining forwarding of connections
- *
- ********************************************************************/
-
-#define PPPOEIOCSFWD _IOW(0xB1 ,0, size_t)
-#define PPPOEIOCDFWD _IO(0xB1 ,1)
-/*#define PPPOEIOCGFWD _IOWR(0xB1,2, size_t)*/
-
/* Codes to identify message types */
#define PADI_CODE 0x09
#define PADO_CODE 0x07
@@ -122,7 +112,9 @@ struct sockaddr_pppol2tpv3in6 {
struct pppoe_tag {
__be16 tag_type;
__be16 tag_len;
+#ifndef __KERNEL__
char tag_data[];
+#endif
} __attribute__ ((packed));
/* Tag identifiers */
@@ -150,7 +142,9 @@ struct pppoe_hdr {
__u8 code;
__be16 sid;
__be16 length;
+#ifndef __KERNEL__
struct pppoe_tag tag[];
+#endif
} __packed;
/* Length of entire PPPoE + PPP header */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 86bb2e8b17c9..21f0d735fbae 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -129,6 +129,15 @@ struct inet_diag_msg {
__u32 idiag_inode;
};
+enum {
+ IDIAG_TIMER_OFF,
+ IDIAG_TIMER_ON,
+ IDIAG_TIMER_KEEPALIVE,
+ IDIAG_TIMER_TIMEWAIT,
+ IDIAG_TIMER_PROBE0,
+ IDIAG_TIMER_DELACK,
+};
+
/* Extensions */
enum {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 4bdb6a165987..3528168f7c6d 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -643,6 +643,10 @@
#define KEY_EPRIVACY_SCREEN_ON 0x252
#define KEY_EPRIVACY_SCREEN_OFF 0x253
+#define KEY_ACTION_ON_SELECTION 0x254 /* AL Action on Selection (HUTRR119) */
+#define KEY_CONTEXTUAL_INSERT 0x255 /* AL Contextual Insertion (HUTRR119) */
+#define KEY_CONTEXTUAL_QUERY 0x256 /* AL Contextual Query (HUTRR119) */
+
#define KEY_KBDINPUTASSIST_PREV 0x260
#define KEY_KBDINPUTASSIST_NEXT 0x261
#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1ff16141c8a5..17ac1b785440 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -10,6 +10,8 @@
#include <linux/fs.h>
#include <linux/types.h>
+#include <linux/io_uring/zcrx.h>
+
/*
* this file is shared with liburing and that has to autodetect
* if linux/time_types.h is available or not, it can
@@ -341,6 +343,10 @@ enum io_uring_op {
/*
* sqe->timeout_flags
+ *
+ * IORING_TIMEOUT_IMMEDIATE_ARG: If set, sqe->addr stores the timeout
+ * value in nanoseconds instead of
+ * pointing to a timespec.
*/
#define IORING_TIMEOUT_ABS (1U << 0)
#define IORING_TIMEOUT_UPDATE (1U << 1)
@@ -349,6 +355,7 @@ enum io_uring_op {
#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
#define IORING_TIMEOUT_MULTISHOT (1U << 6)
+#define IORING_TIMEOUT_IMMEDIATE_ARG (1U << 7)
#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
@@ -1050,100 +1057,6 @@ struct io_timespec {
__u64 tv_nsec;
};
-/* Zero copy receive refill queue entry */
-struct io_uring_zcrx_rqe {
- __u64 off;
- __u32 len;
- __u32 __pad;
-};
-
-struct io_uring_zcrx_cqe {
- __u64 off;
- __u64 __pad;
-};
-
-/* The bit from which area id is encoded into offsets */
-#define IORING_ZCRX_AREA_SHIFT 48
-#define IORING_ZCRX_AREA_MASK (~(((__u64)1 << IORING_ZCRX_AREA_SHIFT) - 1))
-
-struct io_uring_zcrx_offsets {
- __u32 head;
- __u32 tail;
- __u32 rqes;
- __u32 __resv2;
- __u64 __resv[2];
-};
-
-enum io_uring_zcrx_area_flags {
- IORING_ZCRX_AREA_DMABUF = 1,
-};
-
-struct io_uring_zcrx_area_reg {
- __u64 addr;
- __u64 len;
- __u64 rq_area_token;
- __u32 flags;
- __u32 dmabuf_fd;
- __u64 __resv2[2];
-};
-
-enum zcrx_reg_flags {
- ZCRX_REG_IMPORT = 1,
-};
-
-enum zcrx_features {
- /*
- * The user can ask for the desired rx page size by passing the
- * value in struct io_uring_zcrx_ifq_reg::rx_buf_len.
- */
- ZCRX_FEATURE_RX_PAGE_SIZE = 1 << 0,
-};
-
-/*
- * Argument for IORING_REGISTER_ZCRX_IFQ
- */
-struct io_uring_zcrx_ifq_reg {
- __u32 if_idx;
- __u32 if_rxq;
- __u32 rq_entries;
- __u32 flags;
-
- __u64 area_ptr; /* pointer to struct io_uring_zcrx_area_reg */
- __u64 region_ptr; /* struct io_uring_region_desc * */
-
- struct io_uring_zcrx_offsets offsets;
- __u32 zcrx_id;
- __u32 rx_buf_len;
- __u64 __resv[3];
-};
-
-enum zcrx_ctrl_op {
- ZCRX_CTRL_FLUSH_RQ,
- ZCRX_CTRL_EXPORT,
-
- __ZCRX_CTRL_LAST,
-};
-
-struct zcrx_ctrl_flush_rq {
- __u64 __resv[6];
-};
-
-struct zcrx_ctrl_export {
- __u32 zcrx_fd;
- __u32 __resv1[11];
-};
-
-struct zcrx_ctrl {
- __u32 zcrx_id;
- __u32 op; /* see enum zcrx_ctrl_op */
- __u64 __resv[2];
-
- union {
- struct zcrx_ctrl_export zc_export;
- struct zcrx_ctrl_flush_rq zc_flush;
- };
-};
-
#ifdef __cplusplus
}
#endif
diff --git a/include/uapi/linux/io_uring/zcrx.h b/include/uapi/linux/io_uring/zcrx.h
new file mode 100644
index 000000000000..5ce02c7a6096
--- /dev/null
+++ b/include/uapi/linux/io_uring/zcrx.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Header file for the io_uring zerocopy receive (zcrx) interface.
+ *
+ * Copyright (C) 2026 Pavel Begunkov
+ * Copyright (C) 2026 David Wei
+ * Copyright (C) Meta Platforms, Inc.
+ */
+#ifndef LINUX_IO_ZCRX_H
+#define LINUX_IO_ZCRX_H
+
+#include <linux/types.h>
+
+/* Zero copy receive refill queue entry */
+struct io_uring_zcrx_rqe {
+ __u64 off;
+ __u32 len;
+ __u32 __pad;
+};
+
+struct io_uring_zcrx_cqe {
+ __u64 off;
+ __u64 __pad;
+};
+
+/* The bit from which area id is encoded into offsets */
+#define IORING_ZCRX_AREA_SHIFT 48
+#define IORING_ZCRX_AREA_MASK (~(((__u64)1 << IORING_ZCRX_AREA_SHIFT) - 1))
+
+struct io_uring_zcrx_offsets {
+ __u32 head;
+ __u32 tail;
+ __u32 rqes;
+ __u32 __resv2;
+ __u64 __resv[2];
+};
+
+enum io_uring_zcrx_area_flags {
+ IORING_ZCRX_AREA_DMABUF = 1,
+};
+
+struct io_uring_zcrx_area_reg {
+ __u64 addr;
+ __u64 len;
+ __u64 rq_area_token;
+ __u32 flags;
+ __u32 dmabuf_fd;
+ __u64 __resv2[2];
+};
+
+enum zcrx_reg_flags {
+ ZCRX_REG_IMPORT = 1,
+
+ /*
+ * Register a zcrx instance without a net device. All data will be
+ * copied. The refill queue entries might not be automatically
+ * consumed and need to be flushed, see ZCRX_CTRL_FLUSH_RQ.
+ */
+ ZCRX_REG_NODEV = 2,
+};
+
+enum zcrx_features {
+ /*
+ * The user can ask for the desired rx page size by passing the
+ * value in struct io_uring_zcrx_ifq_reg::rx_buf_len.
+ */
+ ZCRX_FEATURE_RX_PAGE_SIZE = 1 << 0,
+};
+
+/*
+ * Argument for IORING_REGISTER_ZCRX_IFQ
+ */
+struct io_uring_zcrx_ifq_reg {
+ __u32 if_idx;
+ __u32 if_rxq;
+ __u32 rq_entries;
+ __u32 flags;
+
+ __u64 area_ptr; /* pointer to struct io_uring_zcrx_area_reg */
+ __u64 region_ptr; /* struct io_uring_region_desc * */
+
+ struct io_uring_zcrx_offsets offsets;
+ __u32 zcrx_id;
+ __u32 rx_buf_len;
+ __u64 __resv[3];
+};
+
+enum zcrx_ctrl_op {
+ ZCRX_CTRL_FLUSH_RQ,
+ ZCRX_CTRL_EXPORT,
+
+ __ZCRX_CTRL_LAST,
+};
+
+struct zcrx_ctrl_flush_rq {
+ __u64 __resv[6];
+};
+
+struct zcrx_ctrl_export {
+ __u32 zcrx_fd;
+ __u32 __resv1[11];
+};
+
+struct zcrx_ctrl {
+ __u32 zcrx_id;
+ __u32 op; /* see enum zcrx_ctrl_op */
+ __u64 __resv[2];
+
+ union {
+ struct zcrx_ctrl_export zc_export;
+ struct zcrx_ctrl_flush_rq zc_flush;
+ };
+};
+
+#endif /* LINUX_IO_ZCRX_H */
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 1dafbc552d37..e998dfbd6960 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -695,11 +695,15 @@ enum iommu_hw_info_type {
* @IOMMU_HW_CAP_PCI_PASID_PRIV: Privileged Mode Supported, user ignores it
* when the struct
* iommu_hw_info::out_max_pasid_log2 is zero.
+ * @IOMMU_HW_CAP_PCI_ATS_NOT_SUPPORTED: ATS is not supported or cannot be used
+ * on this device (absence implies ATS
+ * may be enabled)
*/
enum iommufd_hw_capabilities {
IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
IOMMU_HW_CAP_PCI_PASID_EXEC = 1 << 1,
IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2,
+ IOMMU_HW_CAP_PCI_ATS_NOT_SUPPORTED = 1 << 3,
};
/**
@@ -1052,6 +1056,11 @@ struct iommu_fault_alloc {
enum iommu_viommu_type {
IOMMU_VIOMMU_TYPE_DEFAULT = 0,
IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+ /*
+ * TEGRA241_CMDQV requirements (otherwise, VCMDQs will not work)
+ * - Kernel will allocate a VINTF (HYP_OWN=0) to back this VIOMMU. So,
+ * VMM must wire the HYP_OWN bit to 0 in guest VINTF_CONFIG register
+ */
IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV = 2,
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 80364d4dbebb..6c8afa2047bf 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -11,6 +11,7 @@
#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <asm/kvm.h>
@@ -542,7 +543,7 @@ struct kvm_coalesced_mmio {
struct kvm_coalesced_mmio_ring {
__u32 first, last;
- struct kvm_coalesced_mmio coalesced_mmio[];
+ __DECLARE_FLEX_ARRAY(struct kvm_coalesced_mmio, coalesced_mmio);
};
#define KVM_COALESCED_MMIO_MAX \
@@ -592,7 +593,7 @@ struct kvm_clear_dirty_log {
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
- __u8 sigset[];
+ __DECLARE_FLEX_ARRAY(__u8, sigset);
};
/* for KVM_TPR_ACCESS_REPORTING */
@@ -703,6 +704,11 @@ struct kvm_enable_cap {
#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL
#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \
((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+
+#define KVM_VM_TYPE_ARM_PROTECTED (1UL << 31)
+#define KVM_VM_TYPE_ARM_MASK (KVM_VM_TYPE_ARM_IPA_SIZE_MASK | \
+ KVM_VM_TYPE_ARM_PROTECTED)
+
/*
* ioctls for /dev/kvm fds:
*/
@@ -989,6 +995,7 @@ struct kvm_enable_cap {
#define KVM_CAP_ARM_SEA_TO_USER 245
#define KVM_CAP_S390_USER_OPEREXEC 246
#define KVM_CAP_S390_KEYOP 247
+#define KVM_CAP_S390_VSIE_ESAMODE 248
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1051,7 +1058,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
- struct kvm_irq_routing_entry entries[];
+ __DECLARE_FLEX_ARRAY(struct kvm_irq_routing_entry, entries);
};
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
@@ -1142,7 +1149,7 @@ struct kvm_dirty_tlb {
struct kvm_reg_list {
__u64 n; /* number of regs */
- __u64 reg[];
+ __DECLARE_FLEX_ARRAY(__u64, reg);
};
struct kvm_one_reg {
@@ -1224,6 +1231,10 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC
KVM_DEV_TYPE_LOONGARCH_PCHPIC,
#define KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC
+ KVM_DEV_TYPE_LOONGARCH_DMSINTC,
+#define KVM_DEV_TYPE_LOONGARCH_DMSINTC KVM_DEV_TYPE_LOONGARCH_DMSINTC
+ KVM_DEV_TYPE_ARM_VGIC_V5,
+#define KVM_DEV_TYPE_ARM_VGIC_V5 KVM_DEV_TYPE_ARM_VGIC_V5
KVM_DEV_TYPE_MAX,
@@ -1608,7 +1619,7 @@ struct kvm_stats_desc {
#ifdef __KERNEL__
char name[KVM_STATS_NAME_SIZE];
#else
- char name[];
+ __DECLARE_FLEX_ARRAY(char, name);
#endif
};
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index f88fa1f68b77..10a346e55e95 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -116,7 +116,9 @@ struct landlock_ruleset_attr {
* ``LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF``, this flag only affects
* future nested domains, not the one being created. It can also be used
* with a @ruleset_fd value of -1 to mute subdomain logs without creating a
- * domain.
+ * domain. When combined with %LANDLOCK_RESTRICT_SELF_TSYNC and a
+ * @ruleset_fd value of -1, this configuration is propagated to all threads
+ * of the current process.
*
* The following flag supports policy enforcement in multithreaded processes:
*
@@ -248,6 +250,26 @@ struct landlock_net_port_attr {
*
* This access right is available since the fifth version of the Landlock
* ABI.
+ * - %LANDLOCK_ACCESS_FS_RESOLVE_UNIX: Look up pathname UNIX domain sockets
+ * (:manpage:`unix(7)`). On UNIX domain sockets, this restricts both calls to
+ * :manpage:`connect(2)` as well as calls to :manpage:`sendmsg(2)` with an
+ * explicit recipient address.
+ *
+ * This access right only applies to connections to UNIX server sockets which
+ * were created outside of the newly created Landlock domain (e.g. from within
+ * a parent domain or from an unrestricted process). Newly created UNIX
+ * servers within the same Landlock domain continue to be accessible. In this
+ * regard, %LANDLOCK_ACCESS_FS_RESOLVE_UNIX has the same semantics as the
+ * ``LANDLOCK_SCOPE_*`` flags.
+ *
+ * If a resolve attempt is denied, the operation returns an ``EACCES`` error,
+ * in line with other filesystem access rights (but different to denials for
+ * abstract UNIX domain sockets).
+ *
+ * This access right is available since the ninth version of the Landlock ABI.
+ *
+ * The rationale for this design is described in
+ * :ref:`Documentation/security/landlock.rst <scoped-flags-interaction>`.
*
* Whether an opened file can be truncated with :manpage:`ftruncate(2)` or used
* with `ioctl(2)` is determined during :manpage:`open(2)`, in the same way as
@@ -333,6 +355,7 @@ struct landlock_net_port_attr {
#define LANDLOCK_ACCESS_FS_REFER (1ULL << 13)
#define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14)
#define LANDLOCK_ACCESS_FS_IOCTL_DEV (1ULL << 15)
+#define LANDLOCK_ACCESS_FS_RESOLVE_UNIX (1ULL << 16)
/* clang-format on */
/**
diff --git a/include/uapi/linux/map_benchmark.h b/include/uapi/linux/map_benchmark.h
index c2d91088a40d..4b17829a9f17 100644
--- a/include/uapi/linux/map_benchmark.h
+++ b/include/uapi/linux/map_benchmark.h
@@ -17,6 +17,12 @@
#define DMA_MAP_TO_DEVICE 1
#define DMA_MAP_FROM_DEVICE 2
+enum {
+ DMA_MAP_BENCH_SINGLE_MODE,
+ DMA_MAP_BENCH_SG_MODE,
+ DMA_MAP_BENCH_MODE_MAX
+};
+
struct map_benchmark {
__u64 avg_map_100ns; /* average map latency in 100ns */
__u64 map_stddev; /* standard deviation of map latency */
@@ -28,8 +34,11 @@ struct map_benchmark {
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
- __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */
- __u8 expansion[76]; /* For future use */
+ __u32 granule; /* - SINGLE_MODE: number of pages mapped/unmapped per operation
+ * - SG_MODE: number of scatterlist entries (each maps one page)
+ */
+ __u8 map_mode; /* the mode of dma map */
+ __u8 expansion[75]; /* For future use */
};
#endif /* _UAPI_DMA_BENCHMARK_H */
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 39f7c44baf53..61d6edad4b94 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -82,7 +82,8 @@
#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */
-#define ADVERTISE_RESV 0x1000 /* Unused... */
+#define ADVERTISE_XNP 0x1000 /* Extended Next Page */
+#define ADVERTISE_RESV ADVERTISE_XNP /* Used to be reserved */
#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
#define ADVERTISE_NPAGE 0x8000 /* Next page bit */
diff --git a/include/uapi/linux/module_signature.h b/include/uapi/linux/module_signature.h
new file mode 100644
index 000000000000..634c9f1c8fc2
--- /dev/null
+++ b/include/uapi/linux/module_signature.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Module signature handling.
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _UAPI_LINUX_MODULE_SIGNATURE_H
+#define _UAPI_LINUX_MODULE_SIGNATURE_H
+
+#include <linux/types.h>
+
+/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+#define MODULE_SIGNATURE_MARKER "~Module signature appended~\n"
+
+enum module_signature_type {
+ MODULE_SIGNATURE_TYPE_PKCS7 = 2, /* Signature in PKCS#7 message */
+};
+
+/*
+ * Module signature information block.
+ *
+ * The constituents of the signature section are, in order:
+ *
+ * - Signer's name
+ * - Key identifier
+ * - Signature data
+ * - Information block
+ */
+struct module_signature {
+ __u8 algo; /* Public-key crypto algorithm [0] */
+ __u8 hash; /* Digest algorithm [0] */
+ __u8 id_type; /* Key identifier type [enum module_signature_type] */
+ __u8 signer_len; /* Length of signer's name [0] */
+ __u8 key_id_len; /* Length of key identifier [0] */
+ __u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
+};
+
+#endif /* _UAPI_LINUX_MODULE_SIGNATURE_H */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index d9d86598d100..2204708dbf7a 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -110,6 +110,7 @@ enum fsconfig_command {
* fsmount() flags.
*/
#define FSMOUNT_CLOEXEC 0x00000001
+#define FSMOUNT_NAMESPACE 0x00000002 /* Create the mount in a new mount namespace */
/*
* Mount attributes.
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index e0b579a1df4f..7df1056a35fd 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -160,6 +160,7 @@ enum {
NETDEV_A_QUEUE_DMABUF,
NETDEV_A_QUEUE_IO_URING,
NETDEV_A_QUEUE_XSK,
+ NETDEV_A_QUEUE_LEASE,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -203,6 +204,15 @@ enum {
};
enum {
+ NETDEV_A_LEASE_IFINDEX = 1,
+ NETDEV_A_LEASE_QUEUE,
+ NETDEV_A_LEASE_NETNS_ID,
+
+ __NETDEV_A_LEASE_MAX,
+ NETDEV_A_LEASE_MAX = (__NETDEV_A_LEASE_MAX - 1)
+};
+
+enum {
NETDEV_A_DMABUF_IFINDEX = 1,
NETDEV_A_DMABUF_QUEUES,
NETDEV_A_DMABUF_FD,
@@ -228,6 +238,7 @@ enum {
NETDEV_CMD_BIND_RX,
NETDEV_CMD_NAPI_SET,
NETDEV_CMD_BIND_TX,
+ NETDEV_CMD_QUEUE_CREATE,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 45c71f7d21c2..0b708153469c 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -46,6 +46,10 @@ enum nft_registers {
};
#define NFT_REG_MAX (__NFT_REG_MAX - 1)
+#ifdef __KERNEL__
+#define NFT_REG32_MAX NFT_REG32_15
+#endif
+
#define NFT_REG_SIZE 16
#define NFT_REG32_SIZE 4
#define NFT_REG32_COUNT (NFT_REG32_15 - NFT_REG32_00 + 1)
@@ -884,7 +888,7 @@ enum nft_exthdr_flags {
* @NFT_EXTHDR_OP_TCPOPT: match against tcp options
* @NFT_EXTHDR_OP_IPV4: match against ipv4 options
* @NFT_EXTHDR_OP_SCTP: match against sctp chunks
- * @NFT_EXTHDR_OP_DCCP: match against dccp otions
+ * @NFT_EXTHDR_OP_DCCP: match against dccp options
*/
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 2f5b4be25261..82805eee4357 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -55,7 +55,7 @@
* (it sends %NFC_ATTR_DEVICE_INDEX)
* @NFC_EVENT_TM_ACTIVATED: event emitted when the adapter is activated in
* target mode.
- * @NFC_EVENT_DEVICE_DEACTIVATED: event emitted when the adapter is deactivated
+ * @NFC_EVENT_TM_DEACTIVATED: event emitted when the adapter is deactivated
* from target mode.
* @NFC_CMD_LLC_GET_PARAMS: request LTO, RW, and MIUX parameters for a device
* @NFC_CMD_LLC_SET_PARAMS: set one or more of LTO, RW, and MIUX parameters for
@@ -156,7 +156,7 @@ enum nfc_commands {
* @NFC_ATTR_SE_INDEX: Secure element index
* @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
* @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
- * @NFC_ATTR_APDU: Secure element APDU
+ * @NFC_ATTR_SE_APDU: Secure element APDU
* @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
* @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
* @NFC_ATTR_SE_PARAMS: Parameters data from an evt_transaction
@@ -291,7 +291,7 @@ struct sockaddr_nfc_llcp {
#define NFC_HEADER_SIZE 1
-/**
+/*
* Pseudo-header info for raw socket packets
* First byte is the adapter index
* Second byte contains flags
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b63f71850906..3d55bf4be36f 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -906,8 +906,9 @@
* @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP
* (or GO) interface (i.e. hostapd) to ask for unexpected frames to
* implement sending deauth to stations that send unexpected class 3
- * frames. Also used as the event sent by the kernel when such a frame
- * is received.
+ * frames. For NAN_DATA interfaces, this is used to report frames from
+ * unknown peers (A2 not assigned to any active NDP).
+ * Also used as the event sent by the kernel when such a frame is received.
* For the event, the %NL80211_ATTR_MAC attribute carries the TA and
* other attributes like the interface index are present.
* If used as the command it must have an interface index and you can
@@ -1361,6 +1362,59 @@
* user space that the NAN new cluster has been joined. The cluster ID is
* indicated by %NL80211_ATTR_MAC.
*
+ * @NL80211_CMD_INCUMBENT_SIGNAL_DETECT: Once any incumbent signal is detected
+ * on the operating channel in 6 GHz band, userspace is notified with the
+ * signal interference bitmap using
+ * %NL80211_ATTR_INCUMBENT_SIGNAL_INTERFERENCE_BITMAP. The current channel
+ * definition is also sent.
+ *
+ * @NL80211_CMD_NAN_SET_LOCAL_SCHED: Set the local NAN schedule. NAN must be
+ * operational (%NL80211_CMD_START_NAN was executed). Must contain
+ * %NL80211_ATTR_NAN_TIME_SLOTS and %NL80211_ATTR_NAN_AVAIL_BLOB, but
+ * %NL80211_ATTR_NAN_CHANNEL is optional (for example in case of a channel
+ * removal, that channel won't be provided).
+ * If %NL80211_ATTR_NAN_SCHED_DEFERRED is set, the command is a request
+ * from the device to perform an announced schedule update. See
+ * %NL80211_ATTR_NAN_SCHED_DEFERRED for more details.
+ * If not set, the schedule should be applied immediately.
+ * @NL80211_CMD_NAN_SCHED_UPDATE_DONE: Event sent to user space to notify that
+ * a deferred local NAN schedule update (requested with
+ * %NL80211_CMD_NAN_SET_LOCAL_SCHED and %NL80211_ATTR_NAN_SCHED_DEFERRED)
+ * has been completed. The presence of %NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS
+ * indicates that the update was successful.
+ * @NL80211_CMD_NAN_SET_PEER_SCHED: Set the peer NAN schedule. NAN
+ * must be operational (%NL80211_CMD_START_NAN was executed).
+ * Required attributes: %NL80211_ATTR_MAC (peer NMI address) and
+ * %NL80211_ATTR_NAN_COMMITTED_DW.
+ * Optionally, the full schedule can be provided by including all of:
+ * %NL80211_ATTR_NAN_SEQ_ID, %NL80211_ATTR_NAN_CHANNEL (one or more), and
+ * %NL80211_ATTR_NAN_PEER_MAPS (see &enum nl80211_nan_peer_map_attrs).
+ * If any of these three optional attributes is provided, all three must
+ * be provided.
+ * Each peer channel must be compatible with at least one local channel
+ * set by %NL80211_CMD_NAN_SET_LOCAL_SCHED. Different maps must not
+ * contain compatible channels.
+ * For single-radio devices (n_radio <= 1), different maps must not
+ * schedule the same time slot, as the device cannot operate on multiple
+ * channels simultaneously.
+ * When updating an existing peer schedule, the full new schedule must be
+ * provided - partial updates are not supported. The new schedule will
+ * completely replace the previous one.
+ * The peer schedule is automatically removed when the NMI station is
+ * removed.
+ * @NL80211_CMD_NAN_ULW_UPDATE: Notification from the driver to user space
+ * with the updated ULW blob of the device. User space can use this blob
+ * to attach to frames sent to peers. This notification contains
+ * %NL80211_ATTR_NAN_ULW with the ULW blob.
+ * @NL80211_CMD_NAN_CHANNEL_EVAC: Notification to indicate that a NAN
+ * channel has been evacuated due to resource conflicts with other
+ * interfaces. This can happen when another interface sharing the channel
+ * resource with NAN needs to move to a different channel (e.g., channel
+ * switch or link switch on a BSS interface).
+ * The notification contains %NL80211_ATTR_NAN_CHANNEL attribute
+ * identifying the evacuated channel.
+ * User space may reconfigure the local schedule in response to this
+ * notification.
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1624,6 +1678,18 @@ enum nl80211_commands {
NL80211_CMD_NAN_NEXT_DW_NOTIFICATION,
NL80211_CMD_NAN_CLUSTER_JOINED,
+ NL80211_CMD_INCUMBENT_SIGNAL_DETECT,
+
+ NL80211_CMD_NAN_SET_LOCAL_SCHED,
+
+ NL80211_CMD_NAN_SCHED_UPDATE_DONE,
+
+ NL80211_CMD_NAN_SET_PEER_SCHED,
+
+ NL80211_CMD_NAN_ULW_UPDATE,
+
+ NL80211_CMD_NAN_CHANNEL_EVAC,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2651,7 +2717,8 @@ enum nl80211_commands {
* a flow is assigned on each round of the DRR scheduler.
* @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION). Can be set
- * only if %NL80211_STA_FLAG_WME is set.
+ * only if %NL80211_STA_FLAG_WME is set (except for NAN, which uses WME
+ * anyway).
*
* @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
* in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing
@@ -2983,6 +3050,95 @@ enum nl80211_commands {
* @NL80211_ATTR_DISABLE_UHR: Force UHR capable interfaces to disable
* this feature during association. This is a flag attribute.
* Currently only supported in mac80211 drivers.
+ * @NL80211_ATTR_NAN_CHANNEL: This is a nested attribute. There can be multiple
+ * attributes of this type, each one represents a channel definition and
+ * consists of top-level attributes like %NL80211_ATTR_WIPHY_FREQ.
+ * When used with %NL80211_CMD_NAN_SET_LOCAL_SCHED, it specifies
+ * the channel definitions on which the radio needs to operate during
+ * specific time slots. All of the channel definitions should be mutually
+ * incompatible. With this command, %NL80211_ATTR_NAN_CHANNEL_ENTRY and
+ * %NL80211_ATTR_NAN_RX_NSS are mandatory.
+ * When used with %NL80211_CMD_NAN_SET_PEER_SCHED, it configures the
+ * peer NAN channels. In that case, the channel definitions can be
+ * compatible to each other, or even identical just with different RX NSS.
+ * With this command, %NL80211_ATTR_NAN_CHANNEL_ENTRY and
+ * %NL80211_ATTR_NAN_RX_NSS are mandatory.
+ * The number of channels should fit the current configuration of channels
+ * and the possible interface combinations.
+ * If an existing NAN channel is changed but the chandef isn't, the
+ * channel entry must also remain unchanged.
+ * When used with %NL80211_CMD_NAN_CHANNEL_EVAC, this identifies the
+ * channels that were evacuated.
+ * @NL80211_ATTR_NAN_CHANNEL_ENTRY: a byte array of 6 bytes. Contains the
+ * Channel Entry as defined in Wi-Fi Aware (TM) 4.0 specification Table
+ * 100 (Channel Entry format for the NAN Availability attribute).
+ * @NL80211_ATTR_NAN_RX_NSS: (u8) RX NSS used for a NAN channel. This is
+ * used with %NL80211_ATTR_NAN_CHANNEL when configuring NAN channels with
+ * %NL80211_CMD_NAN_SET_LOCAL_SCHED or %NL80211_CMD_NAN_SET_PEER_SCHED.
+ * @NL80211_ATTR_NAN_TIME_SLOTS: an array of u8 values with 32 cells. Each value
+ * maps a time slot to the chandef on which the radio should operate on in
+ * that time. %NL80211_NAN_SCHED_NOT_AVAIL_SLOT indicates unscheduled.
+ * The chandef is represented using its index, where the index is the
+ * sequential number of the %NL80211_ATTR_NAN_CHANNEL attribute within all
+ * the attributes of this type.
+ * Each slot spans over 16TUs, hence the entire schedule spans over
+ * 512TUs. Other slot durations and periods are currently not supported.
+ * @NL80211_ATTR_NAN_AVAIL_BLOB: (Binary) The NAN Availability attribute blob,
+ * including the attribute header, as defined in Wi-Fi Aware (TM) 4.0
+ * specification Table 93 (NAN Availability attribute format). Required with
+ * %NL80211_CMD_NAN_SET_LOCAL_SCHED to provide the raw NAN Availability
+ * attribute. Used by the device to publish Schedule Update NAFs.
+ * @NL80211_ATTR_NAN_SCHED_DEFERRED: Flag attribute used with
+ * %NL80211_CMD_NAN_SET_LOCAL_SCHED. When present, the command is a
+ * request from the device to perform an announced schedule update. This
+ * means that it needs to send the updated NAN availability to the peers,
+ * and do the actual switch on the right time (i.e. at the end of the slot
+ * after the slot in which the updated NAN Availability was sent). Since
+ * the slots management is done in the device, the update to the peers
+ * needs to be sent by the device, so it knows the actual switch time.
+ * If the flag is not set, the schedule should be applied immediately.
+ * When this flag is set, the total number of NAN channels from both the
+ * old and new schedules must not exceed the allowed number of local NAN
+ * channels, because with deferred scheduling the old channels cannot be
+ * removed before adding the new ones to free up space.
+ * @NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS: flag attribute used with
+ * %NL80211_CMD_NAN_SCHED_UPDATE_DONE to indicate that the deferred
+ * schedule update completed successfully. If this flag is not present,
+ * the update failed.
+ * @NL80211_ATTR_NAN_NMI_MAC: The address of the NMI station to which this NDI
+ * station belongs. Used with %NL80211_CMD_NEW_STATION when adding an NDI
+ * station.
+ * @NL80211_ATTR_NAN_ULW: (Binary) The initial ULW(s) as published by the
+ * peer, as defined in the Wi-Fi Aware (TM) 4.0 specification Table 109
+ * (Unaligned Schedule attribute format). Used to configure the device
+ * with the initial ULW(s) of a peer, before the device starts tracking it.
+ * @NL80211_ATTR_NAN_COMMITTED_DW: (u16) The committed DW as published by the
+ * peer, as defined in the Wi-Fi Aware (TM) 4.0 specification Table 80
+ * (Committed DW Information field format).
+ * @NL80211_ATTR_NAN_SEQ_ID: (u8) The sequence ID of the peer schedule that
+ * %NL80211_CMD_NAN_SET_PEER_SCHED defines. The device follows the
+ * sequence ID in the frames to identify newer schedules. Once a schedule
+ * with a higher sequence ID is received, the device may stop communicating
+ * with that peer until a new peer schedule with a matching sequence ID is
+ * received.
+ * @NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME: (u16) The maximum channel switch
+ * time, in microseconds.
+ * @NL80211_ATTR_NAN_PEER_MAPS: Nested array of peer schedule maps.
+ * Used with %NL80211_CMD_NAN_SET_PEER_SCHED. Contains up to 2 entries,
+ * each containing nested attributes from &enum nl80211_nan_peer_map_attrs.
+ *
+ * @NL80211_ATTR_INCUMBENT_SIGNAL_INTERFERENCE_BITMAP: u32 attribute specifying
+ * the signal interference bitmap detected on the operating bandwidth for
+ * %NL80211_CMD_INCUMBENT_SIGNAL_DETECT. Each bit represents a 20 MHz
+ * segment, lowest bit corresponds to the lowest 20 MHz segment, in the
+ * operating bandwidth where the interference is detected. Punctured
+ * sub-channels are included in the bitmap structure; however, since
+ * interference detection is not performed on these sub-channels, their
+ * corresponding bits are consistently set to zero.
+ *
+ * @NL80211_ATTR_UHR_OPERATION: Full UHR Operation element, as it appears in
+ * association response etc., since it's abridged in the beacon. Used
+ * for START_AP etc.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -3557,6 +3713,26 @@ enum nl80211_attrs {
NL80211_ATTR_UHR_CAPABILITY,
NL80211_ATTR_DISABLE_UHR,
+ NL80211_ATTR_INCUMBENT_SIGNAL_INTERFERENCE_BITMAP,
+
+ NL80211_ATTR_UHR_OPERATION,
+
+ NL80211_ATTR_NAN_CHANNEL,
+ NL80211_ATTR_NAN_CHANNEL_ENTRY,
+ NL80211_ATTR_NAN_TIME_SLOTS,
+ NL80211_ATTR_NAN_RX_NSS,
+ NL80211_ATTR_NAN_AVAIL_BLOB,
+ NL80211_ATTR_NAN_SCHED_DEFERRED,
+ NL80211_ATTR_NAN_SCHED_UPDATE_SUCCESS,
+
+ NL80211_ATTR_NAN_NMI_MAC,
+
+ NL80211_ATTR_NAN_ULW,
+ NL80211_ATTR_NAN_COMMITTED_DW,
+ NL80211_ATTR_NAN_SEQ_ID,
+ NL80211_ATTR_NAN_MAX_CHAN_SWITCH_TIME,
+ NL80211_ATTR_NAN_PEER_MAPS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3650,6 +3826,9 @@ enum nl80211_attrs {
* @NL80211_IFTYPE_OCB: Outside Context of a BSS
* This mode corresponds to the MIB variable dot11OCBActivated=true
* @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
+ * @NL80211_IFTYPE_NAN_DATA: NAN data interface type (netdev); NAN data
+ * interfaces can only be brought up (IFF_UP) when a NAN interface
+ * already exists and NAN has been started (using %NL80211_CMD_START_NAN).
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @NUM_NL80211_IFTYPES: number of defined interface types
*
@@ -3671,6 +3850,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_P2P_DEVICE,
NL80211_IFTYPE_OCB,
NL80211_IFTYPE_NAN,
+ NL80211_IFTYPE_NAN_DATA,
/* keep last */
NUM_NL80211_IFTYPES,
@@ -4360,6 +4540,46 @@ enum nl80211_band_attr {
#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA
/**
+ * enum nl80211_nan_phy_cap_attr - NAN PHY capabilities attributes
+ * @__NL80211_NAN_PHY_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_NAN_PHY_CAP_ATTR_HT_MCS_SET: 16-byte attribute containing HT MCS set
+ * @NL80211_NAN_PHY_CAP_ATTR_HT_CAPA: HT capabilities (u16)
+ * @NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_FACTOR: HT A-MPDU factor (u8)
+ * @NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_DENSITY: HT A-MPDU density (u8)
+ * @NL80211_NAN_PHY_CAP_ATTR_VHT_MCS_SET: 8-byte attribute containing VHT MCS set
+ * @NL80211_NAN_PHY_CAP_ATTR_VHT_CAPA: VHT capabilities (u32)
+ * @NL80211_NAN_PHY_CAP_ATTR_HE_MAC: HE MAC capabilities
+ * @NL80211_NAN_PHY_CAP_ATTR_HE_PHY: HE PHY capabilities
+ * @NL80211_NAN_PHY_CAP_ATTR_HE_MCS_SET: HE supported NSS/MCS combinations
+ * @NL80211_NAN_PHY_CAP_ATTR_HE_PPE: HE PPE thresholds
+ * @NL80211_NAN_PHY_CAP_ATTR_MAX: highest NAN PHY cap attribute number
+ * @__NL80211_NAN_PHY_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_nan_phy_cap_attr {
+ __NL80211_NAN_PHY_CAP_ATTR_INVALID,
+
+ /* HT capabilities */
+ NL80211_NAN_PHY_CAP_ATTR_HT_MCS_SET,
+ NL80211_NAN_PHY_CAP_ATTR_HT_CAPA,
+ NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_FACTOR,
+ NL80211_NAN_PHY_CAP_ATTR_HT_AMPDU_DENSITY,
+
+ /* VHT capabilities */
+ NL80211_NAN_PHY_CAP_ATTR_VHT_MCS_SET,
+ NL80211_NAN_PHY_CAP_ATTR_VHT_CAPA,
+
+ /* HE capabilities */
+ NL80211_NAN_PHY_CAP_ATTR_HE_MAC,
+ NL80211_NAN_PHY_CAP_ATTR_HE_PHY,
+ NL80211_NAN_PHY_CAP_ATTR_HE_MCS_SET,
+ NL80211_NAN_PHY_CAP_ATTR_HE_PPE,
+
+ /* keep last */
+ __NL80211_NAN_PHY_CAP_ATTR_AFTER_LAST,
+ NL80211_NAN_PHY_CAP_ATTR_MAX = __NL80211_NAN_PHY_CAP_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_wmm_rule - regulatory wmm rule
*
* @__NL80211_WMMR_INVALID: attribute number 0 is reserved
@@ -4480,6 +4700,10 @@ enum nl80211_wmm_rule {
* as a non-primary subchannel. Only applicable to S1G channels.
* @NL80211_FREQUENCY_ATTR_NO_UHR: UHR operation is not allowed on this channel
* in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_CAC_START_TIME: Channel Availability Check (CAC)
+ * start time (CLOCK_BOOTTIME, nanoseconds). Only present when CAC is
+ * currently in progress on this channel.
+ * @NL80211_FREQUENCY_ATTR_PAD: attribute used for padding for 64-bit alignment
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4530,6 +4754,8 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_16MHZ,
NL80211_FREQUENCY_ATTR_S1G_NO_PRIMARY,
NL80211_FREQUENCY_ATTR_NO_UHR,
+ NL80211_FREQUENCY_ATTR_CAC_START_TIME,
+ NL80211_FREQUENCY_ATTR_PAD,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -5466,6 +5692,8 @@ enum nl80211_bss_status {
* @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
* @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
* @NL80211_AUTHTYPE_EPPKE: Enhanced Privacy Protection Key Exchange
+ * @NL80211_AUTHTYPE_IEEE8021X: IEEE 802.1X authentication utilizing
+ * Authentication frames
* @__NL80211_AUTHTYPE_NUM: internal
* @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
* @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -5482,6 +5710,7 @@ enum nl80211_auth_type {
NL80211_AUTHTYPE_FILS_SK_PFS,
NL80211_AUTHTYPE_FILS_PK,
NL80211_AUTHTYPE_EPPKE,
+ NL80211_AUTHTYPE_IEEE8021X,
/* keep last */
__NL80211_AUTHTYPE_NUM,
@@ -6795,6 +7024,11 @@ enum nl80211_feature_flags {
* frames in both non‑AP STA and AP mode as specified in
* "IEEE P802.11bi/D3.0, 12.16.6".
*
+ * @NL80211_EXT_FEATURE_IEEE8021X_AUTH: Driver supports IEEE 802.1X
+ * authentication utilizing Authentication frames with user space SME
+ * (NL80211_CMD_AUTHENTICATE) in non-AP STA mode, as specified in
+ * "IEEE P802.11bi/D4.0, 12.16.5".
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6873,6 +7107,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_BEACON_RATE_EHT,
NL80211_EXT_FEATURE_EPPKE,
NL80211_EXT_FEATURE_ASSOC_FRAME_ENCRYPTION,
+ NL80211_EXT_FEATURE_IEEE8021X_AUTH,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -8517,6 +8752,8 @@ enum nl80211_s1g_short_beacon_attrs {
* @NL80211_NAN_CAPA_CAPABILITIES: u8 attribute containing the
* capabilities of the device as defined in Wi-Fi Aware (TM)
* specification Table 79 (Capabilities field).
+ * @NL80211_NAN_CAPA_PHY: nested attribute containing band-agnostic
+ * capabilities for NAN data path. See &enum nl80211_nan_phy_cap_attr.
* @__NL80211_NAN_CAPABILITIES_LAST: Internal
* @NL80211_NAN_CAPABILITIES_MAX: Highest NAN capability attribute.
*/
@@ -8529,9 +8766,38 @@ enum nl80211_nan_capabilities {
NL80211_NAN_CAPA_NUM_ANTENNAS,
NL80211_NAN_CAPA_MAX_CHANNEL_SWITCH_TIME,
NL80211_NAN_CAPA_CAPABILITIES,
+ NL80211_NAN_CAPA_PHY,
/* keep last */
__NL80211_NAN_CAPABILITIES_LAST,
NL80211_NAN_CAPABILITIES_MAX = __NL80211_NAN_CAPABILITIES_LAST - 1,
};
+/**
+ * enum nl80211_nan_peer_map_attrs - NAN peer schedule map attributes
+ *
+ * Nested attributes used within %NL80211_ATTR_NAN_PEER_MAPS to define
+ * individual peer schedule maps.
+ *
+ * @__NL80211_NAN_PEER_MAP_ATTR_INVALID: Invalid
+ * @NL80211_NAN_PEER_MAP_ATTR_MAP_ID: (u8) The map ID for this schedule map.
+ * @NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS: An array of u8 values with 32 cells.
+ * Each value maps a time slot to a channel index within the schedule's
+ * channel list (%NL80211_ATTR_NAN_CHANNEL attributes).
+ * %NL80211_NAN_SCHED_NOT_AVAIL_SLOT indicates unscheduled.
+ * @__NL80211_NAN_PEER_MAP_ATTR_LAST: Internal
+ * @NL80211_NAN_PEER_MAP_ATTR_MAX: Highest peer map attribute
+ */
+enum nl80211_nan_peer_map_attrs {
+ __NL80211_NAN_PEER_MAP_ATTR_INVALID,
+
+ NL80211_NAN_PEER_MAP_ATTR_MAP_ID,
+ NL80211_NAN_PEER_MAP_ATTR_TIME_SLOTS,
+
+ /* keep last */
+ __NL80211_NAN_PEER_MAP_ATTR_LAST,
+ NL80211_NAN_PEER_MAP_ATTR_MAX = __NL80211_NAN_PEER_MAP_ATTR_LAST - 1,
+};
+
+#define NL80211_NAN_SCHED_NOT_AVAIL_SLOT 0xff
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3092c2c6f1d2..aa2acdbda8f8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -70,12 +70,15 @@ enum ovs_datapath_cmd {
* set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on
* %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
* not be sent.
+ * @OVS_DP_ATTR_MASKS_CACHE_SIZE: Number of the entries in the flow table
+ * masks cache.
* @OVS_DP_ATTR_PER_CPU_PIDS: Per-cpu array of PIDs for upcalls when
* OVS_DP_F_DISPATCH_UPCALL_PER_CPU feature is set.
* @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
* datapath. Always present in notifications.
* @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about mega flow masks usage for the
* datapath. Always present in notifications.
+ * @OVS_DP_ATTR_USER_FEATURES: OVS_DP_F_* flags.
* @OVS_DP_ATTR_IFINDEX: Interface index for a new datapath netdev. Only
* valid for %OVS_DP_CMD_NEW requests.
*
@@ -83,18 +86,23 @@ enum ovs_datapath_cmd {
* payload for %OVS_DP_* commands.
*/
enum ovs_datapath_attr {
+ /* private: */
OVS_DP_ATTR_UNSPEC,
+ /* public: */
OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */
OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
+ /* private: */
OVS_DP_ATTR_PAD,
+ /* public: */
OVS_DP_ATTR_MASKS_CACHE_SIZE,
OVS_DP_ATTR_PER_CPU_PIDS, /* Netlink PIDS to receive upcalls in
* per-cpu dispatch mode
*/
OVS_DP_ATTR_IFINDEX,
+ /* private: */
__OVS_DP_ATTR_MAX
};
@@ -181,6 +189,7 @@ enum ovs_packet_cmd {
* %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
* output port is actually a tunnel port. Contains the output tunnel key
* extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_PROBE: Packet operation is a feature probe.
* @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and
* @OVS_PACKET_ATTR_LEN: Packet size before truncation.
* %OVS_PACKET_ATTR_USERSPACE action specify the Maximum received fragment
@@ -196,21 +205,26 @@ enum ovs_packet_cmd {
* payload for %OVS_PACKET_* commands.
*/
enum ovs_packet_attr {
+ /* private: */
OVS_PACKET_ATTR_UNSPEC,
+ /* public: */
OVS_PACKET_ATTR_PACKET, /* Packet data. */
OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */
OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
attributes. */
+ /* private: */
OVS_PACKET_ATTR_UNUSED1,
OVS_PACKET_ATTR_UNUSED2,
+ /* public: */
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
OVS_PACKET_ATTR_HASH, /* Packet hash. */
OVS_PACKET_ATTR_UPCALL_PID, /* u32 Netlink PID. */
+ /* private: */
__OVS_PACKET_ATTR_MAX
};
@@ -257,6 +271,11 @@ enum ovs_vport_type {
* upcalls should not be sent.
* @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
* packets sent or received through the vport.
+ * @OVS_VPORT_ATTR_IFINDEX: Provides the ifindex of a vport, or sets the desired
+ * ifindex while creating a new vport with type %OVS_VPORT_TYPE_INTERNAL.
+ * @OVS_VPORT_ATTR_NETNSID: Provides the netns id of the vport if it's not local.
+ * @OVS_VPORT_ATTR_UPCALL_STATS: Provides upcall statistics for a vport.
+ * Contains nested %OVS_VPORT_UPCALL_ATTR_* attributes.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_VPORT_* commands.
@@ -272,7 +291,9 @@ enum ovs_vport_type {
* ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport.
*/
enum ovs_vport_attr {
+ /* private: */
OVS_VPORT_ATTR_UNSPEC,
+ /* public: */
OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */
OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */
OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */
@@ -280,23 +301,27 @@ enum ovs_vport_attr {
OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
/* receiving upcalls */
OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */
+ /* private: */
OVS_VPORT_ATTR_PAD,
+ /* public: */
OVS_VPORT_ATTR_IFINDEX,
OVS_VPORT_ATTR_NETNSID,
OVS_VPORT_ATTR_UPCALL_STATS,
+ /* private: */
__OVS_VPORT_ATTR_MAX
};
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
/**
- * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL* commands
- * @OVS_VPORT_UPCALL_SUCCESS: 64-bit upcall success packets.
- * @OVS_VPORT_UPCALL_FAIL: 64-bit upcall fail packets.
+ * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_ATTR_UPCALL_STATS
+ * @OVS_VPORT_UPCALL_ATTR_SUCCESS: 64-bit upcall success packets.
+ * @OVS_VPORT_UPCALL_ATTR_FAIL: 64-bit upcall fail packets.
*/
enum ovs_vport_upcall_attr {
OVS_VPORT_UPCALL_ATTR_SUCCESS,
OVS_VPORT_UPCALL_ATTR_FAIL,
+ /* private: */
__OVS_VPORT_UPCALL_ATTR_MAX
};
@@ -431,6 +456,7 @@ enum ovs_frag_type {
OVS_FRAG_TYPE_NONE,
OVS_FRAG_TYPE_FIRST,
OVS_FRAG_TYPE_LATER,
+ /* private: */
__OVS_FRAG_TYPE_MAX
};
@@ -604,6 +630,8 @@ struct ovs_nsh_key_md1 {
* a wildcarded match. Omitting attribute is treated as wildcarding all
* corresponding fields. Optional for all requests. If not present,
* all flow key bits are exact match bits.
+ * @OVS_FLOW_ATTR_PROBE: Flow operation is a feature probe, error logging
+ * should be suppressed.
* @OVS_FLOW_ATTR_UFID: A value between 1-16 octets specifying a unique
* identifier for the flow. Causes the flow to be indexed by this value rather
* than the value of the %OVS_FLOW_ATTR_KEY attribute. Optional for all
@@ -617,7 +645,9 @@ struct ovs_nsh_key_md1 {
* payload for %OVS_FLOW_* commands.
*/
enum ovs_flow_attr {
+ /* private: */
OVS_FLOW_ATTR_UNSPEC,
+ /* public: */
OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */
OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */
@@ -629,13 +659,14 @@ enum ovs_flow_attr {
* logging should be suppressed. */
OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
+ /* private: */
OVS_FLOW_ATTR_PAD,
__OVS_FLOW_ATTR_MAX
};
#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
-/**
+/*
* Omit attributes for notifications.
*
* If a datapath request contains an %OVS_UFID_F_OMIT_* flag, then the datapath
@@ -653,17 +684,23 @@ enum ovs_flow_attr {
* fractions of packets.
* @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event.
* Actions are passed as nested attributes.
+ * @OVS_SAMPLE_ATTR_ARG: For in-kernel use, passing &struct sample_arg
+ * derived from other attributes.
*
* Executes the specified actions with the given probability on a per-packet
* basis. Nested actions will be able to access the probability value of the
* parent @OVS_ACTION_ATTR_SAMPLE.
*/
enum ovs_sample_attr {
+ /* private: */
OVS_SAMPLE_ATTR_UNSPEC,
+ /* public: */
OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
+ /* private: */
__OVS_SAMPLE_ATTR_MAX,
+ /* public: */
#ifdef __KERNEL__
OVS_SAMPLE_ATTR_ARG /* struct sample_arg */
#endif
@@ -693,12 +730,15 @@ struct sample_arg {
* @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
*/
enum ovs_userspace_attr {
+ /* private: */
OVS_USERSPACE_ATTR_UNSPEC,
+ /* public: */
OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */
OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
* to get tunnel info. */
OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */
+ /* private: */
__OVS_USERSPACE_ATTR_MAX
};
@@ -819,7 +859,9 @@ struct ovs_action_hash {
* @OVS_CT_ATTR_TIMEOUT: Variable length string defining conntrack timeout.
*/
enum ovs_ct_attr {
+ /* private: */
OVS_CT_ATTR_UNSPEC,
+ /* public: */
OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */
OVS_CT_ATTR_ZONE, /* u16 zone id. */
OVS_CT_ATTR_MARK, /* mark to associate with this connection. */
@@ -831,6 +873,7 @@ enum ovs_ct_attr {
OVS_CT_ATTR_EVENTMASK, /* u32 mask of IPCT_* events. */
OVS_CT_ATTR_TIMEOUT, /* Associate timeout with this connection for
* fine-grain timeout tuning. */
+ /* private: */
__OVS_CT_ATTR_MAX
};
@@ -859,7 +902,9 @@ enum ovs_ct_attr {
* @OVS_NAT_ATTR_PROTO_RANDOM: Flag for fully randomized L4 port mapping
*/
enum ovs_nat_attr {
+ /* private: */
OVS_NAT_ATTR_UNSPEC,
+ /* public: */
OVS_NAT_ATTR_SRC,
OVS_NAT_ATTR_DST,
OVS_NAT_ATTR_IP_MIN,
@@ -869,38 +914,44 @@ enum ovs_nat_attr {
OVS_NAT_ATTR_PERSISTENT,
OVS_NAT_ATTR_PROTO_HASH,
OVS_NAT_ATTR_PROTO_RANDOM,
+ /* private: */
__OVS_NAT_ATTR_MAX,
};
#define OVS_NAT_ATTR_MAX (__OVS_NAT_ATTR_MAX - 1)
-/*
+/**
* struct ovs_action_push_eth - %OVS_ACTION_ATTR_PUSH_ETH action argument.
* @addresses: Source and destination MAC addresses.
- * @eth_type: Ethernet type
*/
struct ovs_action_push_eth {
struct ovs_key_ethernet addresses;
};
-/*
+/**
* enum ovs_check_pkt_len_attr - Attributes for %OVS_ACTION_ATTR_CHECK_PKT_LEN.
*
* @OVS_CHECK_PKT_LEN_ATTR_PKT_LEN: u16 Packet length to check for.
* @OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER: Nested OVS_ACTION_ATTR_*
 * actions to apply if the packet length is greater than the specified
* length in the attr - OVS_CHECK_PKT_LEN_ATTR_PKT_LEN.
- * @OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL - Nested OVS_ACTION_ATTR_*
+ * @OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL: Nested OVS_ACTION_ATTR_*
 * actions to apply if the packet length is less than or equal to the specified
* length in the attr - OVS_CHECK_PKT_LEN_ATTR_PKT_LEN.
+ * @OVS_CHECK_PKT_LEN_ATTR_ARG: For in-kernel use, passing &struct
+ * check_pkt_len_arg derived from other attributes.
*/
enum ovs_check_pkt_len_attr {
+ /* private: */
OVS_CHECK_PKT_LEN_ATTR_UNSPEC,
+ /* public: */
OVS_CHECK_PKT_LEN_ATTR_PKT_LEN,
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER,
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL,
+ /* private: */
__OVS_CHECK_PKT_LEN_ATTR_MAX,
+ /* public: */
#ifdef __KERNEL__
OVS_CHECK_PKT_LEN_ATTR_ARG /* struct check_pkt_len_arg */
#endif
@@ -968,6 +1019,9 @@ enum ovs_psample_attr {
* from the packet.
 * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_RECIRC: Recirculate the clone of the packet through the
+ * datapath with the new id (u32 recirc_id).
+ * @OVS_ACTION_ATTR_HASH: Compute the packet hash, using &struct ovs_action_hash.
* @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
* top of the packets MPLS label stack. Set the ethertype of the
* encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to
@@ -997,6 +1051,8 @@ enum ovs_psample_attr {
* start of the packet or at the start of the l3 header depending on the value
* of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS
* argument.
+ * @OVS_ACTION_ATTR_DEC_TTL: Decrement TTL or hop limit of the packet. Execute
+ * nested %OVS_DEC_TTL_ATTR_* actions if the value is less or equal to 1.
* @OVS_ACTION_ATTR_DROP: Explicit drop action.
* @OVS_ACTION_ATTR_PSAMPLE: Send a sample of the packet to external observers
* via psample.
@@ -1010,7 +1066,9 @@ enum ovs_psample_attr {
*/
enum ovs_action_attr {
+ /* private: */
OVS_ACTION_ATTR_UNSPEC,
+ /* public: */
OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */
OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */
OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */
@@ -1040,9 +1098,11 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_DROP, /* u32 error code. */
OVS_ACTION_ATTR_PSAMPLE, /* Nested OVS_PSAMPLE_ATTR_*. */
+ /* private: */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
+ /* public: */
#ifdef __KERNEL__
OVS_ACTION_ATTR_SET_TO_MASKED, /* Kernel module internal masked
* set action converted from
diff --git a/include/uapi/linux/ovpn.h b/include/uapi/linux/ovpn.h
index 959b41def61f..06690090a1a9 100644
--- a/include/uapi/linux/ovpn.h
+++ b/include/uapi/linux/ovpn.h
@@ -55,6 +55,7 @@ enum {
OVPN_A_PEER_LINK_TX_BYTES,
OVPN_A_PEER_LINK_RX_PACKETS,
OVPN_A_PEER_LINK_TX_PACKETS,
+ OVPN_A_PEER_TX_ID,
__OVPN_A_PEER_MAX,
OVPN_A_PEER_MAX = (__OVPN_A_PEER_MAX - 1)
@@ -100,6 +101,7 @@ enum {
OVPN_CMD_KEY_SWAP,
OVPN_CMD_KEY_SWAP_NTF,
OVPN_CMD_KEY_DEL,
+ OVPN_CMD_PEER_FLOAT_NTF,
__OVPN_CMD_MAX,
OVPN_CMD_MAX = (__OVPN_CMD_MAX - 1)
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
index ea9a6811fc76..0919246a1611 100644
--- a/include/uapi/linux/pidfd.h
+++ b/include/uapi/linux/pidfd.h
@@ -13,6 +13,7 @@
#ifdef __KERNEL__
#include <linux/sched.h>
#define PIDFD_STALE CLONE_PIDFD
+#define PIDFD_AUTOKILL O_TRUNC
#endif
/* Flags for pidfd_send_signal(). */
@@ -28,10 +29,12 @@
#define PIDFD_INFO_COREDUMP (1UL << 4) /* Only returned if requested. */
#define PIDFD_INFO_SUPPORTED_MASK (1UL << 5) /* Want/got supported mask flags */
#define PIDFD_INFO_COREDUMP_SIGNAL (1UL << 6) /* Always returned if PIDFD_INFO_COREDUMP is requested. */
+#define PIDFD_INFO_COREDUMP_CODE (1UL << 7) /* Always returned if PIDFD_INFO_COREDUMP is requested. */
#define PIDFD_INFO_SIZE_VER0 64 /* sizeof first published struct */
#define PIDFD_INFO_SIZE_VER1 72 /* sizeof second published struct */
#define PIDFD_INFO_SIZE_VER2 80 /* sizeof third published struct */
+#define PIDFD_INFO_SIZE_VER3 88 /* sizeof fourth published struct */
/*
* Values for @coredump_mask in pidfd_info.
@@ -98,6 +101,8 @@ struct pidfd_info {
struct /* coredump info */ {
__u32 coredump_mask;
__u32 coredump_signal;
+ __u32 coredump_code;
+ __u32 coredump_pad; /* align supported_mask to 8 bytes */
};
__u64 supported_mask; /* Mask flags that this kernel supports */
};
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 55b0446fff9d..b6ec6f693719 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -397,30 +397,23 @@ struct prctl_mm_map {
# define PR_RSEQ_SLICE_EXT_ENABLE 0x01
/*
- * Get the current indirect branch tracking configuration for the current
- * thread, this will be the value configured via PR_SET_INDIR_BR_LP_STATUS.
+ * Get or set the control flow integrity (CFI) configuration for the
+ * current thread.
+ *
+ * Some per-thread control flow integrity settings are not yet
+ * controlled through this prctl(); see for example
+ * PR_{GET,SET,LOCK}_SHADOW_STACK_STATUS
*/
-#define PR_GET_INDIR_BR_LP_STATUS 80
-
+#define PR_GET_CFI 80
+#define PR_SET_CFI 81
/*
- * Set the indirect branch tracking configuration. PR_INDIR_BR_LP_ENABLE will
- * enable cpu feature for user thread, to track all indirect branches and ensure
- * they land on arch defined landing pad instruction.
- * x86 - If enabled, an indirect branch must land on an ENDBRANCH instruction.
- * arch64 - If enabled, an indirect branch must land on a BTI instruction.
- * riscv - If enabled, an indirect branch must land on an lpad instruction.
- * PR_INDIR_BR_LP_DISABLE will disable feature for user thread and indirect
- * branches will no more be tracked by cpu to land on arch defined landing pad
- * instruction.
- */
-#define PR_SET_INDIR_BR_LP_STATUS 81
-# define PR_INDIR_BR_LP_ENABLE (1UL << 0)
-
-/*
- * Prevent further changes to the specified indirect branch tracking
- * configuration. All bits may be locked via this call, including
- * undefined bits.
+ * Forward-edge CFI variants (excluding ARM64 BTI, which has its own
+ * prctl()s).
*/
-#define PR_LOCK_INDIR_BR_LP_STATUS 82
+#define PR_CFI_BRANCH_LANDING_PADS 0
+/* Return and control values for PR_{GET,SET}_CFI */
+# define PR_CFI_ENABLE _BITUL(0)
+# define PR_CFI_DISABLE _BITUL(1)
+# define PR_CFI_LOCK _BITUL(2)
#endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 2b5b042eb73b..52dae70b058b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -277,7 +277,7 @@ struct sev_user_data_snp_wrapped_vlek_hashstick {
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
- * @opaque: pointer to the command structure
+ * @data: pointer to the command structure
* @error: SEV FW return code on failure
*/
struct sev_issue_cmd {
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 359a14cc76a4..33a4624285cd 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -34,8 +34,12 @@
#define CLONE_IO 0x80000000 /* Clone io context */
/* Flags for the clone3() syscall. */
-#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
-#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
+#define CLONE_CLEAR_SIGHAND (1ULL << 32) /* Clear any signal handler and reset to SIG_DFL. */
+#define CLONE_INTO_CGROUP (1ULL << 33) /* Clone into a specific cgroup given the right permissions. */
+#define CLONE_AUTOREAP (1ULL << 34) /* Auto-reap child on exit. */
+#define CLONE_NNP (1ULL << 35) /* Set no_new_privs on child. */
+#define CLONE_PIDFD_AUTOKILL (1ULL << 36) /* Kill child when clone pidfd closes. */
+#define CLONE_EMPTY_MNTNS (1ULL << 37) /* Create an empty mount namespace. */
/*
* cloning flags intersect with CSIGNAL so can be used with unshare and clone3
@@ -43,6 +47,12 @@
*/
#define CLONE_NEWTIME 0x00000080 /* New time namespace */
+/*
+ * unshare flags share the bit space with clone flags but only apply to the
+ * unshare syscall:
+ */
+#define UNSHARE_EMPTY_MNTNS 0x00100000 /* Unshare an empty mount namespace. */
+
#ifndef __ASSEMBLY__
/**
* struct clone_args - arguments for the clone3 syscall
@@ -146,4 +156,7 @@ struct clone_args {
SCHED_FLAG_KEEP_ALL | \
SCHED_FLAG_UTIL_CLAMP)
+/* Only for sched_getattr() own flag param, if task is SCHED_DEADLINE */
+#define SCHED_GETATTR_FLAG_DL_DYNAMIC 0x01
+
#endif /* _UAPI_LINUX_SCHED_H */
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index 9025dd5a4f0f..ef4d3be6ca7f 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -74,6 +74,19 @@ struct opal_lr_act {
__u8 align[2]; /* Align to 8 byte boundary */
};
+struct opal_lr_react {
+ struct opal_key key;
+ struct opal_key new_admin_key; /* Set new Admin1 PIN if key_len is > 0 */
+ __u8 num_lrs; /*
+ * Configure selected ranges (from lr[]) in SUM.
+ * If num_lrs > 0 the 'entire_table' must be 0
+ */
+ __u8 lr[OPAL_MAX_LRS];
+ __u8 range_policy; /* Set RangeStartRangeLengthPolicy parameter */
+ __u8 entire_table; /* Set all locking objects in SUM */
+ __u8 align[4]; /* Align to 8 byte boundary */
+};
+
struct opal_session_info {
__u32 sum;
__u32 who;
@@ -98,6 +111,18 @@ struct opal_lr_status {
__u8 align[4];
};
+struct opal_sum_ranges {
+ /*
+ * Initiate Admin1 session if key_len > 0,
+ * use Anybody session otherwise.
+ */
+ struct opal_key key;
+ __u8 num_lrs;
+ __u8 lr[OPAL_MAX_LRS];
+ __u8 range_policy;
+ __u8 align[5]; /* Align to 8 byte boundary */
+};
+
struct opal_lock_unlock {
struct opal_session_info session;
__u32 l_state;
@@ -216,5 +241,10 @@ struct opal_revert_lsp {
#define IOC_OPAL_DISCOVERY _IOW('p', 239, struct opal_discovery)
#define IOC_OPAL_REVERT_LSP _IOW('p', 240, struct opal_revert_lsp)
#define IOC_OPAL_SET_SID_PW _IOW('p', 241, struct opal_new_pw)
+#define IOC_OPAL_REACTIVATE_LSP _IOW('p', 242, struct opal_lr_react)
+#define IOC_OPAL_LR_SET_START_LEN _IOW('p', 243, struct opal_user_lr_setup)
+#define IOC_OPAL_ENABLE_DISABLE_LR _IOW('p', 244, struct opal_user_lr_setup)
+#define IOC_OPAL_GET_SUM_STATUS _IOW('p', 245, struct opal_sum_ranges)
+#define IOC_OPAL_STACK_RESET _IO('p', 246)
#endif /* _UAPI_SED_OPAL_H */
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index ae78791372b8..485889b19900 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -20,6 +20,7 @@
enum {
SEG6_IPTUNNEL_UNSPEC,
SEG6_IPTUNNEL_SRH,
+ SEG6_IPTUNNEL_SRC, /* struct in6_addr */
__SEG6_IPTUNNEL_MAX,
};
#define SEG6_IPTUNNEL_MAX (__SEG6_IPTUNNEL_MAX - 1)
diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
index c102ef35d11e..7e5e3900d39f 100644
--- a/include/uapi/linux/trace_mmap.h
+++ b/include/uapi/linux/trace_mmap.h
@@ -10,6 +10,7 @@
* @meta_struct_len: Size of this structure.
* @subbuf_size: Size of each sub-buffer.
 * @nr_subbufs: Number of sub-buffers in the ring-buffer, including the reader.
+ * @reader: The reader composite info structure
* @reader.lost_events: Number of events lost at the time of the reader swap.
* @reader.id: subbuf ID of the current reader. ID range [0 : @nr_subbufs - 1]
* @reader.read: Number of bytes read on the reader subbuf.
@@ -17,8 +18,8 @@
* @entries: Number of entries in the ring-buffer.
* @overrun: Number of entries lost in the ring-buffer.
* @read: Number of entries that have been read.
- * @Reserved1: Internal use only.
- * @Reserved2: Internal use only.
+ * @pages_lost: Number of pages overwritten by the writer.
+ * @pages_touched: Number of pages written by the writer.
*/
struct trace_buffer_meta {
__u32 meta_page_size;
@@ -39,8 +40,8 @@ struct trace_buffer_meta {
__u64 overrun;
__u64 read;
- __u64 Reserved1;
- __u64 Reserved2;
+ __u64 pages_lost;
+ __u64 pages_touched;
};
#define TRACE_MMAP_IOCTL_GET_READER _IO('R', 0x20)
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index a88876756805..6991370a72ce 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -58,6 +58,45 @@
#define UBLK_U_CMD_TRY_STOP_DEV \
_IOWR('u', 0x17, struct ublksrv_ctrl_cmd)
/*
+ * Register a shared memory buffer for zero-copy I/O.
+ * Input: ctrl_cmd.addr points to struct ublk_shmem_buf_reg (buffer VA + size)
+ * ctrl_cmd.len = sizeof(struct ublk_shmem_buf_reg)
+ * Result: >= 0 is the assigned buffer index, < 0 is error
+ *
+ * The kernel pins pages from the calling process's address space
+ * and inserts PFN ranges into a per-device maple tree. When a block
+ * request's pages match registered pages, the driver sets
+ * UBLK_IO_F_SHMEM_ZC and encodes the buffer index + offset in addr,
+ * allowing the server to access the data via its own mapping of the
+ * same shared memory — true zero copy.
+ *
+ * The memory can be backed by memfd, hugetlbfs, or any GUP-compatible
+ * shared mapping. Queue freeze is handled internally.
+ *
+ * The buffer VA and size are passed via a user buffer (not inline in
+ * ctrl_cmd) so that unprivileged devices can prepend the device path
+ * to ctrl_cmd.addr without corrupting the VA.
+ */
+#define UBLK_U_CMD_REG_BUF \
+ _IOWR('u', 0x18, struct ublksrv_ctrl_cmd)
+/*
+ * Unregister a shared memory buffer.
+ * Input: ctrl_cmd.data[0] = buffer index
+ */
+#define UBLK_U_CMD_UNREG_BUF \
+ _IOWR('u', 0x19, struct ublksrv_ctrl_cmd)
+
+/* Parameter buffer for UBLK_U_CMD_REG_BUF, pointed to by ctrl_cmd.addr */
+struct ublk_shmem_buf_reg {
+ __u64 addr; /* userspace virtual address of shared memory */
+ __u64 len; /* buffer size in bytes, page-aligned, default max 4GB */
+ __u32 flags;
+ __u32 reserved;
+};
+
+/* Pin pages without FOLL_WRITE; usable with write-sealed memfd */
+#define UBLK_SHMEM_BUF_READ_ONLY (1U << 0)
+/*
* 64bits are enough now, and it should be easy to extend in case of
* running out of feature flags
*/
@@ -370,6 +409,14 @@
/* Disable automatic partition scanning when device is started */
#define UBLK_F_NO_AUTO_PART_SCAN (1ULL << 18)
+/*
+ * Enable shared memory zero copy. When enabled, the server can register
+ * shared memory buffers via UBLK_U_CMD_REG_BUF. If a block request's
+ * pages match a registered buffer, UBLK_IO_F_SHMEM_ZC is set and addr
+ * encodes the buffer index + offset instead of a userspace buffer address.
+ */
+#define UBLK_F_SHMEM_ZC (1ULL << 19)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -469,6 +516,12 @@ struct ublksrv_ctrl_dev_info {
#define UBLK_IO_F_NEED_REG_BUF (1U << 17)
/* Request has an integrity data buffer */
#define UBLK_IO_F_INTEGRITY (1UL << 18)
+/*
+ * I/O buffer is in a registered shared memory buffer. When set, the addr
+ * field in ublksrv_io_desc encodes buffer index and byte offset instead
+ * of a userspace virtual address.
+ */
+#define UBLK_IO_F_SHMEM_ZC (1U << 19)
/*
 * io cmd is described by this structure, and stored in shared memory, indexed
@@ -743,4 +796,31 @@ struct ublk_params {
struct ublk_param_integrity integrity;
};
+/*
+ * Shared memory zero-copy addr encoding for UBLK_IO_F_SHMEM_ZC.
+ *
+ * When UBLK_IO_F_SHMEM_ZC is set, ublksrv_io_desc.addr is encoded as:
+ * bits [0:31] = byte offset within the buffer (up to 4GB)
+ * bits [32:47] = buffer index (up to 65536)
+ * bits [48:63] = reserved (must be zero)
+ */
+#define UBLK_SHMEM_ZC_OFF_MASK 0xffffffffULL
+#define UBLK_SHMEM_ZC_IDX_OFF 32
+#define UBLK_SHMEM_ZC_IDX_MASK 0xffffULL
+
+static inline __u64 ublk_shmem_zc_addr(__u16 index, __u32 offset)
+{
+ return ((__u64)index << UBLK_SHMEM_ZC_IDX_OFF) | offset;
+}
+
+static inline __u16 ublk_shmem_zc_index(__u64 addr)
+{
+ return (addr >> UBLK_SHMEM_ZC_IDX_OFF) & UBLK_SHMEM_ZC_IDX_MASK;
+}
+
+static inline __u32 ublk_shmem_zc_offset(__u64 addr)
+{
+ return (__u32)(addr & UBLK_SHMEM_ZC_OFF_MASK);
+}
+
#endif
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index edca3e430305..877fb02df8fb 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -29,6 +29,8 @@ struct udphdr {
/* UDP socket options */
#define UDP_CORK 1 /* Never send partially complete segments */
+/* Deprecated, reserved for UDPLITE_SEND_CSCOV 10 */
+/* Deprecated, reserved for UDPLITE_RECV_CSCOV 11 */
#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP over IPv6 */
#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index bb7b89330d35..5de618a3a5ee 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -141,7 +141,7 @@ struct vfio_info_cap_header {
*
* Retrieve information about the group. Fills in provided
* struct vfio_group_info. Caller sets argsz.
- * Return: 0 on succes, -errno on failure.
+ * Return: 0 on success, -errno on failure.
* Availability: Always
*/
struct vfio_group_status {
@@ -1266,6 +1266,19 @@ enum vfio_device_mig_state {
* The initial_bytes field indicates the amount of initial precopy
* data available from the device. This field should have a non-zero initial
* value and decrease as migration data is read from the device.
+ * The presence of the VFIO_PRECOPY_INFO_REINIT output flag indicates
+ * that new initial data is present on the stream.
+ * The new initial data may result, for example, from device reconfiguration
+ * during migration that requires additional initialization data.
+ * In that case initial_bytes may report a non-zero value irrespective of
+ * any previously reported values, which progresses towards zero as precopy
+ * data is read from the data stream. dirty_bytes is also reset
+ * to zero and represents the state change of the device relative to the new
+ * initial_bytes.
+ * VFIO_PRECOPY_INFO_REINIT can be reported only after userspace opts in to
+ * VFIO_DEVICE_FEATURE_MIG_PRECOPY_INFOv2. Without this opt-in, the flags field
+ * of struct vfio_precopy_info is reserved for bug-compatibility reasons.
+ *
* It is recommended to leave PRE_COPY for STOP_COPY only after this field
* reaches zero. Leaving PRE_COPY earlier might make things slower.
*
@@ -1301,6 +1314,7 @@ enum vfio_device_mig_state {
struct vfio_precopy_info {
__u32 argsz;
__u32 flags;
+#define VFIO_PRECOPY_INFO_REINIT (1 << 0) /* output - new initial data is present */
__aligned_u64 initial_bytes;
__aligned_u64 dirty_bytes;
};
@@ -1510,6 +1524,16 @@ struct vfio_device_feature_dma_buf {
struct vfio_region_dma_range dma_ranges[] __counted_by(nr_ranges);
};
+/*
+ * Enables the migration precopy_info_v2 behaviour.
+ *
+ * VFIO_DEVICE_FEATURE_MIG_PRECOPY_INFOv2.
+ *
+ * On SET, enables the v2 pre_copy_info behaviour, where the
+ * vfio_precopy_info.flags is a valid output field.
+ */
+#define VFIO_DEVICE_FEATURE_MIG_PRECOPY_INFOv2 12
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 23c39b96190f..5977723fb3b5 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -4,24 +4,16 @@
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
+#include <linux/types.h>
+
#include <uapi/linux/bits.h>
#include <uapi/linux/time.h>
-#include <uapi/linux/types.h>
-#include <uapi/asm-generic/errno-base.h>
#include <vdso/align.h>
#include <vdso/bits.h>
#include <vdso/cache.h>
-#include <vdso/clocksource.h>
-#include <vdso/ktime.h>
-#include <vdso/limits.h>
-#include <vdso/math64.h>
#include <vdso/page.h>
-#include <vdso/processor.h>
#include <vdso/time.h>
-#include <vdso/time32.h>
-#include <vdso/time64.h>
#ifdef CONFIG_ARCH_HAS_VDSO_TIME_DATA
#include <asm/vdso/time_data.h>
@@ -80,8 +72,8 @@ struct vdso_timestamp {
* @mask: clocksource mask
* @mult: clocksource multiplier
* @shift: clocksource shift
- * @basetime[clock_id]: basetime per clock_id
- * @offset[clock_id]: time namespace offset per clock_id
+ * @basetime: basetime per clock_id
+ * @offset: time namespace offset per clock_id
*
* See also struct vdso_time_data for basic access and ordering information as
* struct vdso_clock is used there.
@@ -184,17 +176,6 @@ enum vdso_pages {
VDSO_NR_PAGES
};
-/*
- * The generic vDSO implementation requires that gettimeofday.h
- * provides:
- * - __arch_get_hw_counter(): to get the hw counter based on the
- * clock_mode.
- * - gettimeofday_fallback(): fallback for gettimeofday.
- * - clock_gettime_fallback(): fallback for clock_gettime.
- * - clock_getres_fallback(): fallback for clock_getres.
- */
-#include <asm/vdso/gettimeofday.h>
-
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_VDSO_GETRANDOM
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 1a5ee9d9052c..a3bf4f1c0d37 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -6,6 +6,13 @@
#include <asm/barrier.h>
#include <vdso/datapage.h>
+#include <vdso/processor.h>
+#include <vdso/clocksource.h>
+
+static __always_inline bool vdso_is_timens_clock(const struct vdso_clock *vc)
+{
+ return IS_ENABLED(CONFIG_TIME_NS) && vc->clock_mode == VDSO_CLOCKMODE_TIMENS;
+}
static __always_inline u32 vdso_read_begin(const struct vdso_clock *vc)
{
@@ -18,6 +25,28 @@ static __always_inline u32 vdso_read_begin(const struct vdso_clock *vc)
return seq;
}
+/*
+ * Variant of vdso_read_begin() to handle VDSO_CLOCKMODE_TIMENS.
+ *
+ * Time namespace enabled tasks have a special VVAR page installed which has
+ * vc->seq set to 1 and vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non
+ * time namespace affected tasks this does not affect performance because if
+ * vc->seq is odd, i.e. a concurrent update is in progress the extra check for
+ * vc->clock_mode is just a few extra instructions while spin waiting for
+ * vc->seq to become even again.
+ */
+static __always_inline bool vdso_read_begin_timens(const struct vdso_clock *vc, u32 *seq)
+{
+ while (unlikely((*seq = READ_ONCE(vc->seq)) & 1)) {
+ if (vdso_is_timens_clock(vc))
+ return true;
+ cpu_relax();
+ }
+ smp_rmb();
+
+ return false;
+}
+
static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
u32 start)
{
@@ -25,7 +54,7 @@ static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
smp_rmb();
seq = READ_ONCE(vc->seq);
- return seq != start;
+ return unlikely(seq != start);
}
static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
diff --git a/include/video/vga.h b/include/video/vga.h
index 468764d6727a..2f13c371800b 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -46,6 +46,7 @@
#define VGA_MIS_R 0x3CC /* Misc Output Read Register */
#define VGA_MIS_W 0x3C2 /* Misc Output Write Register */
#define VGA_FTC_R 0x3CA /* Feature Control Read Register */
+#define VGA_IS0_R 0x3C2 /* Input Status Register 0 */
#define VGA_IS1_RC 0x3DA /* Input Status Register 1 - color emulation */
#define VGA_IS1_RM 0x3BA /* Input Status Register 1 - mono emulation */
#define VGA_PEL_D 0x3C9 /* PEL Data Register */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 69ac6d80a006..a33a60a2ea72 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -84,8 +84,20 @@ struct gntab_unmap_queue_data
};
int gnttab_init(void);
+#ifdef CONFIG_HIBERNATE_CALLBACKS
int gnttab_suspend(void);
int gnttab_resume(void);
+#else
+static inline int gnttab_suspend(void)
+{
+ return 0;
+}
+
+static inline int gnttab_resume(void)
+{
+ return 0;
+}
+#endif
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
int readonly);
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
index cf17e89ed861..687949bdebb1 100644
--- a/include/xen/interface/io/console.h
+++ b/include/xen/interface/io/console.h
@@ -19,6 +19,19 @@ struct xencons_interface {
char out[2048];
XENCONS_RING_IDX in_cons, in_prod;
XENCONS_RING_IDX out_cons, out_prod;
+/*
+ * Flag values signaling from backend to frontend whether the console is
+ * connected. i.e. Whether it will be serviced and emptied.
+ *
+ * The flag starts as disconnected.
+ */
+#define XENCONSOLE_DISCONNECTED 1
+/*
+ * The flag is set to connected when the backend connects and the console
+ * will be serviced.
+ */
+#define XENCONSOLE_CONNECTED 0
+ uint8_t connection;
};
#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */