Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpixf.h | 2
-rw-r--r-- include/acpi/actbl1.h | 257
-rw-r--r-- include/acpi/actbl2.h | 194
-rw-r--r-- include/acpi/actbl3.h | 1
-rw-r--r-- include/acpi/acuuid.h | 6
-rw-r--r-- include/acpi/cppc_acpi.h | 11
-rw-r--r-- include/acpi/ghes.h | 2
-rw-r--r-- include/acpi/processor.h | 34
-rw-r--r-- include/asm-generic/Kbuild | 1
-rw-r--r-- include/asm-generic/atomic64.h | 2
-rw-r--r-- include/asm-generic/audit_change_attr.h | 3
-rw-r--r-- include/asm-generic/audit_read.h | 6
-rw-r--r-- include/asm-generic/rqspinlock.h | 4
-rw-r--r-- include/asm-generic/tlb.h | 7
-rw-r--r-- include/asm-generic/topology.h | 8
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 3
-rw-r--r-- include/crypto/aes.h | 278
-rw-r--r-- include/crypto/df_sp80090a.h | 2
-rw-r--r-- include/crypto/gcm.h | 2
-rw-r--r-- include/crypto/internal/acompress.h | 7
-rw-r--r-- include/crypto/internal/engine.h | 2
-rw-r--r-- include/crypto/internal/skcipher.h | 7
-rw-r--r-- include/crypto/mldsa.h | 62
-rw-r--r-- include/crypto/nh.h | 52
-rw-r--r-- include/crypto/nhpoly1305.h | 74
-rw-r--r-- include/crypto/public_key.h | 6
-rw-r--r-- include/crypto/sha1.h | 10
-rw-r--r-- include/cxl/event.h | 22
-rw-r--r-- include/drm/bridge/inno_hdmi.h | 35
-rw-r--r-- include/drm/bridge/samsung-dsim.h | 1
-rw-r--r-- include/drm/display/drm_dp_helper.h | 3
-rw-r--r-- include/drm/drm_atomic.h | 44
-rw-r--r-- include/drm/drm_bridge.h | 156
-rw-r--r-- include/drm/drm_connector.h | 105
-rw-r--r-- include/drm/drm_device.h | 15
-rw-r--r-- include/drm/drm_fb_helper.h | 21
-rw-r--r-- include/drm/drm_file.h | 7
-rw-r--r-- include/drm/drm_gem.h | 45
-rw-r--r-- include/drm/drm_gem_shmem_helper.h | 14
-rw-r--r-- include/drm/drm_gpusvm.h | 29
-rw-r--r-- include/drm/drm_gpuvm.h | 12
-rw-r--r-- include/drm/drm_mode_object.h | 3
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 23
-rw-r--r-- include/drm/drm_of.h | 6
-rw-r--r-- include/drm/drm_pagemap.h | 106
-rw-r--r-- include/drm/drm_pagemap_util.h | 92
-rw-r--r-- include/drm/drm_property.h | 1
-rw-r--r-- include/drm/drm_vblank.h | 3
-rw-r--r-- include/drm/gpu_scheduler.h | 52
-rw-r--r-- include/drm/intel/display_parent_interface.h | 104
-rw-r--r-- include/drm/intel/intel_lb_mei_interface.h | 3
-rw-r--r-- include/dt-bindings/clock/google,gs101.h | 36
-rw-r--r-- include/dt-bindings/clock/oxsemi,ox810se.h | 19
-rw-r--r-- include/dt-bindings/clock/oxsemi,ox820.h | 29
-rw-r--r-- include/dt-bindings/clock/qcom,gcc-msm8917.h | 1
-rw-r--r-- include/dt-bindings/clock/qcom,mss-sc7180.h | 12
-rw-r--r-- include/dt-bindings/clock/qcom,x1e80100-gcc.h | 3
-rw-r--r-- include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h | 3
-rw-r--r-- include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h | 3
-rw-r--r-- include/dt-bindings/clock/xlnx-versal-clk.h | 123
-rw-r--r-- include/dt-bindings/clock/xlnx-zynqmp-clk.h | 133
-rw-r--r-- include/dt-bindings/dma/jz4775-dma.h | 44
-rw-r--r-- include/dt-bindings/dma/x2000-dma.h | 54
-rw-r--r-- include/dt-bindings/gce/mt6779-gce.h | 222
-rw-r--r-- include/dt-bindings/gpio/nvidia,tegra264-gpio.h | 61
-rw-r--r-- include/dt-bindings/memory/mt6779-larb-port.h | 206
-rw-r--r-- include/dt-bindings/mux/ti-serdes.h | 190
-rw-r--r-- include/dt-bindings/phy/phy.h | 4
-rw-r--r-- include/dt-bindings/pinctrl/mt6397-pinfunc.h | 257
-rw-r--r-- include/dt-bindings/regulator/samsung,s2mpg10-regulator.h | 53
-rw-r--r-- include/dt-bindings/reset/bcm6318-reset.h | 20
-rw-r--r-- include/dt-bindings/reset/imx8ulp-pcc-reset.h | 59
-rw-r--r-- include/dt-bindings/reset/oxsemi,ox810se.h | 42
-rw-r--r-- include/dt-bindings/reset/oxsemi,ox820.h | 42
-rw-r--r-- include/dt-bindings/reset/spacemit,k3-resets.h | 171
-rw-r--r-- include/dt-bindings/sound/audio-jack-events.h | 10
-rw-r--r-- include/dt-bindings/thermal/mediatek,lvts-thermal.h | 29
-rw-r--r-- include/keys/trusted-type.h | 7
-rw-r--r-- include/keys/trusted_pkwm.h | 33
-rw-r--r-- include/kunit/test.h | 3
-rw-r--r-- include/linux/acpi.h | 1
-rw-r--r-- include/linux/acpi_iort.h | 11
-rw-r--r-- include/linux/aer.h | 2
-rw-r--r-- include/linux/array_size.h | 6
-rw-r--r-- include/linux/ata.h | 3
-rw-r--r-- include/linux/atomic/atomic-arch-fallback.h | 18
-rw-r--r-- include/linux/atomic/atomic-instrumented.h | 26
-rw-r--r-- include/linux/atomic/atomic-long.h | 10
-rw-r--r-- include/linux/attribute_container.h | 2
-rw-r--r-- include/linux/audit.h | 26
-rw-r--r-- include/linux/audit_arch.h | 7
-rw-r--r-- include/linux/backing-dev-defs.h | 3
-rw-r--r-- include/linux/balloon.h | 77
-rw-r--r-- include/linux/balloon_compaction.h | 160
-rw-r--r-- include/linux/bio.h | 33
-rw-r--r-- include/linux/bit_spinlock.h | 24
-rw-r--r-- include/linux/bitfield.h | 5
-rw-r--r-- include/linux/blk-crypto.h | 32
-rw-r--r-- include/linux/blk-integrity.h | 6
-rw-r--r-- include/linux/blk-mq-dma.h | 2
-rw-r--r-- include/linux/blk-mq.h | 4
-rw-r--r-- include/linux/blk_types.h | 7
-rw-r--r-- include/linux/blkdev.h | 24
-rw-r--r-- include/linux/bnge/hsi.h | 12609
-rw-r--r-- include/linux/bnxt/hsi.h | 167
-rw-r--r-- include/linux/bpf-cgroup.h | 4
-rw-r--r-- include/linux/bpf.h | 178
-rw-r--r-- include/linux/bpf_local_storage.h | 29
-rw-r--r-- include/linux/bpf_mprog.h | 10
-rw-r--r-- include/linux/bpf_verifier.h | 14
-rw-r--r-- include/linux/btf.h | 9
-rw-r--r-- include/linux/can/core.h | 1
-rw-r--r-- include/linux/can/skb.h | 38
-rw-r--r-- include/linux/capability.h | 6
-rw-r--r-- include/linux/cgroup-defs.h | 8
-rw-r--r-- include/linux/cleanup.h | 58
-rw-r--r-- include/linux/clk.h | 23
-rw-r--r-- include/linux/cma.h | 27
-rw-r--r-- include/linux/compiler-clang.h | 2
-rw-r--r-- include/linux/compiler-context-analysis.h | 436
-rw-r--r-- include/linux/compiler.h | 12
-rw-r--r-- include/linux/compiler_types.h | 122
-rw-r--r-- include/linux/console.h | 20
-rw-r--r-- include/linux/cper.h | 3
-rw-r--r-- include/linux/cpu.h | 4
-rw-r--r-- include/linux/cpufreq.h | 5
-rw-r--r-- include/linux/cpuhplock.h | 1
-rw-r--r-- include/linux/cpuset.h | 10
-rw-r--r-- include/linux/cred.h | 1
-rw-r--r-- include/linux/damon.h | 65
-rw-r--r-- include/linux/debugfs.h | 12
-rw-r--r-- include/linux/delayacct.h | 8
-rw-r--r-- include/linux/device/bus.h | 4
-rw-r--r-- include/linux/device/devres.h | 4
-rw-r--r-- include/linux/device_cgroup.h | 2
-rw-r--r-- include/linux/dma-buf-mapping.h | 2
-rw-r--r-- include/linux/dma-buf.h | 22
-rw-r--r-- include/linux/dma-fence.h | 35
-rw-r--r-- include/linux/dma-heap.h | 2
-rw-r--r-- include/linux/dma-map-ops.h | 4
-rw-r--r-- include/linux/dpll.h | 64
-rw-r--r-- include/linux/efi.h | 9
-rw-r--r-- include/linux/entry-common.h | 167
-rw-r--r-- include/linux/ethtool.h | 36
-rw-r--r-- include/linux/exportfs.h | 33
-rw-r--r-- include/linux/fb.h | 4
-rw-r--r-- include/linux/filelock.h | 18
-rw-r--r-- include/linux/filter.h | 27
-rw-r--r-- include/linux/firewire.h | 36
-rw-r--r-- include/linux/firmware/cirrus/cs_dsp_test_utils.h | 6
-rw-r--r-- include/linux/firmware/cirrus/wmfw.h | 7
-rw-r--r-- include/linux/firmware/imx/sm.h | 2
-rw-r--r-- include/linux/firmware/qcom/qcom_scm.h | 30
-rw-r--r-- include/linux/firmware/xlnx-zynqmp-crypto.h | 119
-rw-r--r-- include/linux/firmware/xlnx-zynqmp.h | 14
-rw-r--r-- include/linux/fortify-string.h | 8
-rw-r--r-- include/linux/framer/framer.h | 5
-rw-r--r-- include/linux/fs.h | 82
-rw-r--r-- include/linux/fs/super_types.h | 8
-rw-r--r-- include/linux/fserror.h | 75
-rw-r--r-- include/linux/fsnotify_backend.h | 5
-rw-r--r-- include/linux/fsverity.h | 190
-rw-r--r-- include/linux/ftrace.h | 37
-rw-r--r-- include/linux/ftrace_regs.h | 25
-rw-r--r-- include/linux/getcpu.h | 19
-rw-r--r-- include/linux/gfp.h | 60
-rw-r--r-- include/linux/gfp_types.h | 7
-rw-r--r-- include/linux/gpio/consumer.h | 36
-rw-r--r-- include/linux/highmem.h | 98
-rw-r--r-- include/linux/hippidevice.h | 40
-rw-r--r-- include/linux/hisi_acc_qm.h | 15
-rw-r--r-- include/linux/host1x.h | 2
-rw-r--r-- include/linux/hrtimer.h | 15
-rw-r--r-- include/linux/hrtimer_defs.h | 20
-rw-r--r-- include/linux/hugetlb.h | 15
-rw-r--r-- include/linux/hw_random.h | 2
-rw-r--r-- include/linux/i3c/device.h | 22
-rw-r--r-- include/linux/i3c/master.h | 11
-rw-r--r-- include/linux/ieee80211-eht.h | 13
-rw-r--r-- include/linux/ieee80211-s1g.h | 2
-rw-r--r-- include/linux/ieee80211-uhr.h | 220
-rw-r--r-- include/linux/ieee80211.h | 40
-rw-r--r-- include/linux/if_vlan.h | 51
-rw-r--r-- include/linux/ima.h | 1
-rw-r--r-- include/linux/inetdevice.h | 2
-rw-r--r-- include/linux/init_syscalls.h | 1
-rw-r--r-- include/linux/initrd.h | 2
-rw-r--r-- include/linux/instrumented.h | 17
-rw-r--r-- include/linux/interrupt.h | 26
-rw-r--r-- include/linux/io_uring.h | 14
-rw-r--r-- include/linux/io_uring_types.h | 42
-rw-r--r-- include/linux/iomap.h | 17
-rw-r--r-- include/linux/iommu-debug-pagealloc.h | 32
-rw-r--r-- include/linux/iommu.h | 14
-rw-r--r-- include/linux/ioport.h | 14
-rw-r--r-- include/linux/ipv6.h | 35
-rw-r--r-- include/linux/irq.h | 15
-rw-r--r-- include/linux/irqchip/arm-gic-v5.h | 8
-rw-r--r-- include/linux/irqchip/irq-renesas-rzt2h.h | 23
-rw-r--r-- include/linux/irqdesc.h | 17
-rw-r--r-- include/linux/irqdomain.h | 30
-rw-r--r-- include/linux/jbd2.h | 3
-rw-r--r-- include/linux/kernel.h | 211
-rw-r--r-- include/linux/kexec_handover.h | 33
-rw-r--r-- include/linux/kho/abi/kexec_handover.h | 163
-rw-r--r-- include/linux/kho/abi/luo.h | 89
-rw-r--r-- include/linux/kho/abi/memblock.h | 73
-rw-r--r-- include/linux/kho/abi/memfd.h | 6
-rw-r--r-- include/linux/khugepaged.h | 9
-rw-r--r-- include/linux/kref.h | 2
-rw-r--r-- include/linux/kthread.h | 1
-rw-r--r-- include/linux/libata.h | 76
-rw-r--r-- include/linux/list_bl.h | 2
-rw-r--r-- include/linux/list_private.h | 256
-rw-r--r-- include/linux/liveupdate.h | 147
-rw-r--r-- include/linux/local_lock.h | 59
-rw-r--r-- include/linux/local_lock_internal.h | 72
-rw-r--r-- include/linux/lockdep.h | 12
-rw-r--r-- include/linux/lockref.h | 4
-rw-r--r-- include/linux/log2.h | 2
-rw-r--r-- include/linux/lsm_hooks.h | 4
-rw-r--r-- include/linux/mailbox/mtk-cmdq-mailbox.h | 19
-rw-r--r-- include/linux/maple_tree.h | 9
-rw-r--r-- include/linux/mdio.h | 14
-rw-r--r-- include/linux/memblock.h | 4
-rw-r--r-- include/linux/memcontrol.h | 73
-rw-r--r-- include/linux/memory-failure.h | 13
-rw-r--r-- include/linux/mfd/rohm-bd72720.h | 634
-rw-r--r-- include/linux/mfd/rohm-generic.h | 1
-rw-r--r-- include/linux/mfd/samsung/core.h | 2
-rw-r--r-- include/linux/mfd/samsung/irq.h | 105
-rw-r--r-- include/linux/mfd/samsung/s2mpg10.h | 44
-rw-r--r-- include/linux/mfd/samsung/s2mpg11.h | 434
-rw-r--r-- include/linux/mfd/wm8350/core.h | 2
-rw-r--r-- include/linux/mlx5/device.h | 5
-rw-r--r-- include/linux/mlx5/driver.h | 1
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 16
-rw-r--r-- include/linux/mlx5/port.h | 2
-rw-r--r-- include/linux/mlx5/vport.h | 6
-rw-r--r-- include/linux/mm.h | 115
-rw-r--r-- include/linux/mm_types.h | 57
-rw-r--r-- include/linux/mm_types_task.h | 5
-rw-r--r-- include/linux/mmap_lock.h | 279
-rw-r--r-- include/linux/mmc/sdio_func.h | 1
-rw-r--r-- include/linux/mmdebug.h | 10
-rw-r--r-- include/linux/mmu_context.h | 2
-rw-r--r-- include/linux/mmzone.h | 19
-rw-r--r-- include/linux/mod_devicetable.h | 1
-rw-r--r-- include/linux/module.h | 27
-rw-r--r-- include/linux/moduleparam.h | 15
-rw-r--r-- include/linux/msi.h | 16
-rw-r--r-- include/linux/mutex.h | 40
-rw-r--r-- include/linux/mutex_types.h | 4
-rw-r--r-- include/linux/netdevice.h | 6
-rw-r--r-- include/linux/netfilter/nf_conntrack_proto_gre.h | 3
-rw-r--r-- include/linux/nfs4.h | 4
-rw-r--r-- include/linux/nfs_fs_sb.h | 10
-rw-r--r-- include/linux/nfs_xdr.h | 9
-rw-r--r-- include/linux/nodemask.h | 8
-rw-r--r-- include/linux/ns/ns_common_types.h | 4
-rw-r--r-- include/linux/nubus.h | 3
-rw-r--r-- include/linux/of.h | 7
-rw-r--r-- include/linux/of_irq.h | 41
-rw-r--r-- include/linux/oid_registry.h | 5
-rw-r--r-- include/linux/overflow.h | 52
-rw-r--r-- include/linux/page-isolation.h | 2
-rw-r--r-- include/linux/page_ext.h | 6
-rw-r--r-- include/linux/page_table_check.h | 69
-rw-r--r-- include/linux/panic.h | 8
-rw-r--r-- include/linux/pci-acpi.h | 3
-rw-r--r-- include/linux/pci-epc.h | 9
-rw-r--r-- include/linux/pci-epf.h | 23
-rw-r--r-- include/linux/pci-p2pdma.h | 2
-rw-r--r-- include/linux/pci-pwrctrl.h | 16
-rw-r--r-- include/linux/pci.h | 31
-rw-r--r-- include/linux/pci_ids.h | 9
-rw-r--r-- include/linux/pcs/pcs-mtk-lynxi.h | 5
-rw-r--r-- include/linux/percpu-rwsem.h | 1
-rw-r--r-- include/linux/perf_event.h | 35
-rw-r--r-- include/linux/pgtable.h | 168
-rw-r--r-- include/linux/phy.h | 72
-rw-r--r-- include/linux/phy/phy-common-props.h | 32
-rw-r--r-- include/linux/phy_port.h | 99
-rw-r--r-- include/linux/phylink.h | 12
-rw-r--r-- include/linux/platform_data/cros_ec_commands.h | 24
-rw-r--r-- include/linux/platform_data/davinci_asp.h | 3
-rw-r--r-- include/linux/platform_data/hwmon-s3c.h | 36
-rw-r--r-- include/linux/platform_data/mipi-i3c-hci.h | 15
-rw-r--r-- include/linux/platform_data/x86/asus-wmi.h | 1
-rw-r--r-- include/linux/pm.h | 2
-rw-r--r-- include/linux/posix_acl_xattr.h | 5
-rw-r--r-- include/linux/rcupdate.h | 121
-rw-r--r-- include/linux/rcupdate_trace.h | 166
-rw-r--r-- include/linux/refcount.h | 6
-rw-r--r-- include/linux/regmap.h | 14
-rw-r--r-- include/linux/regulator/driver.h | 7
-rw-r--r-- include/linux/resctrl.h | 57
-rw-r--r-- include/linux/resctrl_types.h | 11
-rw-r--r-- include/linux/restart_block.h | 4
-rw-r--r-- include/linux/rhashtable.h | 16
-rw-r--r-- include/linux/rmap.h | 76
-rw-r--r-- include/linux/rseq.h | 11
-rw-r--r-- include/linux/rseq_entry.h | 192
-rw-r--r-- include/linux/rseq_types.h | 32
-rw-r--r-- include/linux/rv.h | 4
-rw-r--r-- include/linux/rwlock.h | 19
-rw-r--r-- include/linux/rwlock_api_smp.h | 43
-rw-r--r-- include/linux/rwlock_rt.h | 43
-rw-r--r-- include/linux/rwlock_types.h | 10
-rw-r--r-- include/linux/rwsem.h | 78
-rw-r--r-- include/linux/sched.h | 77
-rw-r--r-- include/linux/sched/cputime.h | 18
-rw-r--r-- include/linux/sched/isolation.h | 16
-rw-r--r-- include/linux/sched/signal.h | 16
-rw-r--r-- include/linux/sched/task.h | 6
-rw-r--r-- include/linux/sched/wake_q.h | 3
-rw-r--r-- include/linux/scmi_imx_protocol.h | 2
-rw-r--r-- include/linux/screen_info.h | 2
-rw-r--r-- include/linux/seq_file.h | 1
-rw-r--r-- include/linux/seqlock.h | 57
-rw-r--r-- include/linux/seqlock_types.h | 5
-rw-r--r-- include/linux/skbuff.h | 6
-rw-r--r-- include/linux/skbuff_ref.h | 10
-rw-r--r-- include/linux/skmsg.h | 70
-rw-r--r-- include/linux/slab.h | 146
-rw-r--r-- include/linux/smp.h | 1
-rw-r--r-- include/linux/soc/apple/rtkit.h | 7
-rw-r--r-- include/linux/soc/mediatek/mtk-cmdq.h | 93
-rw-r--r-- include/linux/soc/qcom/apr.h | 6
-rw-r--r-- include/linux/soc/qcom/llcc-qcom.h | 4
-rw-r--r-- include/linux/soc/qcom/mdt_loader.h | 22
-rw-r--r-- include/linux/soc/qcom/ubwc.h | 1
-rw-r--r-- include/linux/spi/spi-mem.h | 14
-rw-r--r-- include/linux/spi/spi.h | 39
-rw-r--r-- include/linux/spinlock.h | 119
-rw-r--r-- include/linux/spinlock_api_smp.h | 34
-rw-r--r-- include/linux/spinlock_api_up.h | 112
-rw-r--r-- include/linux/spinlock_rt.h | 36
-rw-r--r-- include/linux/spinlock_types.h | 10
-rw-r--r-- include/linux/spinlock_types_raw.h | 5
-rw-r--r-- include/linux/srcu.h | 73
-rw-r--r-- include/linux/srcutiny.h | 6
-rw-r--r-- include/linux/srcutree.h | 10
-rw-r--r-- include/linux/stmmac.h | 20
-rw-r--r-- include/linux/string.h | 4
-rw-r--r-- include/linux/sunrpc/debug.h | 2
-rw-r--r-- include/linux/sunrpc/svc.h | 13
-rw-r--r-- include/linux/sunrpc/svcsock.h | 2
-rw-r--r-- include/linux/sunrpc/xdrgen/_builtins.h | 80
-rw-r--r-- include/linux/sunrpc/xdrgen/nfs4_1.h | 112
-rw-r--r-- include/linux/swap.h | 71
-rw-r--r-- include/linux/syscalls.h | 4
-rw-r--r-- include/linux/sysfb.h | 23
-rw-r--r-- include/linux/sysfs.h | 13
-rw-r--r-- include/linux/tcp.h | 9
-rw-r--r-- include/linux/tee_core.h | 9
-rw-r--r-- include/linux/tee_drv.h | 12
-rw-r--r-- include/linux/thread_info.h | 16
-rw-r--r-- include/linux/tick.h | 2
-rw-r--r-- include/linux/timecounter.h | 31
-rw-r--r-- include/linux/tnum.h | 5
-rw-r--r-- include/linux/trace_printk.h | 204
-rw-r--r-- include/linux/transport_class.h | 7
-rw-r--r-- include/linux/types.h | 8
-rw-r--r-- include/linux/u64_stats_sync.h | 25
-rw-r--r-- include/linux/udp.h | 2
-rw-r--r-- include/linux/uio.h | 3
-rw-r--r-- include/linux/unwind_user.h | 18
-rw-r--r-- include/linux/uprobes.h | 1
-rw-r--r-- include/linux/usb/usbnet.h | 1
-rw-r--r-- include/linux/usb/uvc.h | 8
-rw-r--r-- include/linux/util_macros.h | 2
-rw-r--r-- include/linux/vfio_pci_core.h | 13
-rw-r--r-- include/linux/virtio_vsock.h | 9
-rw-r--r-- include/linux/vm_event_item.h | 8
-rw-r--r-- include/linux/vmstat.h | 8
-rw-r--r-- include/linux/workqueue.h | 2
-rw-r--r-- include/linux/writeback.h | 4
-rw-r--r-- include/linux/ww_mutex.h | 22
-rw-r--r-- include/linux/wwan.h | 2
-rw-r--r-- include/linux/xattr.h | 2
-rw-r--r-- include/linux/zsmalloc.h | 8
-rw-r--r-- include/media/dvb_vb2.h | 17
-rw-r--r-- include/media/media-device.h | 9
-rw-r--r-- include/media/media-devnode.h | 4
-rw-r--r-- include/media/media-request.h | 40
-rw-r--r-- include/media/v4l2-ctrls.h | 3
-rw-r--r-- include/media/v4l2-fwnode.h | 8
-rw-r--r-- include/media/v4l2-ioctl.h | 15
-rw-r--r-- include/media/v4l2-mem2mem.h | 21
-rw-r--r-- include/media/videobuf2-core.h | 23
-rw-r--r-- include/media/videobuf2-v4l2.h | 18
-rw-r--r-- include/net/af_vsock.h | 61
-rw-r--r-- include/net/ax25.h | 8
-rw-r--r-- include/net/bluetooth/bluetooth.h | 47
-rw-r--r-- include/net/bluetooth/hci.h | 336
-rw-r--r-- include/net/bluetooth/hci_core.h | 9
-rw-r--r-- include/net/bluetooth/hci_sync.h | 3
-rw-r--r-- include/net/bluetooth/l2cap.h | 3
-rw-r--r-- include/net/bonding.h | 2
-rw-r--r-- include/net/can.h | 28
-rw-r--r-- include/net/cfg80211.h | 117
-rw-r--r-- include/net/dsa.h | 12
-rw-r--r-- include/net/dst.h | 6
-rw-r--r-- include/net/flow_offload.h | 34
-rw-r--r-- include/net/gro.h | 5
-rw-r--r-- include/net/inet6_connection_sock.h | 4
-rw-r--r-- include/net/inet_ecn.h | 20
-rw-r--r-- include/net/inet_sock.h | 24
-rw-r--r-- include/net/ip6_route.h | 6
-rw-r--r-- include/net/ipv6.h | 145
-rw-r--r-- include/net/iucv/iucv.h | 207
-rw-r--r-- include/net/l3mdev.h | 7
-rw-r--r-- include/net/mac80211.h | 140
-rw-r--r-- include/net/mana/gdma.h | 54
-rw-r--r-- include/net/mana/mana.h | 3
-rw-r--r-- include/net/net_namespace.h | 5
-rw-r--r-- include/net/netdev_queues.h | 73
-rw-r--r-- include/net/netdev_rx_queue.h | 2
-rw-r--r-- include/net/netfilter/nf_conntrack.h | 1
-rw-r--r-- include/net/netfilter/nf_conntrack_count.h | 1
-rw-r--r-- include/net/netfilter/nf_conntrack_tuple.h | 2
-rw-r--r-- include/net/netfilter/nf_queue.h | 4
-rw-r--r-- include/net/netfilter/nf_tables.h | 7
-rw-r--r-- include/net/netfilter/nf_tables_ipv6.h | 4
-rw-r--r-- include/net/netmem.h | 32
-rw-r--r-- include/net/netns/ipv6.h | 10
-rw-r--r-- include/net/netns/vsock.h | 21
-rw-r--r-- include/net/page_pool/types.h | 1
-rw-r--r-- include/net/phy/realtek_phy.h | 7
-rw-r--r-- include/net/pkt_sched.h | 24
-rw-r--r-- include/net/request_sock.h | 11
-rw-r--r-- include/net/sch_priv.h | 27
-rw-r--r-- include/net/sock.h | 11
-rw-r--r-- include/net/tcp.h | 103
-rw-r--r-- include/net/tcp_ecn.h | 103
-rw-r--r-- include/net/udp.h | 8
-rw-r--r-- include/net/udp_tunnel.h | 32
-rw-r--r-- include/net/xfrm.h | 10
-rw-r--r-- include/net/xsk_buff_pool.h | 5
-rw-r--r-- include/ras/ras_event.h | 12
-rw-r--r-- include/rdma/ib_verbs.h | 70
-rw-r--r-- include/rdma/rdma_cm.h | 17
-rw-r--r-- include/rdma/rw.h | 22
-rw-r--r-- include/rdma/uverbs_types.h | 1
-rw-r--r-- include/rv/automata.h | 132
-rw-r--r-- include/rv/da_monitor.h | 887
-rw-r--r-- include/scsi/libfc.h | 3
-rw-r--r-- include/scsi/libiscsi.h | 3
-rw-r--r-- include/scsi/libsas.h | 3
-rw-r--r-- include/scsi/scsi.h | 13
-rw-r--r-- include/scsi/scsi_driver.h | 7
-rw-r--r-- include/scsi/scsi_host.h | 12
-rw-r--r-- include/scsi/scsi_transport_fc.h | 12
-rw-r--r-- include/soc/spacemit/ccu.h | 21
-rw-r--r-- include/soc/spacemit/k1-syscon.h | 12
-rw-r--r-- include/soc/spacemit/k3-syscon.h | 273
-rw-r--r-- include/soc/tegra/pmc.h | 60
-rw-r--r-- include/sound/ak4641.h | 23
-rw-r--r-- include/sound/cs-amp-lib.h | 3
-rw-r--r-- include/sound/cs35l56.h | 37
-rw-r--r-- include/sound/sdca_function.h | 4
-rw-r--r-- include/sound/sdca_interrupts.h | 7
-rw-r--r-- include/sound/sdca_jack.h | 32
-rw-r--r-- include/sound/seq_device.h | 2
-rw-r--r-- include/sound/soc-acpi-intel-ssp-common.h | 4
-rw-r--r-- include/sound/soc-component.h | 29
-rw-r--r-- include/sound/soc-dapm.h | 44
-rw-r--r-- include/sound/soc.h | 4
-rw-r--r-- include/sound/sof.h | 3
-rw-r--r-- include/sound/sof/ipc4/header.h | 75
-rw-r--r-- include/sound/tas2781.h | 3
-rw-r--r-- include/target/target_core_base.h | 4
-rw-r--r-- include/trace/events/cgroup.h | 2
-rw-r--r-- include/trace/events/damon.h | 41
-rw-r--r-- include/trace/events/dma_buf.h | 159
-rw-r--r-- include/trace/events/erofs.h | 10
-rw-r--r-- include/trace/events/ext4.h | 8
-rw-r--r-- include/trace/events/huge_memory.h | 3
-rw-r--r-- include/trace/events/mptcp.h | 80
-rw-r--r-- include/trace/events/pci.h | 129
-rw-r--r-- include/trace/events/tcp.h | 2
-rw-r--r-- include/trace/events/vmscan.h | 51
-rw-r--r-- include/trace/events/writeback.h | 7
-rw-r--r-- include/uapi/asm-generic/errno.h | 2
-rw-r--r-- include/uapi/asm-generic/unistd.h | 5
-rw-r--r-- include/uapi/drm/amdgpu_drm.h | 25
-rw-r--r-- include/uapi/drm/amdxdna_accel.h | 8
-rw-r--r-- include/uapi/drm/panfrost_drm.h | 76
-rw-r--r-- include/uapi/drm/panthor_drm.h | 157
-rw-r--r-- include/uapi/drm/rocket_accel.h | 98
-rw-r--r-- include/uapi/drm/xe_drm.h | 95
-rw-r--r-- include/uapi/linux/bpf.h | 28
-rw-r--r-- include/uapi/linux/btrfs.h | 1
-rw-r--r-- include/uapi/linux/btrfs_tree.h | 34
-rw-r--r-- include/uapi/linux/dpll.h | 1
-rw-r--r-- include/uapi/linux/elf.h | 2
-rw-r--r-- include/uapi/linux/ethtool.h | 28
-rw-r--r-- include/uapi/linux/hyperv.h | 2
-rw-r--r-- include/uapi/linux/if_alg.h | 2
-rw-r--r-- include/uapi/linux/if_link.h | 1
-rw-r--r-- include/uapi/linux/io_uring.h | 24
-rw-r--r-- include/uapi/linux/io_uring/bpf_filter.h | 62
-rw-r--r-- include/uapi/linux/iommufd.h | 39
-rw-r--r-- include/uapi/linux/kfd_ioctl.h | 16
-rw-r--r-- include/uapi/linux/kfd_sysfs.h | 3
-rw-r--r-- include/uapi/linux/kvm.h | 3
-rw-r--r-- include/uapi/linux/landlock.h | 30
-rw-r--r-- include/uapi/linux/magic.h | 1
-rw-r--r-- include/uapi/linux/mempolicy.h | 3
-rw-r--r-- include/uapi/linux/mount.h | 13
-rw-r--r-- include/uapi/linux/mptcp_pm.h | 2
-rw-r--r-- include/uapi/linux/netfilter_bridge.h | 9
-rw-r--r-- include/uapi/linux/netfilter_ipv4.h | 9
-rw-r--r-- include/uapi/linux/netfilter_ipv6.h | 7
-rw-r--r-- include/uapi/linux/nfs.h | 2
-rw-r--r-- include/uapi/linux/nfsd_netlink.h | 1
-rw-r--r-- include/uapi/linux/nilfs2_api.h | 4
-rw-r--r-- include/uapi/linux/nilfs2_ondisk.h | 163
-rw-r--r-- include/uapi/linux/nl80211.h | 107
-rw-r--r-- include/uapi/linux/pci.h | 7
-rw-r--r-- include/uapi/linux/pci_regs.h | 69
-rw-r--r-- include/uapi/linux/pcitest.h | 1
-rw-r--r-- include/uapi/linux/perf_event.h | 27
-rw-r--r-- include/uapi/linux/pkt_sched.h | 1
-rw-r--r-- include/uapi/linux/prctl.h | 37
-rw-r--r-- include/uapi/linux/rseq.h | 41
-rw-r--r-- include/uapi/linux/shm.h | 3
-rw-r--r-- include/uapi/linux/stddef.h | 4
-rw-r--r-- include/uapi/linux/sysctl.h | 3
-rw-r--r-- include/uapi/linux/taskstats.h | 13
-rw-r--r-- include/uapi/linux/tcp.h | 26
-rw-r--r-- include/uapi/linux/typelimits.h | 8
-rw-r--r-- include/uapi/linux/ublk_cmd.h | 121
-rw-r--r-- include/uapi/linux/v4l2-controls.h | 63
-rw-r--r-- include/uapi/linux/vbox_vmmdev_types.h | 4
-rw-r--r-- include/uapi/linux/vfio.h | 4
-rw-r--r-- include/uapi/linux/videodev2.h | 3
-rw-r--r-- include/uapi/linux/vmclock-abi.h | 20
-rw-r--r-- include/uapi/rdma/bnxt_re-abi.h | 16
-rw-r--r-- include/uapi/rdma/ib_user_ioctl_cmds.h | 16
-rw-r--r-- include/uapi/rdma/mana-abi.h | 3
-rw-r--r-- include/uapi/scsi/scsi_bsg_ufs.h | 17
-rw-r--r-- include/uapi/sound/sof/tokens.h | 6
-rw-r--r-- include/ufs/ufs.h | 5
-rw-r--r-- include/ufs/ufshcd.h | 6
-rw-r--r-- include/ufs/ufshci.h | 1
-rw-r--r-- include/vdso/gettime.h | 1
-rw-r--r-- include/vdso/unaligned.h | 41
-rw-r--r-- include/video/edid.h | 4
-rw-r--r-- include/xen/xen.h | 2
551 files changed, 27474 insertions, 5457 deletions
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e65a2afe9250..49d1749f30bb 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20250807
+#define ACPI_CA_VERSION 0x20251212
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7f35eb0e8458..4e15583e0d25 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -37,6 +37,7 @@
#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */
+#define ACPI_SIG_DTPR "DTPR" /* DMA TXT Protection Ranges table */
#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
#define ACPI_SIG_EINJ "EINJ" /* Error Injection table */
#define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */
@@ -1001,6 +1002,262 @@ struct acpi_drtm_dps_id {
/*******************************************************************************
*
+ * DTPR - DMA TXT Protection Ranges Table
+ * Version 1
+ *
+ * Conforms to "Intel® Trusted Execution Technology (Intel® TXT) DMA Protection
+ * Ranges",
+ * Revision 0.73, August 2021
+ *
+ ******************************************************************************/
+
+struct acpi_table_dtpr {
+ struct acpi_table_header header;
+ u32 flags; /* 36 */
+ u32 ins_cnt;
+};
+
+struct acpi_tpr_array {
+ u64 base;
+};
+
+struct acpi_tpr_instance {
+ u32 flags;
+ u32 tpr_cnt;
+};
+
+struct acpi_tpr_aux_sr {
+ u32 srl_cnt;
+};
+
+/*
+ * TPRn_BASE (ACPI_TPRN_BASE_REG)
+ *
+ * Specifies the start address of the TPRn region. The TPR region address and
+ * size must be specified with 1MB resolution. These bits are compared against
+ * TPRn_LIMIT[63:20], applied to the incoming address, to determine whether an
+ * access falls within the TPRn-defined region.
+ *
+ * Minimal TPRn_Base resolution is 1MB. The value is applied to the incoming
+ * address to determine whether an access falls within the TPRn-defined region.
+ * The width is determined by the bus width, which can be obtained via CPUID
+ * function 0x80000008.
+ */
+
+typedef u64 ACPI_TPRN_BASE_REG;
+
+/* TPRn_BASE Register Bit Masks */
+
+/* Bit 3 - RW: access: 1 == RO, 0 == RW register (for TPR must be RW) */
+#define ACPI_TPRN_BASE_RW_SHIFT 3
+
+#define ACPI_TPRN_BASE_RW_MASK ((u64) 1 << ACPI_TPRN_BASE_RW_SHIFT)
+
+/*
+ * Bit 4 - Enable: 0 == TPRn address range enabled;
+ * 1 == TPRn address range disabled.
+ */
+#define ACPI_TPRN_BASE_ENABLE_SHIFT 4
+
+#define ACPI_TPRN_BASE_ENABLE_MASK ((u64) 1 << ACPI_TPRN_BASE_ENABLE_SHIFT)
+
+/* Bits 63:20 - TPRn base address */
+#define ACPI_TPRN_BASE_ADDR_SHIFT 20
+
+#define ACPI_TPRN_BASE_ADDR_MASK ((u64) 0xFFFFFFFFFFF << \
+ ACPI_TPRN_BASE_ADDR_SHIFT)
+
+/* TPRn_BASE Register Bit Handlers */
+
+/*
+ * GET_TPRN_BASE_RW:
+ *
+ * Read RW bit from TPRn Base register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ *
+ * Output:
+ *
+ * Returns RW bit value (u64).
+ */
+#define GET_TPRN_BASE_RW(reg) (((u64) reg & ACPI_TPRN_BASE_RW_MASK) >> \
+ ACPI_TPRN_BASE_RW_SHIFT)
+
+/*
+ * GET_TPRN_BASE_ENABLE:
+ *
+ * Read Enable bit from TPRn Base register - bit 4.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ *
+ * Output:
+ *
+ * Returns Enable bit value (u64).
+ */
+#define GET_TPRN_BASE_ENABLE(reg) (((u64) reg & ACPI_TPRN_BASE_ENABLE_MASK) \
+ >> ACPI_TPRN_BASE_ENABLE_SHIFT)
+
+/*
+ * GET_TPRN_BASE_ADDR:
+ *
+ * Read TPRn Base Register address from bits 63:20.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ *
+ * Output:
+ *
+ * Returns TPRn Base Register address (u64).
+ */
+#define GET_TPRN_BASE_ADDR(reg) (((u64) reg & ACPI_TPRN_BASE_ADDR_MASK) \
+ >> ACPI_TPRN_BASE_ADDR_SHIFT)
+
+/*
+ * SET_TPRN_BASE_RW:
+ *
+ * Set RW bit in TPRn Base register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ * - val (represents RW value to be set (u64))
+ */
+#define SET_TPRN_BASE_RW(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_BASE_RW_SHIFT, \
+ ACPI_TPRN_BASE_RW_MASK, val);
+
+/*
+ * SET_TPRN_BASE_ENABLE:
+ *
+ * Set Enable bit in TPRn Base register - bit 4.
+ *
+ * Input:
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ * - val (represents Enable value to be set (u64))
+ */
+#define SET_TPRN_BASE_ENABLE(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_BASE_ENABLE_SHIFT, \
+ ACPI_TPRN_BASE_ENABLE_MASK, val);
+
+/*
+ * SET_TPRN_BASE_ADDR:
+ *
+ * Set TPRn Base Register address - bits 63:20
+ *
+ * Input
+ * - reg (represents TPRn Base Register (ACPI_TPRN_BASE_REG))
+ * - val (represents address value to be set (u64))
+ */
+#define SET_TPRN_BASE_ADDR(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_BASE_ADDR_SHIFT, \
+ ACPI_TPRN_BASE_ADDR_MASK, val);
+
+/*
+ * TPRn_LIMIT
+ *
+ * This register defines an isolated region of memory that can be enabled
+ * to prohibit certain system agents from accessing memory. When an agent
+ * sends a request upstream, whether snooped or not, a TPR prevents that
+ * transaction from changing the state of memory.
+ *
+ * Minimal TPRn_Limit resolution is 1MB. Width is determined by a bus width.
+ */
+
+typedef u64 ACPI_TPRN_LIMIT_REG;
+
+/* TPRn_LIMIT Register Bit Masks */
+
+/* Bit 3 - RW: access: 1 == RO, 0 == RW register (for TPR must be RW) */
+#define ACPI_TPRN_LIMIT_RW_SHIFT 3
+
+#define ACPI_TPRN_LIMIT_RW_MASK ((u64) 1 << ACPI_TPRN_LIMIT_RW_SHIFT)
+
+/* Bits 63:20 - TPRn limit address */
+#define ACPI_TPRN_LIMIT_ADDR_SHIFT 20
+
+#define ACPI_TPRN_LIMIT_ADDR_MASK ((u64) 0xFFFFFFFFFFF << \
+ ACPI_TPRN_LIMIT_ADDR_SHIFT)
+
+/* TPRn_LIMIT Register Bit Handlers */
+
+/*
+ * GET_TPRN_LIMIT_RW:
+ *
+ * Read RW bit from TPRn Limit register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ *
+ * Output:
+ *
+ * Returns RW bit value (u64).
+ */
+#define GET_TPRN_LIMIT_RW(reg) (((u64) reg & ACPI_TPRN_LIMIT_RW_MASK) \
+ >> ACPI_TPRN_LIMIT_RW_SHIFT)
+
+/*
+ * GET_TPRN_LIMIT_ADDR:
+ *
+ * Read TPRn Limit Register address from bits 63:20.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ *
+ * Output:
+ *
+ * Returns TPRn Limit Register address (u64).
+ */
+#define GET_TPRN_LIMIT_ADDR(reg) (((u64) reg & ACPI_TPRN_LIMIT_ADDR_MASK) \
+ >> ACPI_TPRN_LIMIT_ADDR_SHIFT)
+
+/*
+ * SET_TPRN_LIMIT_RW:
+ *
+ * Set RW bit in TPRn Limit register - bit 3.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ * - val (represents RW value to be set (u64))
+ */
+#define SET_TPRN_LIMIT_RW(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_LIMIT_RW_SHIFT, \
+ ACPI_TPRN_LIMIT_RW_MASK, val);
+
+/*
+ * SET_TPRN_LIMIT_ADDR:
+ *
+ * Set TPRn Limit Register address - bits 63:20.
+ *
+ * Input:
+ * - reg (represents TPRn Limit Register (ACPI_TPRN_LIMIT_REG))
+ * - val (represents address value to be set (u64))
+ */
+#define SET_TPRN_LIMIT_ADDR(reg, val) ACPI_REGISTER_INSERT_VALUE(reg, \
+ ACPI_TPRN_LIMIT_ADDR_SHIFT, \
+ ACPI_TPRN_LIMIT_ADDR_MASK, val);
+
+/*
+ * SERIALIZE_REQUEST
+ *
+ * This register is used to request serialization of non-coherent DMA
+ * transactions. The OS shall issue it before changing TPR settings
+ * (base / size).
+ */
+
+struct acpi_tpr_serialize_request {
+ u64 sr_register;
+ /*
+ * BIT 1 - Status of serialization request (RO)
+ * 0 == register idle, 1 == serialization in progress
+ * BIT 2 - Control field to initiate serialization (RW)
+ * 0 == normal, 1 == initiate serialization
+ * (self-clears to allow multiple serialization requests)
+ */
+};
+
+/*******************************************************************************
+ *
* ECDT - Embedded Controller Boot Resources Table
* Version 1
*
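
A minimal usage sketch for the TPRn base accessors added above. The register
value, variable names, and printout are illustrative only; the macros and
shifts come from this table definition (SZ_1M and pr_debug are standard kernel
helpers):

	ACPI_TPRN_BASE_REG base_reg = 0;	/* hypothetically read from a DTPR TPR entry */
	u64 start;

	/* Bits 63:20 carry the 1MB-granular start address of the TPRn region. */
	start = GET_TPRN_BASE_ADDR(base_reg) << ACPI_TPRN_BASE_ADDR_SHIFT;

	/* Per the comment above, Enable (bit 4) == 0 means the range is enabled. */
	if (!GET_TPRN_BASE_ENABLE(base_reg))
		pr_debug("TPRn region enabled at %#llx\n", start);

	/* Move the base up by 1MB; the SET_* helper takes the unshifted field value. */
	SET_TPRN_BASE_ADDR(base_reg, (start + SZ_1M) >> ACPI_TPRN_BASE_ADDR_SHIFT);
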
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index f726bce3eb84..5c0b55e7b3e4 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -31,7 +31,9 @@
#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
#define ACPI_SIG_ERDT "ERDT" /* Enhanced Resource Director Technology */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
+#define ACPI_SIG_IOVT "IOVT" /* I/O Virtualization Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
+#define ACPI_SIG_KEYP "KEYP" /* Key Programming Interface for IDE */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
@@ -680,6 +682,7 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_SMMU_V3 = 0x04,
ACPI_IORT_NODE_PMCG = 0x05,
ACPI_IORT_NODE_RMR = 0x06,
+ ACPI_IORT_NODE_IWB = 0x07,
};
struct acpi_iort_id_mapping {
@@ -858,6 +861,79 @@ struct acpi_iort_rmr_desc {
u32 reserved;
};
+struct acpi_iort_iwb {
+ u64 base_address;
+ u16 iwb_index; /* Unique IWB identifier matching the IWB GSI namespace. */
+ char device_name[]; /* Path of the IWB namespace object */
+};
+
+/*******************************************************************************
+ *
+ * IOVT - I/O Virtualization Table
+ *
+ * Conforms to "LoongArch I/O Virtualization Table",
+ * Version 0.1, October 2024
+ *
+ ******************************************************************************/
+
+struct acpi_table_iovt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 iommu_count;
+ u16 iommu_offset;
+ u8 reserved[8];
+};
+
+/* IOVT subtable header */
+
+struct acpi_iovt_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_iovt_iommu_type {
+ ACPI_IOVT_IOMMU_V1 = 0x00,
+ ACPI_IOVT_IOMMU_RESERVED = 0x01 /* 1 and greater are reserved */
+};
+
+/* IOVT subtables */
+
+struct acpi_iovt_iommu {
+ struct acpi_iovt_header header;
+ u32 flags;
+ u16 segment;
+ u16 phy_width; /* Physical Address Width */
+ u16 virt_width; /* Virtual Address Width */
+ u16 max_page_level;
+ u64 page_size;
+ u32 device_id;
+ u64 base_address;
+ u32 address_space_size;
+ u8 interrupt_type;
+ u8 reserved[3];
+ u32 gsi_number;
+ u32 proximity_domain;
+ u32 max_device_num;
+ u32 device_entry_num;
+ u32 device_entry_offset;
+};
+
+struct acpi_iovt_device_entry {
+ u8 type;
+ u8 length;
+ u8 flags;
+ u8 reserved[3];
+ u16 device_id;
+};
+
+enum acpi_iovt_device_entry_type {
+ ACPI_IOVT_DEVICE_ENTRY_SINGLE = 0x00,
+ ACPI_IOVT_DEVICE_ENTRY_START = 0x01,
+ ACPI_IOVT_DEVICE_ENTRY_END = 0x02,
+ ACPI_IOVT_DEVICE_ENTRY_RESERVED = 0x03 /* 3 and greater are reserved */
+};
+
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -1067,6 +1143,64 @@ struct acpi_ivrs_memory {
/*******************************************************************************
*
+ * KEYP - Key Programming Interface for Root Complex Integrity and Data
+ * Encryption (IDE)
+ * Version 1
+ *
+ * Conforms to "Key Programming Interface for Root Complex Integrity and Data
+ * Encryption (IDE)" document. See under ACPI-Related Documents.
+ *
+ ******************************************************************************/
+struct acpi_table_keyp {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+};
+
+/* KEYP common subtable header */
+
+struct acpi_keyp_common_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_keyp_type {
+ ACPI_KEYP_TYPE_CONFIG_UNIT = 0,
+};
+
+/* Root Port Information Structure */
+
+struct acpi_keyp_rp_info {
+ u16 segment;
+ u8 bus;
+ u8 devfn;
+};
+
+/* Key Configuration Unit Structure */
+
+struct acpi_keyp_config_unit {
+ struct acpi_keyp_common_header header;
+ u8 protocol_type;
+ u8 version;
+ u8 root_port_count;
+ u8 flags;
+ u64 register_base_address;
+ struct acpi_keyp_rp_info rp_info[];
+};
+
+enum acpi_keyp_protocol_type {
+ ACPI_KEYP_PROTO_TYPE_INVALID = 0,
+ ACPI_KEYP_PROTO_TYPE_PCIE,
+ ACPI_KEYP_PROTO_TYPE_CXL,
+ ACPI_KEYP_PROTO_TYPE_RESERVED
+};
+
+#define ACPI_KEYP_F_TVM_USABLE (1)
+
+/*******************************************************************************
+ *
* LPIT - Low Power Idle Table
*
* Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014.
@@ -1167,7 +1301,10 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_IMSIC = 25,
ACPI_MADT_TYPE_APLIC = 26,
ACPI_MADT_TYPE_PLIC = 27,
- ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */
+ ACPI_MADT_TYPE_GICV5_IRS = 28,
+ ACPI_MADT_TYPE_GICV5_ITS = 29,
+ ACPI_MADT_TYPE_GICV5_ITS_TRANSLATE = 30,
+ ACPI_MADT_TYPE_RESERVED = 31, /* 31 to 0x7F are reserved */
ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
@@ -1289,7 +1426,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 + ACPI 6.5 changes) */
+/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 + ACPI 6.5 + ACPI 6.7 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -1310,6 +1447,8 @@ struct acpi_madt_generic_interrupt {
u8 reserved2[1];
u16 spe_interrupt; /* ACPI 6.3 */
u16 trbe_interrupt; /* ACPI 6.5 */
+ u16 iaffid; /* ACPI 6.7 */
+ u32 irs_id;
};
/* Masks for Flags field above */
@@ -1332,7 +1471,7 @@ struct acpi_madt_generic_distributor {
u8 reserved2[3]; /* reserved - must be zero */
};
-/* Values for Version field above */
+/* Values for Version field above and Version field in acpi_madt_gicv5_irs */
enum acpi_madt_gic_version {
ACPI_MADT_GIC_VERSION_NONE = 0,
@@ -1340,7 +1479,8 @@ enum acpi_madt_gic_version {
ACPI_MADT_GIC_VERSION_V2 = 2,
ACPI_MADT_GIC_VERSION_V3 = 3,
ACPI_MADT_GIC_VERSION_V4 = 4,
- ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_MADT_GIC_VERSION_V5 = 5,
+ ACPI_MADT_GIC_VERSION_RESERVED = 6 /* 6 and greater are reserved */
};
/* 13: Generic MSI Frame (ACPI 5.1) */
@@ -1611,6 +1751,41 @@ struct acpi_madt_plic {
u32 gsi_base;
};
+/* 28: Arm GICv5 IRS (ACPI 6.7) */
+struct acpi_madt_gicv5_irs {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 irs_id;
+ u32 flags;
+ u32 reserved2;
+ u64 config_base_address;
+ u64 setlpi_base_address;
+};
+
+#define ACPI_MADT_IRS_NON_COHERENT (1)
+
+/* 29: Arm GICv5 ITS Config Frame (ACPI 6.7) */
+struct acpi_madt_gicv5_translator {
+ struct acpi_subtable_header header;
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
+ u32 translator_id;
+ u64 base_address;
+};
+
+#define ACPI_MADT_GICV5_ITS_NON_COHERENT (1)
+
+/* 30: Arm GICv5 ITS Translate Frame (ACPI 6.7) */
+struct acpi_madt_gicv5_translate_frame {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 linked_translator_id;
+ u32 translate_frame_id;
+ u32 reserved2;
+ u64 base_address;
+};
+
/* 80: OEM data */
struct acpi_madt_oem_data {
@@ -2826,6 +3001,15 @@ struct acpi_pptt_cache {
/* 1: Cache Type Structure for PPTT version 3 */
struct acpi_pptt_cache_v1 {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 flags;
+ u32 next_level_of_cache;
+ u32 size;
+ u32 number_of_sets;
+ u8 associativity;
+ u8 attributes;
+ u16 line_size;
u32 cache_id;
};
@@ -3065,6 +3249,8 @@ struct acpi_ras2_patrol_scrub_param {
u32 flags;
u32 scrub_params_out;
u32 scrub_params_in;
+ u32 ext_scrub_params;
+ u8 scrub_rate_desc[256];
};
/* Masks for Flags field above */
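
A sketch of walking the new GICv5 IRS entries with the kernel's existing MADT
iterator; acpi_table_parse_madt() and the handler signature are the standard
ACPI core API, while the callback name and message are illustrative:

	static int __init gicv5_parse_irs(union acpi_subtable_headers *header,
					  const unsigned long end)
	{
		struct acpi_madt_gicv5_irs *irs =
			(struct acpi_madt_gicv5_irs *)header;

		pr_info("GICv5 IRS %u: config frame at %#llx%s\n",
			irs->irs_id, irs->config_base_address,
			(irs->flags & ACPI_MADT_IRS_NON_COHERENT) ?
				" (non-coherent)" : "");
		return 0;
	}

	/* e.g. from early interrupt-controller probing: */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GICV5_IRS, gicv5_parse_irs, 0);
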
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 79d3aa5a4bad..7ca456e88377 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -238,6 +238,7 @@ struct acpi_srat_mem_affinity {
#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */
#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
+#define ACPI_SRAT_MEM_SPEC_PURPOSE (1<<3) /* 03: Memory is intended for specific-purpose usage */
/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */
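
The new flag is tested like its siblings; a one-function sketch against
struct acpi_srat_mem_affinity (the helper name is hypothetical):

	static bool srat_mem_is_specific_purpose(const struct acpi_srat_mem_affinity *ma)
	{
		/* Only trust the flag on entries marked enabled. */
		return (ma->flags & ACPI_SRAT_MEM_ENABLED) &&
		       (ma->flags & ACPI_SRAT_MEM_SPEC_PURPOSE);
	}
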
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 25dd3e998727..b2e29da6ba0a 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -37,6 +37,11 @@
#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653"
+/* TPM */
+#define UUID_HARDWARE_INFORMATION "cf8e16a5-c1e8-4e25-b712-4f54a96702c8"
+#define UUID_START_METHOD "6bbf6cab-5463-4714-b7cd-f0203c0368d4"
+#define UUID_MEMORY_CLEAR "376054ed-cc13-4675-901c-4756d7f2d45d"
+
/* NVDIMM - NFIT table */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
@@ -71,4 +76,5 @@
#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
#define UUID_1ST_FUNCTION_ID "893f00a6-660c-494e-bcfd-3043f4fb67c0"
#define UUID_2ND_FUNCTION_ID "107ededd-d381-4fd7-8da9-08e9a6c79644"
+#define UUID_FAN_TRIP_POINTS "a7611840-99fe-41ae-a488-35c75926c8eb"
#endif /* __ACUUID_H__ */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 13fa81504844..4d644f03098e 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -39,7 +39,8 @@
/* CPPC_AUTO_ACT_WINDOW_MAX_SIG is 127, so 128 and 129 will decay to 127 when writing */
#define CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH 129
-#define CPPC_ENERGY_PERF_MAX (0xFF)
+#define CPPC_EPP_PERFORMANCE_PREF 0x00
+#define CPPC_EPP_ENERGY_EFFICIENCY_PREF 0xFF
/* Each register has the following format. */
struct cpc_reg {
@@ -119,8 +120,6 @@ struct cppc_perf_caps {
u32 lowest_nonlinear_perf;
u32 lowest_freq;
u32 nominal_freq;
- u32 energy_perf;
- bool auto_sel;
};
struct cppc_perf_ctrls {
@@ -128,6 +127,7 @@ struct cppc_perf_ctrls {
u32 min_perf;
u32 desired_perf;
u32 energy_perf;
+ bool auto_sel;
};
struct cppc_perf_fb_ctrs {
@@ -154,6 +154,7 @@ extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+extern bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu);
extern bool cppc_perf_ctrs_in_pcc(void);
extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
@@ -204,6 +205,10 @@ static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
return -EOPNOTSUPP;
}
+static inline bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
+{
+ return false;
+}
static inline bool cppc_perf_ctrs_in_pcc(void)
{
return false;
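
With energy_perf and auto_sel now living in struct cppc_perf_ctrls, an EPP bias
travels through cppc_set_perf(); a sketch assuming the highest_perf/lowest_perf
and max_perf fields from the unchanged parts of the two structs:

	static int cpu_prefer_energy_efficiency(int cpu)
	{
		struct cppc_perf_caps caps;
		struct cppc_perf_ctrls ctrls = {};
		int ret;

		ret = cppc_get_perf_caps(cpu, &caps);
		if (ret)
			return ret;

		/* Let autonomous selection pick anywhere in the range... */
		ctrls.min_perf = caps.lowest_perf;
		ctrls.max_perf = caps.highest_perf;
		ctrls.auto_sel = true;
		/* ...biased fully toward energy efficiency. */
		ctrls.energy_perf = CPPC_EPP_ENERGY_EFFICIENCY_PREF;

		return cppc_set_perf(cpu, &ctrls);
	}
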
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index ebd21b05fe6e..7bea522c0657 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -21,6 +21,7 @@ struct ghes {
struct acpi_hest_generic_v2 *generic_v2;
};
struct acpi_hest_generic_status *estatus;
+ unsigned int estatus_length;
unsigned long flags;
union {
struct list_head list;
@@ -29,6 +30,7 @@ struct ghes {
};
struct device *dev;
struct list_head elist;
+ void __iomem *error_status_vaddr;
};
struct ghes_estatus_node {
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index d0eccbd920e5..7146a8e9e9c2 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -417,32 +417,15 @@ static inline void acpi_processor_throttling_init(void) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
/* in processor_idle.c */
-extern struct cpuidle_driver acpi_idle_driver;
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-int acpi_processor_power_init(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr);
+void acpi_processor_power_init(struct acpi_processor *pr);
+void acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
-#else
-static inline int acpi_processor_power_init(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_power_exit(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_hotplug(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
+void acpi_processor_register_idle_driver(void);
+void acpi_processor_unregister_idle_driver(void);
+int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
@@ -465,11 +448,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
}
#endif /* CONFIG_CPU_FREQ */
-#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
-extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
-#endif
-
void acpi_processor_init_invariance_cppc(void);
#endif
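
The declarations above imply a simpler lifecycle now that power init/exit
return void; a sketch of the assumed ordering (not spelled out by the header
itself):

	/* Once, from the processor driver's init path: */
	acpi_processor_register_idle_driver();

	/* Per processor, during enumeration; can no longer fail: */
	acpi_processor_power_init(pr);

	/* Teardown mirrors init: */
	acpi_processor_power_exit(pr);
	acpi_processor_unregister_idle_driver();
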
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 295c94a3ccc1..9aff61e7b8f2 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -32,6 +32,7 @@ mandatory-y += irq_work.h
mandatory-y += kdebug.h
mandatory-y += kmap_size.h
mandatory-y += kprobes.h
+mandatory-y += kvm_types.h
mandatory-y += linkage.h
mandatory-y += local.h
mandatory-y += local64.h
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 100d24b02e52..f22ccfc0df98 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -10,7 +10,7 @@
#include <linux/types.h>
typedef struct {
- s64 counter;
+ s64 __aligned(sizeof(s64)) counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
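
The annotation matters on 32-bit ABIs where a bare s64 may be 4-byte aligned
(i386) or even 2-byte aligned (m68k); a build-time check sketch using the
kernel's static_assert():

	#include <linux/build_bug.h>

	/* Holds on every arch once counter carries __aligned(sizeof(s64)). */
	static_assert(__alignof__(atomic64_t) == sizeof(s64));
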
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index cc840537885f..ddd90bbe40df 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -26,6 +26,9 @@ __NR_fremovexattr,
__NR_fchownat,
__NR_fchmodat,
#endif
+#ifdef __NR_fchmodat2
+__NR_fchmodat2,
+#endif
#ifdef __NR_chown32
__NR_chown32,
__NR_fchown32,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 7bb7b5a83ae2..fb9991f53fb6 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -4,9 +4,15 @@ __NR_readlink,
#endif
__NR_quotactl,
__NR_listxattr,
+#ifdef __NR_listxattrat
+__NR_listxattrat,
+#endif
__NR_llistxattr,
__NR_flistxattr,
__NR_getxattr,
+#ifdef __NR_getxattrat
+__NR_getxattrat,
+#endif
__NR_lgetxattr,
__NR_fgetxattr,
#ifdef __NR_readlinkat
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 0f2dcbbfee2f..151d267a496b 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -28,7 +28,7 @@ struct rqspinlock {
*/
struct bpf_res_spin_lock {
u32 val;
-};
+} __aligned(__alignof__(struct rqspinlock));
struct qspinlock;
#ifdef CONFIG_QUEUED_SPINLOCKS
@@ -191,7 +191,7 @@ static __always_inline int res_spin_lock(rqspinlock_t *lock)
#else
-#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+#define res_spin_lock(lock) ({ grab_held_lock_entry(lock); resilient_tas_spin_lock(lock); })
#endif /* CONFIG_QUEUED_SPINLOCKS */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 4d679d2a206b..4aeac0c3d3f0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -213,7 +213,7 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
+#ifndef CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE
static inline void __tlb_remove_table(void *table)
{
struct ptdesc *ptdesc = (struct ptdesc *)table;
@@ -287,8 +287,7 @@ struct mmu_gather_batch {
*/
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
- bool delay_rmap, int page_size);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size);
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
unsigned int nr_pages, bool delay_rmap);
@@ -510,7 +509,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
- if (__tlb_remove_page_size(tlb, page, false, page_size))
+ if (__tlb_remove_page_size(tlb, page, page_size))
tlb_flush_mmu(tlb);
}
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 4dbe715be65b..9865ba48c5b1 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,11 +45,7 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NUMA
- #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
- #else
- #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
- #endif
+#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
@@ -61,7 +57,7 @@
cpumask_of_node(pcibus_to_node(bus)))
#endif
-#endif /* CONFIG_NUMA */
+#endif /* !CONFIG_NUMA */
#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8ca130af301f..eeb070f330bd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -972,7 +972,8 @@
#define RUNTIME_CONST_VARIABLES \
RUNTIME_CONST(shift, d_hash_shift) \
RUNTIME_CONST(ptr, dentry_hashtable) \
- RUNTIME_CONST(ptr, __dentry_cache)
+ RUNTIME_CONST(ptr, __dentry_cache) \
+ RUNTIME_CONST(ptr, __names_cache)
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 9339da7c20a8..cbf1cc96db52 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -19,6 +19,103 @@
#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
/*
+ * The POWER8 VSX optimized AES assembly code is borrowed from OpenSSL and
+ * inherits OpenSSL's AES_KEY format, which stores the number of rounds after
+ * the round keys. That assembly code is difficult to change. So for
+ * compatibility purposes we reserve space for the extra nrounds field on PPC64.
+ *
+ * Note: when prepared for decryption, the round keys are just the reversed
+ * standard round keys, not the round keys for the Equivalent Inverse Cipher.
+ */
+struct p8_aes_key {
+ u32 rndkeys[AES_MAX_KEYLENGTH_U32];
+ int nrounds;
+};
+
+union aes_enckey_arch {
+ u32 rndkeys[AES_MAX_KEYLENGTH_U32];
+#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
+#if defined(CONFIG_PPC) && defined(CONFIG_SPE)
+ /* Used unconditionally (when SPE AES code is enabled in kconfig) */
+ u32 spe_enc_key[AES_MAX_KEYLENGTH_U32] __aligned(8);
+#elif defined(CONFIG_PPC)
+ /*
+ * Kernels that include the POWER8 VSX optimized AES code use this field
+ * when that code is usable at key preparation time. Otherwise they
+ * fall back to rndkeys. In the latter case, p8.nrounds (which doesn't
+ * overlap rndkeys) is set to 0 to differentiate the two formats.
+ */
+ struct p8_aes_key p8;
+#elif defined(CONFIG_S390)
+ /* Used when the CPU supports CPACF AES for this key's length */
+ u8 raw_key[AES_MAX_KEY_SIZE];
+#elif defined(CONFIG_SPARC64)
+ /* Used when the CPU supports the SPARC64 AES opcodes */
+ u64 sparc_rndkeys[AES_MAX_KEYLENGTH / sizeof(u64)];
+#endif
+#endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
+};
+
+union aes_invkey_arch {
+ u32 inv_rndkeys[AES_MAX_KEYLENGTH_U32];
+#ifdef CONFIG_CRYPTO_LIB_AES_ARCH
+#if defined(CONFIG_PPC) && defined(CONFIG_SPE)
+ /* Used unconditionally (when SPE AES code is enabled in kconfig) */
+ u32 spe_dec_key[AES_MAX_KEYLENGTH_U32] __aligned(8);
+#elif defined(CONFIG_PPC)
+ /* Used conditionally, analogous to aes_enckey_arch::p8 */
+ struct p8_aes_key p8;
+#endif
+#endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
+};
+
+/**
+ * struct aes_enckey - An AES key prepared for encryption
+ * @len: Key length in bytes: 16 for AES-128, 24 for AES-192, 32 for AES-256.
+ * @nrounds: Number of rounds: 10 for AES-128, 12 for AES-192, 14 for AES-256.
+ * This is '6 + @len / 4' and is cached so that AES implementations
+ * that need it don't have to recompute it for each en/decryption.
+ * @padding: Padding to make offsetof(@k) be a multiple of 16, so that aligning
+ * this struct to a 16-byte boundary results in @k also being 16-byte
+ * aligned. Users aren't required to align this struct to 16 bytes,
+ * but it may slightly improve performance.
+ * @k: This typically contains the AES round keys as an array of '@nrounds + 1'
+ * groups of four u32 words. However, architecture-specific implementations
+ * of AES may store something else here, e.g. just the raw key if it's all
+ * they need.
+ *
+ * Note that this struct is about half the size of struct aes_key. This is
+ * separate from struct aes_key so that modes that need only AES encryption
+ * (e.g. AES-GCM, AES-CTR, AES-CMAC, tweak key in AES-XTS) don't incur the time
+ * and space overhead of computing and caching the decryption round keys.
+ *
+ * Note that there's no decryption-only equivalent (i.e. "struct aes_deckey"),
+ * since (a) it's rare that modes need decryption-only, and (b) some AES
+ * implementations use the same @k for both encryption and decryption, either
+ * always or conditionally; in the latter case both @k and @inv_k are needed.
+ */
+struct aes_enckey {
+ u32 len;
+ u32 nrounds;
+ u32 padding[2];
+ union aes_enckey_arch k;
+};
+
+/**
+ * struct aes_key - An AES key prepared for encryption and decryption
+ * @aes_enckey: Common fields and the key prepared for encryption
+ * @inv_k: This generally contains the round keys for the AES Equivalent
+ * Inverse Cipher, as an array of '@nrounds + 1' groups of four u32
+ * words. However, architecture-specific implementations of AES may
+ * store something else here. For example, they may leave this field
+ * uninitialized if they use @k for both encryption and decryption.
+ */
+struct aes_key {
+ struct aes_enckey; /* Include all fields of aes_enckey. */
+ union aes_invkey_arch inv_k;
+};
+
+/*
* Please ensure that the first two fields are 16-byte aligned
* relative to the start of the structure, i.e., don't move them!
*/
@@ -28,13 +125,10 @@ struct crypto_aes_ctx {
u32 key_length;
};
-extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
-extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
-
/*
* validate key length for AES algorithms
*/
-static inline int aes_check_keylen(unsigned int keylen)
+static inline int aes_check_keylen(size_t keylen)
{
switch (keylen) {
case AES_KEYSIZE_128:
@@ -48,9 +142,6 @@ static inline int aes_check_keylen(unsigned int keylen)
return 0;
}
-int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len);
-
/**
* aes_expandkey - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
@@ -68,28 +159,177 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
+/*
+ * The following functions are temporarily exported for use by the AES mode
+ * implementations in arch/$(SRCARCH)/crypto/. These exports will go away when
+ * that code is migrated into lib/crypto/.
+ */
+#ifdef CONFIG_ARM64
+int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+#elif defined(CONFIG_PPC)
+void ppc_expand_key_128(u32 *key_enc, const u8 *key);
+void ppc_expand_key_192(u32 *key_enc, const u8 *key);
+void ppc_expand_key_256(u32 *key_enc, const u8 *key);
+void ppc_generate_decrypt_key(u32 *key_dec, u32 *key_enc, unsigned int key_len);
+void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds,
+ u32 bytes);
+void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds,
+ u32 bytes);
+void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
+ u8 *iv);
+void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes,
+ u8 *iv);
+void ppc_crypt_ctr(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
+ u8 *iv);
+void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes,
+ u8 *iv, u32 *key_twk);
+void ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes,
+ u8 *iv, u32 *key_twk);
+int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+ struct p8_aes_key *key);
+int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
+ struct p8_aes_key *key);
+void aes_p8_encrypt(const u8 *in, u8 *out, const struct p8_aes_key *key);
+void aes_p8_decrypt(const u8 *in, u8 *out, const struct p8_aes_key *key);
+void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key, u8 *iv, const int enc);
+void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key, const u8 *iv);
+void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key1,
+ const struct p8_aes_key *key2, u8 *iv);
+void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
+ const struct p8_aes_key *key1,
+ const struct p8_aes_key *key2, u8 *iv);
+#elif defined(CONFIG_SPARC64)
+void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key,
+ unsigned int key_len);
+void aes_sparc64_load_encrypt_keys_128(const u64 *key);
+void aes_sparc64_load_encrypt_keys_192(const u64 *key);
+void aes_sparc64_load_encrypt_keys_256(const u64 *key);
+void aes_sparc64_load_decrypt_keys_128(const u64 *key);
+void aes_sparc64_load_decrypt_keys_192(const u64 *key);
+void aes_sparc64_load_decrypt_keys_256(const u64 *key);
+void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len);
+void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input, u64 *output,
+ unsigned int len, u64 *iv);
+#endif
+
/**
- * aes_encrypt - Encrypt a single AES block
- * @ctx: Context struct containing the key schedule
- * @out: Buffer to store the ciphertext
- * @in: Buffer containing the plaintext
+ * aes_preparekey() - Prepare an AES key for encryption and decryption
+ * @key: (output) The key structure to initialize
+ * @in_key: The raw AES key
+ * @key_len: Length of the raw key in bytes. Should be one of AES_KEYSIZE_128,
+ * AES_KEYSIZE_192, or AES_KEYSIZE_256.
+ *
+ * This prepares an AES key for both the encryption and decryption directions of
+ * the block cipher. Typically this involves expanding the raw key into both
+ * the standard round keys and the Equivalent Inverse Cipher round keys, but
+ * some architecture-specific implementations don't do the full expansion here.
+ *
+ * The caller is responsible for zeroizing both the struct aes_key and the raw
+ * key once they are no longer needed.
+ *
+ * If you don't need decryption support, use aes_prepareenckey() instead.
+ *
+ * Return: 0 on success or -EINVAL if the given key length is invalid. No other
+ * errors are possible, so callers that always pass a valid key length
+ * don't need to check for errors.
+ *
+ * Context: Any context.
*/
-void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+int aes_preparekey(struct aes_key *key, const u8 *in_key, size_t key_len);
/**
- * aes_decrypt - Decrypt a single AES block
- * @ctx: Context struct containing the key schedule
- * @out: Buffer to store the plaintext
- * @in: Buffer containing the ciphertext
+ * aes_prepareenckey() - Prepare an AES key for encryption-only
+ * @key: (output) The key structure to initialize
+ * @in_key: The raw AES key
+ * @key_len: Length of the raw key in bytes. Should be one of AES_KEYSIZE_128,
+ * AES_KEYSIZE_192, or AES_KEYSIZE_256.
+ *
+ * This prepares an AES key for only the encryption direction of the block
+ * cipher. Typically this involves expanding the raw key into only the standard
+ * round keys, resulting in a struct about half the size of struct aes_key.
+ *
+ * The caller is responsible for zeroizing both the struct aes_enckey and the
+ * raw key once they are no longer needed.
+ *
+ * Note that while the resulting prepared key supports only AES encryption, it
+ * can still be used for decrypting in a mode of operation that uses AES in only
+ * the encryption (forward) direction, for example counter mode.
+ *
+ * Return: 0 on success or -EINVAL if the given key length is invalid. No other
+ * errors are possible, so callers that always pass a valid key length
+ * don't need to check for errors.
+ *
+ * Context: Any context.
+ */
+int aes_prepareenckey(struct aes_enckey *key, const u8 *in_key, size_t key_len);
+
+typedef union {
+ const struct aes_enckey *enc_key;
+ const struct aes_key *full_key;
+} aes_encrypt_arg __attribute__ ((__transparent_union__));
+
+/**
+ * aes_encrypt() - Encrypt a single AES block
+ * @key: The AES key, as a pointer to either an encryption-only key
+ * (struct aes_enckey) or a full, bidirectional key (struct aes_key).
+ * @out: Buffer to store the ciphertext block
+ * @in: Buffer containing the plaintext block
+ *
+ * Context: Any context.
+ */
+void aes_encrypt(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
+ const u8 in[at_least AES_BLOCK_SIZE]);
+
+/**
+ * aes_decrypt() - Decrypt a single AES block
+ * @key: The AES key, previously initialized by aes_preparekey()
+ * @out: Buffer to store the plaintext block
+ * @in: Buffer containing the ciphertext block
+ *
+ * Context: Any context.
*/
-void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+void aes_decrypt(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
+ const u8 in[at_least AES_BLOCK_SIZE]);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
+extern const u32 aes_enc_tab[256];
+extern const u32 aes_dec_tab[256];
-void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+void aescfb_encrypt(const struct aes_enckey *key, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
-void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+void aescfb_decrypt(const struct aes_enckey *key, u8 *dst, const u8 *src,
int len, const u8 iv[AES_BLOCK_SIZE]);
#endif
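A minimal caller-side sketch of the prepared-key API above, assuming only the declarations in this header (the error check may be skipped when the key length is a compile-time constant, per the kernel-doc):

#include <crypto/aes.h>
#include <linux/string.h>	/* memzero_explicit() */

static int encrypt_one_block(const u8 raw_key[AES_KEYSIZE_128],
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	struct aes_enckey key;	/* encryption-only, ~half of struct aes_key */
	int err;

	err = aes_prepareenckey(&key, raw_key, AES_KEYSIZE_128);
	if (err)
		return err;	/* only -EINVAL, for a bad key length */

	/* The transparent union lets aes_encrypt() accept either key type. */
	aes_encrypt(&key, out, in);

	memzero_explicit(&key, sizeof(key));	/* caller must zeroize */
	return 0;
}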
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
index 6b25305fe611..cb5d6fe15d40 100644
--- a/include/crypto/df_sp80090a.h
+++ b/include/crypto/df_sp80090a.h
@@ -18,7 +18,7 @@ static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
statelen + blocklen; /* temp */
}
-int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+int crypto_drbg_ctr_df(struct aes_enckey *aes,
unsigned char *df_data,
size_t bytes_to_return,
struct list_head *seedlist,
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index fd9df607a836..b524e47bd4d0 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -66,7 +66,7 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
struct aesgcm_ctx {
be128 ghash_key;
- struct crypto_aes_ctx aes_ctx;
+ struct aes_enckey aes_key;
unsigned int authsize;
};
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 2d97440028ff..9a3f28baa804 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -191,11 +191,12 @@ static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
-struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
- struct crypto_acomp_streams *s) __acquires(stream);
+#define crypto_acomp_lock_stream_bh(...) __acquire_ret(_crypto_acomp_lock_stream_bh(__VA_ARGS__), &__ret->lock)
+struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires_ret;
static inline void crypto_acomp_unlock_stream_bh(
- struct crypto_acomp_stream *stream) __releases(stream)
+ struct crypto_acomp_stream *stream) __releases(&stream->lock)
{
spin_unlock_bh(&stream->lock);
}
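For reference, the lock/unlock pair is used roughly as below (a sketch; `s` is assumed to be a previously allocated struct crypto_acomp_streams):

	struct crypto_acomp_stream *stream;

	stream = crypto_acomp_lock_stream_bh(s);  /* takes &stream->lock, disables BH */
	/* ... use stream->ctx for (de)compression ... */
	crypto_acomp_unlock_stream_bh(stream);    /* releases &stream->lock */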
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index f19ef376833f..6a1d27880615 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -45,7 +45,7 @@ struct crypto_engine {
struct list_head list;
spinlock_t queue_lock;
- struct crypto_queue queue;
+ struct crypto_queue queue __guarded_by(&queue_lock);
struct device *dev;
struct kthread_worker *kworker;
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 0cad8e7364c8..a965b6aabf61 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -242,6 +242,13 @@ static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
return crypto_tfm_ctx_dma(&tfm->base);
}
+static inline bool crypto_skcipher_tested(struct crypto_skcipher *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_skcipher_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
return req->__ctx;
diff --git a/include/crypto/mldsa.h b/include/crypto/mldsa.h
new file mode 100644
index 000000000000..3ef2676787c9
--- /dev/null
+++ b/include/crypto/mldsa.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Support for verifying ML-DSA signatures
+ *
+ * Copyright 2025 Google LLC
+ */
+#ifndef _CRYPTO_MLDSA_H
+#define _CRYPTO_MLDSA_H
+
+#include <linux/types.h>
+
+/* Identifier for an ML-DSA parameter set */
+enum mldsa_alg {
+ MLDSA44, /* ML-DSA-44 */
+ MLDSA65, /* ML-DSA-65 */
+ MLDSA87, /* ML-DSA-87 */
+};
+
+/* Lengths of ML-DSA public keys and signatures in bytes */
+#define MLDSA44_PUBLIC_KEY_SIZE 1312
+#define MLDSA65_PUBLIC_KEY_SIZE 1952
+#define MLDSA87_PUBLIC_KEY_SIZE 2592
+#define MLDSA44_SIGNATURE_SIZE 2420
+#define MLDSA65_SIGNATURE_SIZE 3309
+#define MLDSA87_SIGNATURE_SIZE 4627
+
+/**
+ * mldsa_verify() - Verify an ML-DSA signature
+ * @alg: The ML-DSA parameter set to use
+ * @sig: The signature
+ * @sig_len: Length of the signature in bytes. Should match the
+ * MLDSA*_SIGNATURE_SIZE constant associated with @alg,
+ * otherwise -EBADMSG will be returned.
+ * @msg: The message
+ * @msg_len: Length of the message in bytes
+ * @pk: The public key
+ * @pk_len: Length of the public key in bytes. Should match the
+ * MLDSA*_PUBLIC_KEY_SIZE constant associated with @alg,
+ * otherwise -EBADMSG will be returned.
+ *
+ * This verifies a signature using pure ML-DSA with the specified parameter set.
+ * The context string is assumed to be empty. This corresponds to FIPS 204
+ * Algorithm 3 "ML-DSA.Verify" with the ctx parameter set to the empty string
+ * and the lengths of the signature and key given explicitly by the caller.
+ *
+ * Context: Might sleep
+ *
+ * Return:
+ * * 0 if the signature is valid
+ * * -EBADMSG if the signature and/or public key is malformed
+ * * -EKEYREJECTED if the signature is invalid but otherwise well-formed
+ * * -ENOMEM if out of memory so the validity of the signature is unknown
+ */
+int mldsa_verify(enum mldsa_alg alg, const u8 *sig, size_t sig_len,
+ const u8 *msg, size_t msg_len, const u8 *pk, size_t pk_len);
+
+#if IS_ENABLED(CONFIG_CRYPTO_LIB_MLDSA_KUNIT_TEST)
+/* Internal function, exposed only for unit testing */
+s32 mldsa_use_hint(u8 h, s32 r, s32 gamma2);
+#endif
+
+#endif /* _CRYPTO_MLDSA_H */
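A sketch of a caller using the constants and error contract above (the check_mldsa65() wrapper and its buffers are illustrative, not part of the API):

#include <crypto/mldsa.h>
#include <linux/printk.h>

static int check_mldsa65(const u8 *sig, size_t sig_len,
			 const u8 *msg, size_t msg_len,
			 const u8 *pub, size_t pub_len)
{
	int err = mldsa_verify(MLDSA65, sig, sig_len,
			       msg, msg_len, pub, pub_len);

	if (err == -EKEYREJECTED)
		pr_warn("well-formed but invalid ML-DSA-65 signature\n");
	return err;	/* 0 on success, -EBADMSG on malformed inputs */
}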
diff --git a/include/crypto/nh.h b/include/crypto/nh.h
new file mode 100644
index 000000000000..465e85bf203f
--- /dev/null
+++ b/include/crypto/nh.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NH hash function for Adiantum
+ */
+
+#ifndef _CRYPTO_NH_H
+#define _CRYPTO_NH_H
+
+#include <linux/types.h>
+
+/* NH parameterization: */
+
+/* Endianness: little */
+/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
+
+/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
+#define NH_PAIR_STRIDE 2
+#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
+
+/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
+#define NH_NUM_PASSES 4
+#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
+
+/* Max message size: 1024 bytes (32x compression factor) */
+#define NH_NUM_STRIDES 64
+#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
+#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
+#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
+ NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
+#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
+
+/**
+ * nh() - NH hash function for Adiantum
+ * @key: The key. @message_len + 48 bytes of it are used. This is NH_KEY_BYTES
+ * if @message_len is at its maximum of NH_MESSAGE_BYTES.
+ * @message: The message
+ * @message_len: The message length in bytes. Must be a multiple of 16
+ * (NH_MESSAGE_UNIT) and at most 1024 (NH_MESSAGE_BYTES).
+ * @hash: (output) The resulting hash value
+ *
+ * Note: the pseudocode for NH in the Adiantum paper iterates over 1024-byte
+ * segments of the message, computes a 32-byte hash for each, and returns all
+ * the hashes concatenated together. In contrast, this function just hashes one
+ * segment and returns one hash. It's the caller's responsibility to call this
+ * function for each 1024-byte segment and collect all the hashes.
+ *
+ * Context: Any context.
+ */
+void nh(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES]);
+
+#endif /* _CRYPTO_NH_H */
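Per the kernel-doc note, segmenting the message is the caller's job; a sketch, assuming the total length is a multiple of NH_MESSAGE_UNIT and the hashes array has one entry per segment:

#include <crypto/nh.h>
#include <linux/minmax.h>

static void nh_all_segments(const u32 *key, const u8 *msg, size_t len,
			    __le64 (*hashes)[NH_NUM_PASSES])
{
	size_t i = 0;

	/* One 32-byte NH hash per segment of up to NH_MESSAGE_BYTES. */
	while (len) {
		size_t seg = min_t(size_t, len, NH_MESSAGE_BYTES);

		nh(key, msg, seg, hashes[i++]);
		msg += seg;
		len -= seg;
	}
}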
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
deleted file mode 100644
index 306925fea190..000000000000
--- a/include/crypto/nhpoly1305.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values and helper functions for the NHPoly1305 hash function.
- */
-
-#ifndef _NHPOLY1305_H
-#define _NHPOLY1305_H
-
-#include <crypto/hash.h>
-#include <crypto/internal/poly1305.h>
-
-/* NH parameterization: */
-
-/* Endianness: little */
-/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
-
-/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
-#define NH_PAIR_STRIDE 2
-#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
-
-/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
-#define NH_NUM_PASSES 4
-#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
-
-/* Max message size: 1024 bytes (32x compression factor) */
-#define NH_NUM_STRIDES 64
-#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
-#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
-#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
- NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
-#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
-
-#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
-
-struct nhpoly1305_key {
- struct poly1305_core_key poly_key;
- u32 nh_key[NH_KEY_WORDS];
-};
-
-struct nhpoly1305_state {
-
- /* Running total of polynomial evaluation */
- struct poly1305_state poly_state;
-
- /* Partial block buffer */
- u8 buffer[NH_MESSAGE_UNIT];
- unsigned int buflen;
-
- /*
- * Number of bytes remaining until the current NH message reaches
- * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
- */
- unsigned int nh_remaining;
-
- __le64 nh_hash[NH_NUM_PASSES];
-};
-
-typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len,
- __le64 hash[NH_NUM_PASSES]);
-
-int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
-
-int crypto_nhpoly1305_init(struct shash_desc *desc);
-int crypto_nhpoly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen);
-int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
- const u8 *src, unsigned int srclen,
- nh_t nh_fn);
-int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst);
-int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst,
- nh_t nh_fn);
-
-#endif /* _NHPOLY1305_H */
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 81098e00c08f..4c5199b20338 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -43,9 +43,11 @@ extern void public_key_free(struct public_key *key);
struct public_key_signature {
struct asymmetric_key_id *auth_ids[3];
u8 *s; /* Signature */
- u8 *digest;
+ u8 *m; /* Message data to pass to verifier */
u32 s_size; /* Number of bytes in signature */
- u32 digest_size; /* Number of bytes in digest */
+ u32 m_size; /* Number of bytes in ->m */
+ bool m_free; /* T if ->m needs freeing */
+ bool algo_takes_data; /* T if public key algo operates on data, not a hash */
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 27f08b972931..4d973e016cd6 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -26,16 +26,6 @@ struct sha1_state {
u8 buffer[SHA1_BLOCK_SIZE];
};
-/*
- * An implementation of SHA-1's compression function. Don't use in new code!
- * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
- * the correct way to hash something with SHA-1 (use crypto_shash instead).
- */
-#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
-#define SHA1_WORKSPACE_WORDS 16
-void sha1_init_raw(__u32 *buf);
-void sha1_transform(__u32 *digest, const char *data, __u32 *W);
-
/* State for the SHA-1 compression function */
struct sha1_block_state {
u32 h[SHA1_DIGEST_SIZE / 4];
diff --git a/include/cxl/event.h b/include/cxl/event.h
index 6fd90f9cc203..ff97fea718d2 100644
--- a/include/cxl/event.h
+++ b/include/cxl/event.h
@@ -320,4 +320,26 @@ static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data
}
#endif
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+int cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err);
+int cxl_cper_setup_prot_err_work_data(struct cxl_cper_prot_err_work_data *wd,
+ struct cxl_cper_sec_prot_err *prot_err,
+ int severity);
+#else
+static inline int
+cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err)
+{
+ return -EOPNOTSUPP;
+}
+static inline int
+cxl_cper_setup_prot_err_work_data(struct cxl_cper_prot_err_work_data *wd,
+ struct cxl_cper_sec_prot_err *prot_err,
+ int severity)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *wd);
+
#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/drm/bridge/inno_hdmi.h b/include/drm/bridge/inno_hdmi.h
new file mode 100644
index 000000000000..5bbcaeea94e2
--- /dev/null
+++ b/include/drm/bridge/inno_hdmi.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef __INNO_HDMI__
+#define __INNO_HDMI__
+
+#include <linux/types.h>
+
+struct device;
+struct drm_encoder;
+struct drm_display_mode;
+struct inno_hdmi;
+
+struct inno_hdmi_plat_ops {
+ void (*enable)(struct device *pdev, struct drm_display_mode *mode);
+};
+
+struct inno_hdmi_phy_config {
+ unsigned long pixelclock;
+ u8 pre_emphasis;
+ u8 voltage_level_control;
+};
+
+struct inno_hdmi_plat_data {
+ const struct inno_hdmi_plat_ops *ops;
+ struct inno_hdmi_phy_config *phy_configs;
+ struct inno_hdmi_phy_config *default_phy_config;
+};
+
+struct inno_hdmi *inno_hdmi_bind(struct device *pdev,
+ struct drm_encoder *encoder,
+ const struct inno_hdmi_plat_data *plat_data);
+#endif /* __INNO_HDMI__ */
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index 31d7ed589233..03005e474704 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -100,7 +100,6 @@ struct samsung_dsim_plat_data {
struct samsung_dsim {
struct mipi_dsi_host dsi_host;
struct drm_bridge bridge;
- struct drm_bridge *out_bridge;
struct device *dev;
struct drm_display_mode mode;
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 14d2859f0bda..1d0acd58f486 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -206,6 +206,9 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
/* DP/eDP DSC support */
u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+u32 drm_dp_dsc_slice_count_to_mask(int slice_count);
+u32 drm_dp_dsc_sink_slice_count_mask(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp);
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 43783891d359..178f8f62c80f 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,7 +30,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
-#include <drm/drm_colorop.h>
/**
* struct drm_crtc_commit - track modeset commits on a CRTC
@@ -340,6 +339,11 @@ struct drm_private_state_funcs {
*/
struct drm_private_obj {
/**
+ * @dev: parent DRM device
+ */
+ struct drm_device *dev;
+
+ /**
* @head: List entry used to attach a private object to a &drm_device
* (queued to &drm_mode_config.privobj_list).
*/
@@ -707,6 +711,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_colorop_state *
drm_atomic_get_colorop_state(struct drm_atomic_state *state,
struct drm_colorop *colorop);
+
+struct drm_colorop_state *
+drm_atomic_get_old_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop);
+struct drm_colorop_state *
+drm_atomic_get_new_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop);
+
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
@@ -804,36 +816,6 @@ drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_old_colorop_state - get colorop state, if it exists
- * @state: global atomic state object
- * @colorop: colorop to grab
- *
- * This function returns the old colorop state for the given colorop, or
- * NULL if the colorop is not part of the global atomic state.
- */
-static inline struct drm_colorop_state *
-drm_atomic_get_old_colorop_state(struct drm_atomic_state *state,
- struct drm_colorop *colorop)
-{
- return state->colorops[drm_colorop_index(colorop)].old_state;
-}
-
-/**
- * drm_atomic_get_new_colorop_state - get colorop state, if it exists
- * @state: global atomic state object
- * @colorop: colorop to grab
- *
- * This function returns the new colorop state for the given colorop, or
- * NULL if the colorop is not part of the global atomic state.
- */
-static inline struct drm_colorop_state *
-drm_atomic_get_new_colorop_state(struct drm_atomic_state *state,
- struct drm_colorop *colorop)
-{
- return state->colorops[drm_colorop_index(colorop)].new_state;
-}
-
-/**
* drm_atomic_get_old_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index dbafe136833f..4f19f7064ee3 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -614,6 +614,7 @@ struct drm_bridge_funcs {
* controllers for HDMI bridges.
*/
void (*hpd_notify)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
enum drm_connector_status status);
/**
@@ -667,29 +668,113 @@ struct drm_bridge_funcs {
unsigned long long tmds_rate);
/**
- * @hdmi_clear_infoframe:
+ * @hdmi_clear_avi_infoframe:
*
* This callback clears the infoframes in the hardware during commit.
- * It will be called multiple times, once for every disabled infoframe
- * type.
*
* This callback is optional but it must be implemented by bridges that
* set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
*/
- int (*hdmi_clear_infoframe)(struct drm_bridge *bridge,
- enum hdmi_infoframe_type type);
+ int (*hdmi_clear_avi_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_avi_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_avi_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
/**
- * @hdmi_write_infoframe:
+ * @hdmi_clear_hdmi_infoframe:
*
- * Program the infoframe into the hardware. It will be called multiple
- * times, once for every updated infoframe type.
+ * This callback clears the HDMI Vendor-Specific infoframe in the
+ * hardware during commit.
*
* This callback is optional but it must be implemented by bridges that
* set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
*/
- int (*hdmi_write_infoframe)(struct drm_bridge *bridge,
- enum hdmi_infoframe_type type,
- const u8 *buffer, size_t len);
+ int (*hdmi_clear_hdmi_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_hdmi_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_hdmi_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_clear_hdr_drm_infoframe:
+ *
+ * This callback clears the HDR Dynamic Range and Mastering (DRM)
+ * infoframe in the hardware during commit.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_clear_hdr_drm_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_hdr_drm_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_write_hdr_drm_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_clear_spd_infoframe:
+ *
+ * This callback clears the SPD infoframe in the hardware during commit.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_clear_spd_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_spd_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_write_spd_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_clear_audio_infoframe:
+ *
+ * This callback clears the audio infoframe in the hardware during commit.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_clear_audio_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_audio_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_audio_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
/**
* @hdmi_audio_startup:
@@ -945,7 +1030,11 @@ enum drm_bridge_ops {
/**
* @DRM_BRIDGE_OP_HDMI: The bridge provides HDMI connector operations,
* including infoframes support. Bridges that set this flag must
- * implement the &drm_bridge_funcs->write_infoframe callback.
+ * provide HDMI-related information and implement the
+ * &drm_bridge_funcs->hdmi_clear_avi_infoframe,
+ * &drm_bridge_funcs->hdmi_write_avi_infoframe,
+ * &drm_bridge_funcs->hdmi_clear_hdmi_infoframe and
+ * &drm_bridge_funcs->hdmi_write_hdmi_infoframe callbacks.
*
* Note: currently there can be at most one bridge in a chain that sets
* this bit. This is to simplify corresponding glue code in connector
@@ -957,6 +1046,9 @@ enum drm_bridge_ops {
* Bridges that set this flag must implement the
* &drm_bridge_funcs->hdmi_audio_prepare and
* &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ * If the bridge implements @DRM_BRIDGE_OP_HDMI, it must also implement
+ * the &drm_bridge_funcs->hdmi_write_audio_infoframe and
+ * &drm_bridge_funcs->hdmi_clear_audio_infoframe callbacks.
*
* Note: currently there can be at most one bridge in a chain that sets
* this bit. This is to simplify corresponding glue code in connector
@@ -988,6 +1080,18 @@ enum drm_bridge_ops {
* to be present.
*/
DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME: The bridge supports
+ * &drm_bridge_funcs->hdmi_write_hdr_drm_infoframe and
+ * &drm_bridge_funcs->hdmi_clear_hdr_drm_infoframe callbacks.
+ */
+ DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME = BIT(9),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME: The bridge supports
+ * &drm_bridge_funcs->hdmi_write_spd_infoframe and
+ * &drm_bridge_funcs->hdmi_clear_spd_infoframe callbacks.
+ */
+ DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME = BIT(10),
};
/**
@@ -1026,6 +1130,14 @@ struct drm_bridge {
*/
struct kref refcount;
+ /**
+ * @unplugged:
+ *
+ * Flag to tell if the bridge has been unplugged.
+ * See drm_bridge_enter() and drm_bridge_unplug().
+ */
+ bool unplugged;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -1153,6 +1265,17 @@ struct drm_bridge {
* @hpd_cb.
*/
void *hpd_data;
+
+ /**
+ * @next_bridge: Pointer to the following bridge, automatically put
+ * when this bridge is freed (i.e. at destroy time). This is for
+ * drivers needing to store a pointer to the next bridge in the
+ * chain, and ensures any code still holding a reference to this
+ * bridge after its removal cannot use-after-free the next
+ * bridge. Any other bridge pointers stored by the driver must be
+ * put in the .destroy callback by driver code.
+ */
+ struct drm_bridge *next_bridge;
};
static inline struct drm_bridge *
@@ -1161,6 +1284,10 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+bool drm_bridge_enter(struct drm_bridge *bridge, int *idx);
+void drm_bridge_exit(int idx);
+void drm_bridge_unplug(struct drm_bridge *bridge);
+
struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
void drm_bridge_put(struct drm_bridge *bridge);
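Assuming drm_bridge_enter()/drm_bridge_exit() mirror the drm_dev_enter()/drm_dev_exit() pattern (a sketch, not taken from this patch; my_bridge_op() is hypothetical):

static int my_bridge_op(struct drm_bridge *bridge)
{
	int idx;

	if (!drm_bridge_enter(bridge, &idx))
		return -ENODEV;	/* bridge was unplugged, see @unplugged */

	/* ... bridge resources are safe to access here ... */

	drm_bridge_exit(idx);
	return 0;
}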
@@ -1196,8 +1323,13 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags);
#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
#else
+static inline struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np)
+{
+ return NULL;
+}
static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
return NULL;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 8f34f4b8183d..7eaec37ae1c7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -1222,44 +1222,24 @@ struct drm_connector_cec_funcs {
};
/**
- * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ * struct drm_connector_infoframe_funcs - InfoFrame-related functions
*/
-struct drm_connector_hdmi_funcs {
- /**
- * @tmds_char_rate_valid:
- *
- * This callback is invoked at atomic_check time to figure out
- * whether a particular TMDS character rate is supported by the
- * driver.
- *
- * The @tmds_char_rate_valid callback is optional.
- *
- * Returns:
- *
- * Either &drm_mode_status.MODE_OK or one of the failure reasons
- * in &enum drm_mode_status.
- */
- enum drm_mode_status
- (*tmds_char_rate_valid)(const struct drm_connector *connector,
- const struct drm_display_mode *mode,
- unsigned long long tmds_rate);
-
+struct drm_connector_infoframe_funcs {
/**
* @clear_infoframe:
*
* This callback is invoked through
* @drm_atomic_helper_connector_hdmi_update_infoframes during a
* commit to clear the infoframes into the hardware. It will be
- * called multiple times, once for every disabled infoframe
- * type.
+ * called once for each InfoFrame type to be disabled.
*
- * The @clear_infoframe callback is optional.
+ * The @clear_infoframe callback is mandatory for AVI and HDMI-VS
+ * InfoFrame types.
*
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*clear_infoframe)(struct drm_connector *connector,
- enum hdmi_infoframe_type type);
+ int (*clear_infoframe)(struct drm_connector *connector);
/**
* @write_infoframe:
@@ -1267,18 +1247,42 @@ struct drm_connector_hdmi_funcs {
* This callback is invoked through
* @drm_atomic_helper_connector_hdmi_update_infoframes during a
* commit to program the infoframes into the hardware. It will
- * be called multiple times, once for every updated infoframe
- * type.
+ * be called for every updated infoframe type.
*
- * The @write_infoframe callback is mandatory.
+ * The @write_infoframe callback is mandatory for AVI and HDMI-VS
+ * InfoFrame types.
*
* Returns:
* 0 on success, a negative error code otherwise
*/
int (*write_infoframe)(struct drm_connector *connector,
- enum hdmi_infoframe_type type,
const u8 *buffer, size_t len);
+};
+
+/**
+ * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_hdmi_funcs {
+ /**
+ * @tmds_char_rate_valid:
+ *
+ * This callback is invoked at atomic_check time to figure out
+ * whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * The @tmds_char_rate_valid callback is optional.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*tmds_char_rate_valid)(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
/**
* @read_edid:
*
@@ -1293,6 +1297,47 @@ struct drm_connector_hdmi_funcs {
* Valid EDID on success, NULL in case of failure.
*/
const struct drm_edid *(*read_edid)(struct drm_connector *connector);
+
+ /**
+ * @avi:
+ *
+ * Set of callbacks for handling the AVI InfoFrame. These callbacks are
+ * mandatory.
+ */
+ struct drm_connector_infoframe_funcs avi;
+
+ /**
+ * @hdmi:
+ *
+ * Set of callbacks for handling the HDMI Vendor-Specific InfoFrame.
+ * These callbacks are mandatory.
+ */
+ struct drm_connector_infoframe_funcs hdmi;
+
+ /**
+ * @audio:
+ *
+ * Set of callbacks for handling the Audio InfoFrame. These callbacks
+ * are optional, but they are required for drivers which use
+ * drm_atomic_helper_connector_hdmi_update_audio_infoframe().
+ */
+ struct drm_connector_infoframe_funcs audio;
+
+ /**
+ * @hdr_drm:
+ *
+ * Set of callbacks for handling the HDR DRM InfoFrame. These callbacks
+ * are mandatory if HDR output is to be supported.
+ */
+ struct drm_connector_infoframe_funcs hdr_drm;
+
+ /**
+ * @spd:
+ *
+ * Set of callbacks for handling the SPD InfoFrame. These callbacks are
+ * optional.
+ */
+ struct drm_connector_infoframe_funcs spd;
};
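A sketch of a driver filling in the reworked vtable; all my_* names are hypothetical, and note that the enum hdmi_infoframe_type argument is gone from the per-type callback signatures:

static int my_write_avi_infoframe(struct drm_connector *connector,
				  const u8 *buffer, size_t len)
{
	/* program @buffer into the hardware's AVI InfoFrame registers */
	return 0;
}

static const struct drm_connector_hdmi_funcs my_hdmi_funcs = {
	.tmds_char_rate_valid = my_tmds_char_rate_valid,
	/* The AVI and HDMI-VS callbacks are mandatory. */
	.avi = {
		.write_infoframe = my_write_avi_infoframe,
		.clear_infoframe = my_clear_avi_infoframe,
	},
	.hdmi = {
		.write_infoframe = my_write_vendor_infoframe,
		.clear_infoframe = my_clear_vendor_infoframe,
	},
	/* .audio, .hdr_drm and .spd are optional. */
};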
/**
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 5af49c5c3778..bc78fb77cc27 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -3,6 +3,9 @@
#include <linux/list.h>
#include <linux/kref.h>
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <linux/mount.h>
+#endif
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/sched.h>
@@ -168,6 +171,18 @@ struct drm_device {
*/
struct drm_master *master;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /**
+ * @huge_mnt:
+ *
+ * Huge tmpfs mountpoint used at GEM object initialization by
+ * drm_gem_object_init(). Drivers can call drm_gem_huge_mnt_create() to
+ * create, mount and use it. The default tmpfs mountpoint (`shm_mnt`) is
+ * used if NULL.
+ */
+ struct vfsmount *huge_mnt;
+#endif
+
/**
* @driver_features: per-device driver features
*
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index dd9a18f8de5a..05cca77b7249 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -167,13 +167,6 @@ struct drm_fb_helper {
struct mutex lock;
/**
- * @kernel_fb_list:
- *
- * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit.
- */
- struct list_head kernel_fb_list;
-
- /**
* @delayed_hotplug:
*
* A hotplug was received while fbdev wasn't in control of the DRM
@@ -236,8 +229,6 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
.fb_setcmap = drm_fb_helper_setcmap, \
.fb_blank = drm_fb_helper_blank, \
.fb_pan_display = drm_fb_helper_pan_display, \
- .fb_debug_enter = drm_fb_helper_debug_enter, \
- .fb_debug_leave = drm_fb_helper_debug_leave, \
.fb_ioctl = drm_fb_helper_ioctl
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -280,8 +271,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
-int drm_fb_helper_debug_enter(struct fb_info *info);
-int drm_fb_helper_debug_leave(struct fb_info *info);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -387,16 +376,6 @@ static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
return 0;
}
-
-static inline int drm_fb_helper_debug_enter(struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_debug_leave(struct fb_info *info)
-{
- return 0;
-}
#endif
#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 1a3018e4a537..6ee70ad65e1f 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/idr.h>
+#include <linux/xarray.h>
#include <uapi/drm/drm.h>
@@ -316,10 +317,8 @@ struct drm_file {
/** @table_lock: Protects @object_idr. */
spinlock_t table_lock;
- /** @syncobj_idr: Mapping of sync object handles to object pointers. */
- struct idr syncobj_idr;
- /** @syncobj_table_lock: Protects @syncobj_idr. */
- spinlock_t syncobj_table_lock;
+ /** @syncobj_xa: Mapping of sync object handles to object pointers. */
+ struct xarray syncobj_xa;
/** @filp: Pointer to the core file structure. */
struct file *filp;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 8d48d2af2649..86f5846154f7 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -40,6 +40,9 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <drm/drm_device.h>
+#endif
#include <drm/drm_vma_manager.h>
struct iosys_map;
@@ -469,6 +472,7 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
+ .get_unmapped_area = drm_gem_get_unmapped_area,\
.mmap = drm_gem_mmap, \
.fop_flags = FOP_UNSIGNED_OFFSET
@@ -491,13 +495,40 @@ struct drm_gem_object {
DRM_GEM_FOPS,\
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value);
+#else
+static inline int drm_gem_huge_mnt_create(struct drm_device *dev,
+ const char *value)
+{
+ return 0;
+}
+#endif
+
+/**
+ * drm_gem_get_huge_mnt - Get the huge tmpfs mountpoint used by a DRM device
+ * @dev: DRM device
+ *
+ * This function gets the huge tmpfs mountpoint used by DRM device @dev. A huge
+ * tmpfs mountpoint is used instead of `shm_mnt` after a successful call to
+ * drm_gem_huge_mnt_create() when CONFIG_TRANSPARENT_HUGEPAGE is enabled.
+ *
+ * Returns:
+ * The huge tmpfs mountpoint in use, or NULL otherwise.
+ */
+static inline struct vfsmount *drm_gem_get_huge_mnt(struct drm_device *dev)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return dev->huge_mnt;
+#else
+ return NULL;
+#endif
+}
+
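A sketch of driver-side use at init time; treating "within_size" as a tmpfs huge= mount option value is an assumption, not taken from this patch:

static int my_driver_gem_init(struct drm_device *dev)
{
	int err;

	/* Without CONFIG_TRANSPARENT_HUGEPAGE this is a no-op returning 0. */
	err = drm_gem_huge_mnt_create(dev, "within_size");
	if (err)
		return err;

	/* Subsequent drm_gem_object_init() calls now use dev->huge_mnt. */
	return 0;
}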
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
-int drm_gem_object_init_with_mnt(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size,
- struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
@@ -507,6 +538,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+#ifdef CONFIG_MMU
+unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+#else
+#define drm_gem_get_unmapped_area NULL
+#endif
+
/**
* drm_gem_object_get - acquire a GEM buffer object reference
* @obj: GEM buffer object
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 589f7bfe7506..5ccdae21b94a 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -109,9 +109,6 @@ struct drm_gem_shmem_object {
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
-struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
- size_t size,
- struct vfsmount *gemfs);
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
@@ -303,4 +300,15 @@ struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
.gem_prime_import = drm_gem_shmem_prime_import_no_map, \
.dumb_create = drm_gem_shmem_dumb_create
+/*
+ * KUnit helpers
+ */
+
+#if IS_ENABLED(CONFIG_KUNIT)
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+#endif
+
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 632e100e6efb..2578ac92a8d4 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -328,6 +328,35 @@ void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages);
+/**
+ * enum drm_gpusvm_scan_result - Scan result from the drm_gpusvm_scan_mm() function.
+ * @DRM_GPUSVM_SCAN_UNPOPULATED: At least one page was not present or inaccessible.
+ * @DRM_GPUSVM_SCAN_EQUAL: All pages belong to the struct dev_pagemap indicated as
+ * the @pagemap argument to the drm_gpusvm_scan_mm() function.
+ * @DRM_GPUSVM_SCAN_OTHER: All pages belong to exactly one dev_pagemap, which is
+ * *NOT* the @pagemap argument to drm_gpusvm_scan_mm(). All pages belong to
+ * the same device private owner.
+ * @DRM_GPUSVM_SCAN_SYSTEM: All pages are present and system pages.
+ * @DRM_GPUSVM_SCAN_MIXED_DEVICE: All pages are device pages and belong to at least
+ * two different struct dev_pagemaps. All pages belong to the same device private
+ * owner.
+ * @DRM_GPUSVM_SCAN_MIXED: Pages are present and are a mix of system pages
+ * and device-private pages. All device-private pages belong to the same device
+ * private owner.
+ */
+enum drm_gpusvm_scan_result {
+ DRM_GPUSVM_SCAN_UNPOPULATED,
+ DRM_GPUSVM_SCAN_EQUAL,
+ DRM_GPUSVM_SCAN_OTHER,
+ DRM_GPUSVM_SCAN_SYSTEM,
+ DRM_GPUSVM_SCAN_MIXED_DEVICE,
+ DRM_GPUSVM_SCAN_MIXED,
+};
+
+enum drm_gpusvm_scan_result drm_gpusvm_scan_mm(struct drm_gpusvm_range *range,
+ void *dev_private_owner,
+ const struct dev_pagemap *pagemap);
+
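For illustration, a consumer might branch on the scan result as below (a sketch; migrate_range_to_devmem() is a hypothetical driver helper):

	switch (drm_gpusvm_scan_mm(range, dev_private_owner, pagemap)) {
	case DRM_GPUSVM_SCAN_EQUAL:
		/* Already fully resident in @pagemap; nothing to do. */
		break;
	case DRM_GPUSVM_SCAN_SYSTEM:
	case DRM_GPUSVM_SCAN_MIXED:
		migrate_range_to_devmem(range);
		break;
	default:
		/* Unpopulated, foreign or mixed device pages. */
		break;
	}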
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index fdfc575b2603..655bd9104ffb 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -736,8 +736,8 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
struct drm_gpuvm_bo *
-drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj);
+drm_gpuvm_bo_obtain_locked(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
@@ -1121,7 +1121,7 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops);
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
- struct drm_gpuva_op_map *op)
+ const struct drm_gpuva_op_map *op)
{
va->va.addr = op->va.addr;
va->va.range = op->va.range;
@@ -1265,13 +1265,13 @@ int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
- struct drm_gpuva_op_map *op);
+ const struct drm_gpuva_op_map *op);
void drm_gpuva_remap(struct drm_gpuva *prev,
struct drm_gpuva *next,
- struct drm_gpuva_op_remap *op);
+ const struct drm_gpuva_op_remap *op);
-void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+void drm_gpuva_unmap(const struct drm_gpuva_op_unmap *op);
/**
* drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c68edbd126d0..44a0d6f8d01f 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -133,6 +133,9 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
int drm_object_property_get_default_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val);
+int drm_object_immutable_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index fe32854b7ffe..3e68213958dd 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -52,11 +52,6 @@ struct drm_scanout_buffer;
struct drm_writeback_connector;
struct drm_writeback_job;
-enum mode_set_atomic {
- LEAVE_ATOMIC_MODE_SET,
- ENTER_ATOMIC_MODE_SET,
-};
-
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
@@ -254,24 +249,6 @@ struct drm_crtc_helper_funcs {
struct drm_framebuffer *old_fb);
/**
- * @mode_set_base_atomic:
- *
- * This callback is used by the fbdev helpers to set a new framebuffer
- * and scanout without sleeping, i.e. from an atomic calling context. It
- * is only used to implement kgdb support.
- *
- * This callback is optional and only needed for kgdb support in the fbdev
- * helpers.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*mode_set_base_atomic)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic);
-
- /**
* @disable:
*
* This callback should be used to disable the CRTC. With the atomic
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 7f0256dae3f1..f2f2bf82eff9 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -5,6 +5,7 @@
#include <linux/err.h>
#include <linux/of_graph.h>
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
+#include <linux/of.h>
#include <drm/drm_bridge.h>
#endif
@@ -170,9 +171,12 @@ static inline int drm_of_panel_bridge_remove(const struct device_node *np,
if (!remote)
return -ENODEV;
- bridge = of_drm_find_bridge(remote);
+ bridge = of_drm_find_and_get_bridge(remote);
drm_panel_bridge_remove(bridge);
+ drm_bridge_put(bridge);
+ of_node_put(remote);
+
return 0;
#else
return -EINVAL;
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index eb29e5309f0a..2baf0861f78f 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -10,6 +10,8 @@
struct dma_fence;
struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_dev_hold;
struct drm_pagemap_zdd;
struct device;
@@ -124,17 +126,49 @@ struct drm_pagemap_ops {
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms);
+ /**
+ * @destroy: Destroy the drm_pagemap and associated resources.
+ * @dpagemap: The drm_pagemap to destroy.
+ * @is_atomic_or_reclaim: True if the function may be called from
+ * atomic or reclaim context.
+ *
+ * The implementation should take care not to attempt to
+ * destroy resources that may already have been destroyed
+ * using devm_ callbacks, since this function may be called
+ * after the underlying struct device has been unbound.
+ * If the implementation defers the execution to a work item
+ * to avoid locking issues, then it must make sure the work
+ * items are flushed before module exit. If the destroy call
+ * happens after the provider's pci_remove() callback has
+ * been executed, a module reference and a drm device reference are
+ * held across the destroy callback.
+ */
+ void (*destroy)(struct drm_pagemap *dpagemap,
+ bool is_atomic_or_reclaim);
};
/**
* struct drm_pagemap: Additional information for a struct dev_pagemap
* used for device p2p handshaking.
* @ops: The struct drm_pagemap_ops.
- * @dev: The struct drevice owning the device-private memory.
+ * @ref: Reference count.
+ * @drm: The struct drm device owning the device-private memory.
+ * @pagemap: Pointer to the underlying dev_pagemap.
+ * @dev_hold: Pointer to a struct drm_pagemap_dev_hold for
+ * device referencing.
+ * @cache: Back-pointer to the &struct drm_pagemap_cache used for this
+ * &struct drm_pagemap. May be NULL if no cache is used.
+ * @shrink_link: Link into the shrinker's list of drm_pagemaps. Only
+ * used if also using a pagemap cache.
*/
struct drm_pagemap {
const struct drm_pagemap_ops *ops;
- struct device *dev;
+ struct kref ref;
+ struct drm_device *drm;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap_dev_hold *dev_hold;
+ struct drm_pagemap_cache *cache;
+ struct list_head shrink_link;
};
struct drm_pagemap_devmem;
@@ -211,8 +245,19 @@ struct drm_pagemap_devmem_ops {
#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+int drm_pagemap_init(struct drm_pagemap *dpagemap,
+ struct dev_pagemap *pagemap,
+ struct drm_device *drm,
+ const struct drm_pagemap_ops *ops);
+
+struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
+ struct dev_pagemap *pagemap,
+ const struct drm_pagemap_ops *ops);
+
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+void drm_pagemap_put(struct drm_pagemap *dpagemap);
+
#else
static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
@@ -220,9 +265,42 @@ static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page
return NULL;
}
+static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
/**
+ * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
+ * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
+ *
+ * Return: Pointer to the struct drm_pagemap, or NULL.
+ */
+static inline struct drm_pagemap *
+drm_pagemap_get(struct drm_pagemap *dpagemap)
+{
+ if (likely(dpagemap))
+ kref_get(&dpagemap->ref);
+
+ return dpagemap;
+}
+
+/**
+ * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
+ * unless the current reference count is zero.
+ * @dpagemap: Pointer to the drm_pagemap or NULL.
+ *
+ * Return: A pointer to @dpagemap if the reference count was successfully
+ * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
+ */
+static inline struct drm_pagemap * __must_check
+drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
+{
+ return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
+}
+
+/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
* @dev: Pointer to the device structure which device memory allocation belongs to
@@ -246,13 +324,29 @@ struct drm_pagemap_devmem {
struct dma_fence *pre_migrate_fence;
};
+/**
+ * struct drm_pagemap_migrate_details - Details to govern migration.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @can_migrate_same_pagemap: Whether the copy function as indicated by
+ * the @source_peer_migrates flag, can migrate device pages within a
+ * single drm_pagemap.
+ * @source_peer_migrates: Whether on p2p migration, The source drm_pagemap
+ * should use the copy_to_ram() callback rather than the destination
+ * drm_pagemap should use the copy_to_devmem() callback.
+ */
+struct drm_pagemap_migrate_details {
+ unsigned long timeslice_ms;
+ u32 can_migrate_same_pagemap : 1;
+ u32 source_peer_migrates : 1;
+};
+
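A sketch of a caller filling in the new details argument of drm_pagemap_migrate_to_devmem() (the values shown are illustrative only):

	static const struct drm_pagemap_migrate_details mdetails = {
		.timeslice_ms = 5,
		.source_peer_migrates = 1, /* source uses copy_to_ram() on p2p */
	};
	int err;

	err = drm_pagemap_migrate_to_devmem(devmem_allocation, mm,
					    start, end, &mdetails);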
#if IS_ENABLED(CONFIG_ZONE_DEVICE)
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct mm_struct *mm,
unsigned long start, unsigned long end,
- unsigned long timeslice_ms,
- void *pgmap_owner);
+ const struct drm_pagemap_migrate_details *mdetails);
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
@@ -269,6 +363,10 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
struct mm_struct *mm,
unsigned long timeslice_ms);
+void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
+
+int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
+
#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
#endif
diff --git a/include/drm/drm_pagemap_util.h b/include/drm/drm_pagemap_util.h
new file mode 100644
index 000000000000..19169b42b891
--- /dev/null
+++ b/include/drm/drm_pagemap_util.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _DRM_PAGEMAP_UTIL_H_
+#define _DRM_PAGEMAP_UTIL_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct drm_device;
+struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_owner;
+struct drm_pagemap_shrinker;
+
+/**
+ * struct drm_pagemap_peer - Structure representing a fast interconnect peer
+ * @list: Pointer to a &struct drm_pagemap_owner_list used to keep track of peers
+ * @link: List link for @list's list of peers.
+ * @owner: Pointer to a &struct drm_pagemap_owner, common for a set of peers having
+ * fast interconnects.
+ * @private: Pointer private to the struct embedding this struct.
+ */
+struct drm_pagemap_peer {
+ struct drm_pagemap_owner_list *list;
+ struct list_head link;
+ struct drm_pagemap_owner *owner;
+ void *private;
+};
+
+/**
+ * struct drm_pagemap_owner_list - Keeping track of peers and owners
+ * @peer: List of peers.
+ *
+ * The owner list defines the scope where we identify peers having fast interconnects
+ * and a common owner. Typically a driver has a single global owner list to
+ * keep track of common owners for the driver's pagemaps.
+ */
+struct drm_pagemap_owner_list {
+ /** @lock: Mutex protecting the @peers list. */
+ struct mutex lock;
+ /** @peers: List of peers. */
+ struct list_head peers;
+};
+
+/*
+ * Convenience macro to define an owner list.
+ * Typically the owner list statically declared
+ * driver-wide.
+ */
+#define DRM_PAGEMAP_OWNER_LIST_DEFINE(_name) \
+ struct drm_pagemap_owner_list _name = { \
+ .lock = __MUTEX_INITIALIZER((_name).lock), \
+ .peers = LIST_HEAD_INIT((_name).peers) }
+
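For example, a driver would typically declare one such list globally and hand it to drm_pagemap_acquire_owner() (my_has_interconnect() is a hypothetical peer-matching callback):

static DRM_PAGEMAP_OWNER_LIST_DEFINE(my_owner_list);

static int my_peer_init(struct drm_pagemap_peer *peer)
{
	return drm_pagemap_acquire_owner(peer, &my_owner_list,
					 my_has_interconnect);
}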
+void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap);
+
+int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache);
+
+struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm);
+
+struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker);
+
+struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap);
+
+struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache);
+
+#ifdef CONFIG_PROVE_LOCKING
+
+void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap);
+
+#else
+
+static inline void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
+{
+}
+
+#endif /* CONFIG_PROVE_LOCKING */
+
+void drm_pagemap_release_owner(struct drm_pagemap_peer *peer);
+
+int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
+ struct drm_pagemap_owner_list *owner_list,
+ bool (*has_interconnect)(struct drm_pagemap_peer *peer1,
+ struct drm_pagemap_peer *peer2));
+#endif
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 082f29156b3e..aa49b5a42bb5 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -284,6 +284,7 @@ int drm_property_replace_blob_from_id(struct drm_device *dev,
uint64_t blob_id,
ssize_t expected_size,
ssize_t expected_elem_size,
+ ssize_t max_size,
bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index ffa564d79638..2fcef9c0f5b1 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -302,8 +302,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
int drm_crtc_vblank_get(struct drm_crtc *crtc);
void drm_crtc_vblank_put(struct drm_crtc *crtc);
-void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+int drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fb88301b3c45..78e07c2507c7 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -645,6 +645,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
+bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
@@ -674,6 +675,7 @@ bool drm_sched_job_has_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_increase_karma(struct drm_sched_job *bad);
+bool drm_sched_job_is_signaled(struct drm_sched_job *job);
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
@@ -698,4 +700,54 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
+/**
+ * struct drm_sched_pending_job_iter - DRM scheduler pending job iterator state
+ * @sched: DRM scheduler associated with the pending job iterator
+ */
+struct drm_sched_pending_job_iter {
+ struct drm_gpu_scheduler *sched;
+};
+
+/* Drivers should never call this directly */
+static inline struct drm_sched_pending_job_iter
+__drm_sched_pending_job_iter_begin(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_pending_job_iter iter = {
+ .sched = sched,
+ };
+
+ WARN_ON(!drm_sched_is_stopped(sched));
+ return iter;
+}
+
+/* Drivers should never call this directly */
+static inline void
+__drm_sched_pending_job_iter_end(const struct drm_sched_pending_job_iter iter)
+{
+ WARN_ON(!drm_sched_is_stopped(iter.sched));
+}
+
+DEFINE_CLASS(drm_sched_pending_job_iter, struct drm_sched_pending_job_iter,
+ __drm_sched_pending_job_iter_end(_T),
+ __drm_sched_pending_job_iter_begin(__sched),
+ struct drm_gpu_scheduler *__sched);
+static inline void *
+class_drm_sched_pending_job_iter_lock_ptr(class_drm_sched_pending_job_iter_t *_T)
+{ return _T; }
+#define class_drm_sched_pending_job_iter_is_conditional false
+
+/**
+ * drm_sched_for_each_pending_job() - Iterate over each pending job in a scheduler
+ * @__job: Current pending job being iterated over
+ * @__sched: DRM scheduler whose pending jobs are iterated over
+ * @__entity: DRM scheduler entity to filter jobs on; NULL indicates no filter
+ *
+ * Iterates over each pending job in the scheduler, optionally filtering on an
+ * entity, and enforces that the scheduler is fully stopped.
+ */
+#define drm_sched_for_each_pending_job(__job, __sched, __entity) \
+ scoped_guard(drm_sched_pending_job_iter, (__sched)) \
+ list_for_each_entry((__job), &(__sched)->pending_list, list) \
+ for_each_if(!(__entity) || (__job)->entity == (__entity))
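+
+/*
+ * Illustrative use (hypothetical reset path): the scheduler must be
+ * fully stopped, e.g. via drm_sched_stop(), before its pending list
+ * may be walked:
+ *
+ *	drm_sched_stop(sched, NULL);
+ *	drm_sched_for_each_pending_job(job, sched, NULL)
+ *		drm_sched_increase_karma(job);
+ *	drm_sched_start(sched, 0);
+ */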
+
#endif
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
index 26bedc360044..ce946859a3a9 100644
--- a/include/drm/intel/display_parent_interface.h
+++ b/include/drm/intel/display_parent_interface.h
@@ -6,9 +6,55 @@
#include <linux/types.h>
+struct dma_fence;
+struct drm_crtc;
struct drm_device;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_plane_state;
+struct drm_scanout_buffer;
+struct i915_vma;
+struct intel_hdcp_gsc_context;
+struct intel_initial_plane_config;
+struct intel_panic;
+struct intel_stolen_node;
struct ref_tracker;
+/* Keep struct definitions sorted */
+
+struct intel_display_hdcp_interface {
+ ssize_t (*gsc_msg_send)(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+ bool (*gsc_check_status)(struct drm_device *drm);
+ struct intel_hdcp_gsc_context *(*gsc_context_alloc)(struct drm_device *drm);
+ void (*gsc_context_free)(struct intel_hdcp_gsc_context *gsc_context);
+};
+
+struct intel_display_initial_plane_interface {
+ void (*vblank_wait)(struct drm_crtc *crtc);
+ struct drm_gem_object *(*alloc_obj)(struct drm_device *drm, struct intel_initial_plane_config *plane_config);
+ int (*setup)(struct drm_plane_state *plane_state, struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer *fb, struct i915_vma *vma);
+ void (*config_fini)(struct intel_initial_plane_config *plane_configs);
+};
+
+struct intel_display_irq_interface {
+ bool (*enabled)(struct drm_device *drm);
+ void (*synchronize)(struct drm_device *drm);
+};
+
+struct intel_display_panic_interface {
+ struct intel_panic *(*alloc)(void);
+ int (*setup)(struct intel_panic *panic, struct drm_scanout_buffer *sb);
+ void (*finish)(struct intel_panic *panic);
+};
+
+struct intel_display_pc8_interface {
+ void (*block)(struct drm_device *drm);
+ void (*unblock)(struct drm_device *drm);
+};
+
struct intel_display_rpm_interface {
struct ref_tracker *(*get)(const struct drm_device *drm);
struct ref_tracker *(*get_raw)(const struct drm_device *drm);
@@ -25,6 +71,28 @@ struct intel_display_rpm_interface {
void (*assert_unblock)(const struct drm_device *drm);
};
+struct intel_display_rps_interface {
+ void (*boost_if_not_started)(struct dma_fence *fence);
+ void (*mark_interactive)(struct drm_device *drm, bool interactive);
+ void (*ilk_irq_handler)(struct drm_device *drm);
+};
+
+struct intel_display_stolen_interface {
+ int (*insert_node_in_range)(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end);
+ int (*insert_node)(struct intel_stolen_node *node, u64 size, unsigned int align); /* Optional */
+ void (*remove_node)(struct intel_stolen_node *node);
+ bool (*initialized)(struct drm_device *drm);
+ bool (*node_allocated)(const struct intel_stolen_node *node);
+ u64 (*node_offset)(const struct intel_stolen_node *node);
+ u64 (*area_address)(struct drm_device *drm); /* Optional */
+ u64 (*area_size)(struct drm_device *drm); /* Optional */
+ u64 (*node_address)(const struct intel_stolen_node *node);
+ u64 (*node_size)(const struct intel_stolen_node *node);
+ struct intel_stolen_node *(*node_alloc)(struct drm_device *drm);
+ void (*node_free)(const struct intel_stolen_node *node);
+};
+
/**
* struct intel_display_parent_interface - services parent driver provides to display
*
@@ -38,8 +106,44 @@ struct intel_display_rpm_interface {
* check the optional pointers.
*/
struct intel_display_parent_interface {
+ /** @hdcp: HDCP GSC interface */
+ const struct intel_display_hdcp_interface *hdcp;
+
+ /** @initial_plane: Initial plane interface */
+ const struct intel_display_initial_plane_interface *initial_plane;
+
+ /** @irq: IRQ interface */
+ const struct intel_display_irq_interface *irq;
+
+ /** @panic: Panic interface */
+ const struct intel_display_panic_interface *panic;
+
+ /** @pc8: PC8 interface. Optional. */
+ const struct intel_display_pc8_interface *pc8;
+
/** @rpm: Runtime PM functions */
const struct intel_display_rpm_interface *rpm;
+
+ /** @rps: RPS interface. Optional. */
+ const struct intel_display_rps_interface *rps;
+
+ /** @stolen: Stolen memory interface. */
+ const struct intel_display_stolen_interface *stolen;
+
+ /* Generic independent functions */
+ struct {
+ /** @fence_priority_display: Set display priority. Optional. */
+ void (*fence_priority_display)(struct dma_fence *fence);
+
+ /** @has_auxccs: Are AuxCCS formats supported by the parent? Optional. */
+ bool (*has_auxccs)(struct drm_device *drm);
+
+ /** @has_fenced_regions: Support legacy fencing? Optional. */
+ bool (*has_fenced_regions)(struct drm_device *drm);
+
+ /** @vgpu_active: Is vGPU active? Optional. */
+ bool (*vgpu_active)(struct drm_device *drm);
+ };
};
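+
+/*
+ * Illustrative consumer-side sketch (hypothetical display code): per the
+ * comment above, both optional interface pointers and hooks marked
+ * "Optional" must be checked before use:
+ *
+ *	if (parent->rps && parent->rps->mark_interactive)
+ *		parent->rps->mark_interactive(drm, true);
+ */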
#endif
diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h
index d65be2cba2ab..0850738a30fc 100644
--- a/include/drm/intel/intel_lb_mei_interface.h
+++ b/include/drm/intel/intel_lb_mei_interface.h
@@ -53,7 +53,8 @@ enum intel_lb_status {
*/
struct intel_lb_component_ops {
/**
- * push_payload - Sends a payload to the authentication firmware
+ * @push_payload: Sends a payload to the authentication firmware
+ *
* @dev: Device struct corresponding to the mei device
* @type: Payload type (see &enum intel_lb_type)
* @flags: Payload flags bitmap (e.g. %INTEL_LB_FLAGS_IS_PERSISTENT)
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
index 442f9e9037dc..7a14dcb9f17b 100644
--- a/include/dt-bindings/clock/google,gs101.h
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -313,6 +313,42 @@
#define CLK_APM_PLL_DIV4_APM 70
#define CLK_APM_PLL_DIV16_APM 71
+/* CMU_DPU */
+#define CLK_MOUT_DPU_BUS_USER 1
+#define CLK_DOUT_DPU_BUSP 2
+#define CLK_GOUT_DPU_PCLK 3
+#define CLK_GOUT_DPU_CLK_DPU_OSCCLK_CLK 4
+#define CLK_GOUT_DPU_AD_APB_DPU_DMA_PCLKM 5
+#define CLK_GOUT_DPU_DPUF_ACLK_DMA 6
+#define CLK_GOUT_DPU_DPUF_ACLK_DPP 7
+#define CLK_GOUT_DPU_D_TZPC_DPU_PCLK 8
+#define CLK_GOUT_DPU_GPC_DPU_PCLK 9
+#define CLK_GOUT_DPU_LHM_AXI_P_DPU_I_CLK 10
+#define CLK_GOUT_DPU_LHS_AXI_D0_DPU_I_CLK 11
+#define CLK_GOUT_DPU_LHS_AXI_D1_DPU_I_CLK 12
+#define CLK_GOUT_DPU_LHS_AXI_D2_DPU_I_CLK 13
+#define CLK_GOUT_DPU_PPMU_DPUD0_ACLK 14
+#define CLK_GOUT_DPU_PPMU_DPUD0_PCLK 15
+#define CLK_GOUT_DPU_PPMU_DPUD1_ACLK 16
+#define CLK_GOUT_DPU_PPMU_DPUD1_PCLK 17
+#define CLK_GOUT_DPU_PPMU_DPUD2_ACLK 18
+#define CLK_GOUT_DPU_PPMU_DPUD2_PCLK 19
+#define CLK_GOUT_DPU_CLK_DPU_BUSD_CLK 20
+#define CLK_GOUT_DPU_CLK_DPU_BUSP_CLK 21
+#define CLK_GOUT_DPU_SSMT_DPU0_ACLK 22
+#define CLK_GOUT_DPU_SSMT_DPU0_PCLK 23
+#define CLK_GOUT_DPU_SSMT_DPU1_ACLK 24
+#define CLK_GOUT_DPU_SSMT_DPU1_PCLK 25
+#define CLK_GOUT_DPU_SSMT_DPU2_ACLK 26
+#define CLK_GOUT_DPU_SSMT_DPU2_PCLK 27
+#define CLK_GOUT_DPU_SYSMMU_DPUD0_CLK_S1 28
+#define CLK_GOUT_DPU_SYSMMU_DPUD0_CLK_S2 29
+#define CLK_GOUT_DPU_SYSMMU_DPUD1_CLK_S1 30
+#define CLK_GOUT_DPU_SYSMMU_DPUD1_CLK_S2 31
+#define CLK_GOUT_DPU_SYSMMU_DPUD2_CLK_S1 32
+#define CLK_GOUT_DPU_SYSMMU_DPUD2_CLK_S2 33
+#define CLK_GOUT_DPU_SYSREG_DPU_PCLK 34
+
/* CMU_HSI0 */
#define CLK_FOUT_USB_PLL 1
#define CLK_MOUT_PLL_USB 2
diff --git a/include/dt-bindings/clock/oxsemi,ox810se.h b/include/dt-bindings/clock/oxsemi,ox810se.h
deleted file mode 100644
index 7256365160f8..000000000000
--- a/include/dt-bindings/clock/oxsemi,ox810se.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef DT_CLOCK_OXSEMI_OX810SE_H
-#define DT_CLOCK_OXSEMI_OX810SE_H
-
-#define CLK_810_LEON 0
-#define CLK_810_DMA_SGDMA 1
-#define CLK_810_CIPHER 2
-#define CLK_810_SATA 3
-#define CLK_810_AUDIO 4
-#define CLK_810_USBMPH 5
-#define CLK_810_ETHA 6
-#define CLK_810_PCIEA 7
-#define CLK_810_NAND 8
-
-#endif /* DT_CLOCK_OXSEMI_OX810SE_H */
diff --git a/include/dt-bindings/clock/oxsemi,ox820.h b/include/dt-bindings/clock/oxsemi,ox820.h
deleted file mode 100644
index 55f4226e2f3f..000000000000
--- a/include/dt-bindings/clock/oxsemi,ox820.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef DT_CLOCK_OXSEMI_OX820_H
-#define DT_CLOCK_OXSEMI_OX820_H
-
-/* PLLs */
-#define CLK_820_PLLA 0
-#define CLK_820_PLLB 1
-
-/* Gate Clocks */
-#define CLK_820_LEON 2
-#define CLK_820_DMA_SGDMA 3
-#define CLK_820_CIPHER 4
-#define CLK_820_SD 5
-#define CLK_820_SATA 6
-#define CLK_820_AUDIO 7
-#define CLK_820_USBMPH 8
-#define CLK_820_ETHA 9
-#define CLK_820_PCIEA 10
-#define CLK_820_NAND 11
-#define CLK_820_PCIEB 12
-#define CLK_820_ETHB 13
-#define CLK_820_REF600 14
-#define CLK_820_USBDEV 15
-
-#endif /* DT_CLOCK_OXSEMI_OX820_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
index 4e3897b3669d..4265460bfb30 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8917.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -194,6 +194,7 @@
#define GCC_QUSB2_PHY_BCR 2
#define GCC_USB_HS_BCR 3
#define GCC_USB2_HS_PHY_ONLY_BCR 4
+#define GCC_MDSS_BCR 5
/* GDSCs */
#define CPP_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,mss-sc7180.h b/include/dt-bindings/clock/qcom,mss-sc7180.h
deleted file mode 100644
index f15a9ded2961..000000000000
--- a/include/dt-bindings/clock/qcom,mss-sc7180.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H
-#define _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H
-
-#define MSS_AXI_CRYPTO_CLK 0
-#define MSS_AXI_NAV_CLK 1
-
-#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
index 62aa12425592..d905804e6465 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-gcc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -387,6 +387,9 @@
#define GCC_USB4_2_PHY_RX0_CLK_SRC 377
#define GCC_USB4_2_PHY_RX1_CLK_SRC 378
#define GCC_USB4_2_PHY_SYS_CLK_SRC 379
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 380
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 381
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 382
/* GCC power domains */
#define GCC_PCIE_0_TUNNEL_GDSC 0
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
index 2a805e06487b..c4863e444458 100644
--- a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -31,5 +31,8 @@
#define R9A09G077_ETCLKC 19
#define R9A09G077_ETCLKD 20
#define R9A09G077_ETCLKE 21
+#define R9A09G077_XSPI_CLK0 22
+#define R9A09G077_XSPI_CLK1 23
+#define R9A09G077_PCLKCAN 24
#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
index 09da0ad33be6..0d53f1e65077 100644
--- a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -31,5 +31,8 @@
#define R9A09G087_ETCLKC 19
#define R9A09G087_ETCLKD 20
#define R9A09G087_ETCLKE 21
+#define R9A09G087_XSPI_CLK0 22
+#define R9A09G087_XSPI_CLK1 23
+#define R9A09G087_PCLKCAN 24
#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/clock/xlnx-versal-clk.h b/include/dt-bindings/clock/xlnx-versal-clk.h
deleted file mode 100644
index 264d634d226e..000000000000
--- a/include/dt-bindings/clock/xlnx-versal-clk.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 Xilinx Inc.
- *
- */
-
-#ifndef _DT_BINDINGS_CLK_VERSAL_H
-#define _DT_BINDINGS_CLK_VERSAL_H
-
-#define PMC_PLL 1
-#define APU_PLL 2
-#define RPU_PLL 3
-#define CPM_PLL 4
-#define NOC_PLL 5
-#define PLL_MAX 6
-#define PMC_PRESRC 7
-#define PMC_POSTCLK 8
-#define PMC_PLL_OUT 9
-#define PPLL 10
-#define NOC_PRESRC 11
-#define NOC_POSTCLK 12
-#define NOC_PLL_OUT 13
-#define NPLL 14
-#define APU_PRESRC 15
-#define APU_POSTCLK 16
-#define APU_PLL_OUT 17
-#define APLL 18
-#define RPU_PRESRC 19
-#define RPU_POSTCLK 20
-#define RPU_PLL_OUT 21
-#define RPLL 22
-#define CPM_PRESRC 23
-#define CPM_POSTCLK 24
-#define CPM_PLL_OUT 25
-#define CPLL 26
-#define PPLL_TO_XPD 27
-#define NPLL_TO_XPD 28
-#define APLL_TO_XPD 29
-#define RPLL_TO_XPD 30
-#define EFUSE_REF 31
-#define SYSMON_REF 32
-#define IRO_SUSPEND_REF 33
-#define USB_SUSPEND 34
-#define SWITCH_TIMEOUT 35
-#define RCLK_PMC 36
-#define RCLK_LPD 37
-#define WDT 38
-#define TTC0 39
-#define TTC1 40
-#define TTC2 41
-#define TTC3 42
-#define GEM_TSU 43
-#define GEM_TSU_LB 44
-#define MUXED_IRO_DIV2 45
-#define MUXED_IRO_DIV4 46
-#define PSM_REF 47
-#define GEM0_RX 48
-#define GEM0_TX 49
-#define GEM1_RX 50
-#define GEM1_TX 51
-#define CPM_CORE_REF 52
-#define CPM_LSBUS_REF 53
-#define CPM_DBG_REF 54
-#define CPM_AUX0_REF 55
-#define CPM_AUX1_REF 56
-#define QSPI_REF 57
-#define OSPI_REF 58
-#define SDIO0_REF 59
-#define SDIO1_REF 60
-#define PMC_LSBUS_REF 61
-#define I2C_REF 62
-#define TEST_PATTERN_REF 63
-#define DFT_OSC_REF 64
-#define PMC_PL0_REF 65
-#define PMC_PL1_REF 66
-#define PMC_PL2_REF 67
-#define PMC_PL3_REF 68
-#define CFU_REF 69
-#define SPARE_REF 70
-#define NPI_REF 71
-#define HSM0_REF 72
-#define HSM1_REF 73
-#define SD_DLL_REF 74
-#define FPD_TOP_SWITCH 75
-#define FPD_LSBUS 76
-#define ACPU 77
-#define DBG_TRACE 78
-#define DBG_FPD 79
-#define LPD_TOP_SWITCH 80
-#define ADMA 81
-#define LPD_LSBUS 82
-#define CPU_R5 83
-#define CPU_R5_CORE 84
-#define CPU_R5_OCM 85
-#define CPU_R5_OCM2 86
-#define IOU_SWITCH 87
-#define GEM0_REF 88
-#define GEM1_REF 89
-#define GEM_TSU_REF 90
-#define USB0_BUS_REF 91
-#define UART0_REF 92
-#define UART1_REF 93
-#define SPI0_REF 94
-#define SPI1_REF 95
-#define CAN0_REF 96
-#define CAN1_REF 97
-#define I2C0_REF 98
-#define I2C1_REF 99
-#define DBG_LPD 100
-#define TIMESTAMP_REF 101
-#define DBG_TSTMP 102
-#define CPM_TOPSW_REF 103
-#define USB3_DUAL_REF 104
-#define OUTCLK_MAX 105
-#define REF_CLK 106
-#define PL_ALT_REF_CLK 107
-#define MUXED_IRO 108
-#define PL_EXT 109
-#define PL_LB 110
-#define MIO_50_OR_51 111
-#define MIO_24_OR_25 112
-
-#endif
diff --git a/include/dt-bindings/clock/xlnx-zynqmp-clk.h b/include/dt-bindings/clock/xlnx-zynqmp-clk.h
deleted file mode 100644
index f0f7ddd3dcbd..000000000000
--- a/include/dt-bindings/clock/xlnx-zynqmp-clk.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Xilinx Zynq MPSoC Firmware layer
- *
- * Copyright (C) 2014-2018 Xilinx, Inc.
- *
- */
-
-#ifndef _DT_BINDINGS_CLK_ZYNQMP_H
-#define _DT_BINDINGS_CLK_ZYNQMP_H
-
-/*
- * These bindings are deprecated, because they do not match the actual
- * concept of bindings but rather contain pure firmware values.
- * Instead include the header in the DTS source directory.
- */
-#warning "These bindings are deprecated. Instead use the header in the DTS source directory."
-
-#define IOPLL 0
-#define RPLL 1
-#define APLL 2
-#define DPLL 3
-#define VPLL 4
-#define IOPLL_TO_FPD 5
-#define RPLL_TO_FPD 6
-#define APLL_TO_LPD 7
-#define DPLL_TO_LPD 8
-#define VPLL_TO_LPD 9
-#define ACPU 10
-#define ACPU_HALF 11
-#define DBF_FPD 12
-#define DBF_LPD 13
-#define DBG_TRACE 14
-#define DBG_TSTMP 15
-#define DP_VIDEO_REF 16
-#define DP_AUDIO_REF 17
-#define DP_STC_REF 18
-#define GDMA_REF 19
-#define DPDMA_REF 20
-#define DDR_REF 21
-#define SATA_REF 22
-#define PCIE_REF 23
-#define GPU_REF 24
-#define GPU_PP0_REF 25
-#define GPU_PP1_REF 26
-#define TOPSW_MAIN 27
-#define TOPSW_LSBUS 28
-#define GTGREF0_REF 29
-#define LPD_SWITCH 30
-#define LPD_LSBUS 31
-#define USB0_BUS_REF 32
-#define USB1_BUS_REF 33
-#define USB3_DUAL_REF 34
-#define USB0 35
-#define USB1 36
-#define CPU_R5 37
-#define CPU_R5_CORE 38
-#define CSU_SPB 39
-#define CSU_PLL 40
-#define PCAP 41
-#define IOU_SWITCH 42
-#define GEM_TSU_REF 43
-#define GEM_TSU 44
-#define GEM0_TX 45
-#define GEM1_TX 46
-#define GEM2_TX 47
-#define GEM3_TX 48
-#define GEM0_RX 49
-#define GEM1_RX 50
-#define GEM2_RX 51
-#define GEM3_RX 52
-#define QSPI_REF 53
-#define SDIO0_REF 54
-#define SDIO1_REF 55
-#define UART0_REF 56
-#define UART1_REF 57
-#define SPI0_REF 58
-#define SPI1_REF 59
-#define NAND_REF 60
-#define I2C0_REF 61
-#define I2C1_REF 62
-#define CAN0_REF 63
-#define CAN1_REF 64
-#define CAN0 65
-#define CAN1 66
-#define DLL_REF 67
-#define ADMA_REF 68
-#define TIMESTAMP_REF 69
-#define AMS_REF 70
-#define PL0_REF 71
-#define PL1_REF 72
-#define PL2_REF 73
-#define PL3_REF 74
-#define WDT 75
-#define IOPLL_INT 76
-#define IOPLL_PRE_SRC 77
-#define IOPLL_HALF 78
-#define IOPLL_INT_MUX 79
-#define IOPLL_POST_SRC 80
-#define RPLL_INT 81
-#define RPLL_PRE_SRC 82
-#define RPLL_HALF 83
-#define RPLL_INT_MUX 84
-#define RPLL_POST_SRC 85
-#define APLL_INT 86
-#define APLL_PRE_SRC 87
-#define APLL_HALF 88
-#define APLL_INT_MUX 89
-#define APLL_POST_SRC 90
-#define DPLL_INT 91
-#define DPLL_PRE_SRC 92
-#define DPLL_HALF 93
-#define DPLL_INT_MUX 94
-#define DPLL_POST_SRC 95
-#define VPLL_INT 96
-#define VPLL_PRE_SRC 97
-#define VPLL_HALF 98
-#define VPLL_INT_MUX 99
-#define VPLL_POST_SRC 100
-#define CAN0_MIO 101
-#define CAN1_MIO 102
-#define ACPU_FULL 103
-#define GEM0_REF 104
-#define GEM1_REF 105
-#define GEM2_REF 106
-#define GEM3_REF 107
-#define GEM0_REF_UNG 108
-#define GEM1_REF_UNG 109
-#define GEM2_REF_UNG 110
-#define GEM3_REF_UNG 111
-#define LPD_WDT 112
-
-#endif
diff --git a/include/dt-bindings/dma/jz4775-dma.h b/include/dt-bindings/dma/jz4775-dma.h
deleted file mode 100644
index 8d27e2c69dca..000000000000
--- a/include/dt-bindings/dma/jz4775-dma.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * This header provides macros for JZ4775 DMA bindings.
- *
- * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
- */
-
-#ifndef __DT_BINDINGS_DMA_JZ4775_DMA_H__
-#define __DT_BINDINGS_DMA_JZ4775_DMA_H__
-
-/*
- * Request type numbers for the JZ4775 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define JZ4775_DMA_I2S0_TX 0x6
-#define JZ4775_DMA_I2S0_RX 0x7
-#define JZ4775_DMA_AUTO 0x8
-#define JZ4775_DMA_SADC_RX 0x9
-#define JZ4775_DMA_UART3_TX 0x0e
-#define JZ4775_DMA_UART3_RX 0x0f
-#define JZ4775_DMA_UART2_TX 0x10
-#define JZ4775_DMA_UART2_RX 0x11
-#define JZ4775_DMA_UART1_TX 0x12
-#define JZ4775_DMA_UART1_RX 0x13
-#define JZ4775_DMA_UART0_TX 0x14
-#define JZ4775_DMA_UART0_RX 0x15
-#define JZ4775_DMA_SSI0_TX 0x16
-#define JZ4775_DMA_SSI0_RX 0x17
-#define JZ4775_DMA_MSC0_TX 0x1a
-#define JZ4775_DMA_MSC0_RX 0x1b
-#define JZ4775_DMA_MSC1_TX 0x1c
-#define JZ4775_DMA_MSC1_RX 0x1d
-#define JZ4775_DMA_MSC2_TX 0x1e
-#define JZ4775_DMA_MSC2_RX 0x1f
-#define JZ4775_DMA_PCM0_TX 0x20
-#define JZ4775_DMA_PCM0_RX 0x21
-#define JZ4775_DMA_SMB0_TX 0x24
-#define JZ4775_DMA_SMB0_RX 0x25
-#define JZ4775_DMA_SMB1_TX 0x26
-#define JZ4775_DMA_SMB1_RX 0x27
-#define JZ4775_DMA_SMB2_TX 0x28
-#define JZ4775_DMA_SMB2_RX 0x29
-
-#endif /* __DT_BINDINGS_DMA_JZ4775_DMA_H__ */
diff --git a/include/dt-bindings/dma/x2000-dma.h b/include/dt-bindings/dma/x2000-dma.h
deleted file mode 100644
index db2cd4830b00..000000000000
--- a/include/dt-bindings/dma/x2000-dma.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * This header provides macros for X2000 DMA bindings.
- *
- * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
- */
-
-#ifndef __DT_BINDINGS_DMA_X2000_DMA_H__
-#define __DT_BINDINGS_DMA_X2000_DMA_H__
-
-/*
- * Request type numbers for the X2000 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define X2000_DMA_AUTO 0x8
-#define X2000_DMA_UART5_TX 0xa
-#define X2000_DMA_UART5_RX 0xb
-#define X2000_DMA_UART4_TX 0xc
-#define X2000_DMA_UART4_RX 0xd
-#define X2000_DMA_UART3_TX 0xe
-#define X2000_DMA_UART3_RX 0xf
-#define X2000_DMA_UART2_TX 0x10
-#define X2000_DMA_UART2_RX 0x11
-#define X2000_DMA_UART1_TX 0x12
-#define X2000_DMA_UART1_RX 0x13
-#define X2000_DMA_UART0_TX 0x14
-#define X2000_DMA_UART0_RX 0x15
-#define X2000_DMA_SSI0_TX 0x16
-#define X2000_DMA_SSI0_RX 0x17
-#define X2000_DMA_SSI1_TX 0x18
-#define X2000_DMA_SSI1_RX 0x19
-#define X2000_DMA_I2C0_TX 0x24
-#define X2000_DMA_I2C0_RX 0x25
-#define X2000_DMA_I2C1_TX 0x26
-#define X2000_DMA_I2C1_RX 0x27
-#define X2000_DMA_I2C2_TX 0x28
-#define X2000_DMA_I2C2_RX 0x29
-#define X2000_DMA_I2C3_TX 0x2a
-#define X2000_DMA_I2C3_RX 0x2b
-#define X2000_DMA_I2C4_TX 0x2c
-#define X2000_DMA_I2C4_RX 0x2d
-#define X2000_DMA_I2C5_TX 0x2e
-#define X2000_DMA_I2C5_RX 0x2f
-#define X2000_DMA_UART6_TX 0x30
-#define X2000_DMA_UART6_RX 0x31
-#define X2000_DMA_UART7_TX 0x32
-#define X2000_DMA_UART7_RX 0x33
-#define X2000_DMA_UART8_TX 0x34
-#define X2000_DMA_UART8_RX 0x35
-#define X2000_DMA_UART9_TX 0x36
-#define X2000_DMA_UART9_RX 0x37
-#define X2000_DMA_SADC_RX 0x38
-
-#endif /* __DT_BINDINGS_DMA_X2000_DMA_H__ */
diff --git a/include/dt-bindings/gce/mt6779-gce.h b/include/dt-bindings/gce/mt6779-gce.h
deleted file mode 100644
index 06101316ace4..000000000000
--- a/include/dt-bindings/gce/mt6779-gce.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 MediaTek Inc.
- * Author: Dennis-YC Hsieh <dennis-yc.hsieh@mediatek.com>
- */
-
-#ifndef _DT_BINDINGS_GCE_MT6779_H
-#define _DT_BINDINGS_GCE_MT6779_H
-
-#define CMDQ_NO_TIMEOUT 0xffffffff
-
-/* GCE HW thread priority */
-#define CMDQ_THR_PRIO_LOWEST 0
-#define CMDQ_THR_PRIO_1 1
-#define CMDQ_THR_PRIO_2 2
-#define CMDQ_THR_PRIO_3 3
-#define CMDQ_THR_PRIO_4 4
-#define CMDQ_THR_PRIO_5 5
-#define CMDQ_THR_PRIO_6 6
-#define CMDQ_THR_PRIO_HIGHEST 7
-
-/* GCE subsys table */
-#define SUBSYS_1300XXXX 0
-#define SUBSYS_1400XXXX 1
-#define SUBSYS_1401XXXX 2
-#define SUBSYS_1402XXXX 3
-#define SUBSYS_1502XXXX 4
-#define SUBSYS_1880XXXX 5
-#define SUBSYS_1881XXXX 6
-#define SUBSYS_1882XXXX 7
-#define SUBSYS_1883XXXX 8
-#define SUBSYS_1884XXXX 9
-#define SUBSYS_1000XXXX 10
-#define SUBSYS_1001XXXX 11
-#define SUBSYS_1002XXXX 12
-#define SUBSYS_1003XXXX 13
-#define SUBSYS_1004XXXX 14
-#define SUBSYS_1005XXXX 15
-#define SUBSYS_1020XXXX 16
-#define SUBSYS_1028XXXX 17
-#define SUBSYS_1700XXXX 18
-#define SUBSYS_1701XXXX 19
-#define SUBSYS_1702XXXX 20
-#define SUBSYS_1703XXXX 21
-#define SUBSYS_1800XXXX 22
-#define SUBSYS_1801XXXX 23
-#define SUBSYS_1802XXXX 24
-#define SUBSYS_1804XXXX 25
-#define SUBSYS_1805XXXX 26
-#define SUBSYS_1808XXXX 27
-#define SUBSYS_180aXXXX 28
-#define SUBSYS_180bXXXX 29
-#define CMDQ_SUBSYS_OFF 32
-
-/* GCE hardware events */
-#define CMDQ_EVENT_DISP_RDMA0_SOF 0
-#define CMDQ_EVENT_DISP_RDMA1_SOF 1
-#define CMDQ_EVENT_MDP_RDMA0_SOF 2
-#define CMDQ_EVENT_MDP_RDMA1_SOF 3
-#define CMDQ_EVENT_MDP_RSZ0_SOF 4
-#define CMDQ_EVENT_MDP_RSZ1_SOF 5
-#define CMDQ_EVENT_MDP_TDSHP_SOF 6
-#define CMDQ_EVENT_MDP_WROT0_SOF 7
-#define CMDQ_EVENT_MDP_WROT1_SOF 8
-#define CMDQ_EVENT_DISP_OVL0_SOF 9
-#define CMDQ_EVENT_DISP_2L_OVL0_SOF 10
-#define CMDQ_EVENT_DISP_2L_OVL1_SOF 11
-#define CMDQ_EVENT_DISP_WDMA0_SOF 12
-#define CMDQ_EVENT_DISP_COLOR0_SOF 13
-#define CMDQ_EVENT_DISP_CCORR0_SOF 14
-#define CMDQ_EVENT_DISP_AAL0_SOF 15
-#define CMDQ_EVENT_DISP_GAMMA0_SOF 16
-#define CMDQ_EVENT_DISP_DITHER0_SOF 17
-#define CMDQ_EVENT_DISP_PWM0_SOF 18
-#define CMDQ_EVENT_DISP_DSI0_SOF 19
-#define CMDQ_EVENT_DISP_DPI0_SOF 20
-#define CMDQ_EVENT_DISP_POSTMASK0_SOF 21
-#define CMDQ_EVENT_DISP_RSZ0_SOF 22
-#define CMDQ_EVENT_MDP_AAL_SOF 23
-#define CMDQ_EVENT_MDP_CCORR_SOF 24
-#define CMDQ_EVENT_DISP_DBI0_SOF 25
-#define CMDQ_EVENT_ISP_RELAY_SOF 26
-#define CMDQ_EVENT_IPU_RELAY_SOF 27
-#define CMDQ_EVENT_DISP_RDMA0_EOF 28
-#define CMDQ_EVENT_DISP_RDMA1_EOF 29
-#define CMDQ_EVENT_MDP_RDMA0_EOF 30
-#define CMDQ_EVENT_MDP_RDMA1_EOF 31
-#define CMDQ_EVENT_MDP_RSZ0_EOF 32
-#define CMDQ_EVENT_MDP_RSZ1_EOF 33
-#define CMDQ_EVENT_MDP_TDSHP_EOF 34
-#define CMDQ_EVENT_MDP_WROT0_W_EOF 35
-#define CMDQ_EVENT_MDP_WROT1_W_EOF 36
-#define CMDQ_EVENT_DISP_OVL0_EOF 37
-#define CMDQ_EVENT_DISP_2L_OVL0_EOF 38
-#define CMDQ_EVENT_DISP_2L_OVL1_EOF 39
-#define CMDQ_EVENT_DISP_WDMA0_EOF 40
-#define CMDQ_EVENT_DISP_COLOR0_EOF 41
-#define CMDQ_EVENT_DISP_CCORR0_EOF 42
-#define CMDQ_EVENT_DISP_AAL0_EOF 43
-#define CMDQ_EVENT_DISP_GAMMA0_EOF 44
-#define CMDQ_EVENT_DISP_DITHER0_EOF 45
-#define CMDQ_EVENT_DISP_DSI0_EOF 46
-#define CMDQ_EVENT_DISP_DPI0_EOF 47
-#define CMDQ_EVENT_DISP_RSZ0_EOF 49
-#define CMDQ_EVENT_MDP_AAL_FRAME_DONE 50
-#define CMDQ_EVENT_MDP_CCORR_FRAME_DONE 51
-#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 52
-#define CMDQ_EVENT_MUTEX0_STREAM_EOF 130
-#define CMDQ_EVENT_MUTEX1_STREAM_EOF 131
-#define CMDQ_EVENT_MUTEX2_STREAM_EOF 132
-#define CMDQ_EVENT_MUTEX3_STREAM_EOF 133
-#define CMDQ_EVENT_MUTEX4_STREAM_EOF 134
-#define CMDQ_EVENT_MUTEX5_STREAM_EOF 135
-#define CMDQ_EVENT_MUTEX6_STREAM_EOF 136
-#define CMDQ_EVENT_MUTEX7_STREAM_EOF 137
-#define CMDQ_EVENT_MUTEX8_STREAM_EOF 138
-#define CMDQ_EVENT_MUTEX9_STREAM_EOF 139
-#define CMDQ_EVENT_MUTEX10_STREAM_EOF 140
-#define CMDQ_EVENT_MUTEX11_STREAM_EOF 141
-#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 142
-#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 143
-#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 144
-#define CMDQ_EVENT_DISP_RDMA3_UNDERRUN 145
-#define CMDQ_EVENT_DSI0_TE 146
-#define CMDQ_EVENT_DSI0_IRQ_EVENT 147
-#define CMDQ_EVENT_DSI0_DONE_EVENT 148
-#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE 150
-#define CMDQ_EVENT_DISP_WDMA0_RST_DONE 151
-#define CMDQ_EVENT_MDP_WROT0_RST_DONE 153
-#define CMDQ_EVENT_MDP_RDMA0_RST_DONE 154
-#define CMDQ_EVENT_DISP_OVL0_RST_DONE 155
-#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE 156
-#define CMDQ_EVENT_DISP_OVL1_2L_RST_DONE 157
-#define CMDQ_EVENT_DIP_CQ_THREAD0_EOF 257
-#define CMDQ_EVENT_DIP_CQ_THREAD1_EOF 258
-#define CMDQ_EVENT_DIP_CQ_THREAD2_EOF 259
-#define CMDQ_EVENT_DIP_CQ_THREAD3_EOF 260
-#define CMDQ_EVENT_DIP_CQ_THREAD4_EOF 261
-#define CMDQ_EVENT_DIP_CQ_THREAD5_EOF 262
-#define CMDQ_EVENT_DIP_CQ_THREAD6_EOF 263
-#define CMDQ_EVENT_DIP_CQ_THREAD7_EOF 264
-#define CMDQ_EVENT_DIP_CQ_THREAD8_EOF 265
-#define CMDQ_EVENT_DIP_CQ_THREAD9_EOF 266
-#define CMDQ_EVENT_DIP_CQ_THREAD10_EOF 267
-#define CMDQ_EVENT_DIP_CQ_THREAD11_EOF 268
-#define CMDQ_EVENT_DIP_CQ_THREAD12_EOF 269
-#define CMDQ_EVENT_DIP_CQ_THREAD13_EOF 270
-#define CMDQ_EVENT_DIP_CQ_THREAD14_EOF 271
-#define CMDQ_EVENT_DIP_CQ_THREAD15_EOF 272
-#define CMDQ_EVENT_DIP_CQ_THREAD16_EOF 273
-#define CMDQ_EVENT_DIP_CQ_THREAD17_EOF 274
-#define CMDQ_EVENT_DIP_CQ_THREAD18_EOF 275
-#define CMDQ_EVENT_DIP_DMA_ERR_EVENT 276
-#define CMDQ_EVENT_AMD_FRAME_DONE 277
-#define CMDQ_EVENT_MFB_DONE 278
-#define CMDQ_EVENT_WPE_A_EOF 279
-#define CMDQ_EVENT_VENC_EOF 289
-#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 290
-#define CMDQ_EVENT_JPEG_ENC_EOF 291
-#define CMDQ_EVENT_VENC_MB_DONE 292
-#define CMDQ_EVENT_VENC_128BYTE_CNT_DONE 293
-#define CMDQ_EVENT_ISP_FRAME_DONE_A 321
-#define CMDQ_EVENT_ISP_FRAME_DONE_B 322
-#define CMDQ_EVENT_ISP_FRAME_DONE_C 323
-#define CMDQ_EVENT_ISP_CAMSV_0_PASS1_DONE 324
-#define CMDQ_EVENT_ISP_CAMSV_0_2_PASS1_DONE 325
-#define CMDQ_EVENT_ISP_CAMSV_1_PASS1_DONE 326
-#define CMDQ_EVENT_ISP_CAMSV_2_PASS1_DONE 327
-#define CMDQ_EVENT_ISP_CAMSV_3_PASS1_DONE 328
-#define CMDQ_EVENT_ISP_TSF_DONE 329
-#define CMDQ_EVENT_SENINF_0_FIFO_FULL 330
-#define CMDQ_EVENT_SENINF_1_FIFO_FULL 331
-#define CMDQ_EVENT_SENINF_2_FIFO_FULL 332
-#define CMDQ_EVENT_SENINF_3_FIFO_FULL 333
-#define CMDQ_EVENT_SENINF_4_FIFO_FULL 334
-#define CMDQ_EVENT_SENINF_5_FIFO_FULL 335
-#define CMDQ_EVENT_SENINF_6_FIFO_FULL 336
-#define CMDQ_EVENT_SENINF_7_FIFO_FULL 337
-#define CMDQ_EVENT_TG_OVRUN_A_INT_DLY 338
-#define CMDQ_EVENT_TG_OVRUN_B_INT_DLY 339
-#define CMDQ_EVENT_TG_OVRUN_C_INT 340
-#define CMDQ_EVENT_TG_GRABERR_A_INT_DLY 341
-#define CMDQ_EVENT_TG_GRABERR_B_INT_DLY 342
-#define CMDQ_EVENT_TG_GRABERR_C_INT 343
-#define CMDQ_EVENT_CQ_VR_SNAP_A_INT_DLY 344
-#define CMDQ_EVENT_CQ_VR_SNAP_B_INT_DLY 345
-#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 346
-#define CMDQ_EVENT_DMA_R1_ERROR_A_INT_DLY 347
-#define CMDQ_EVENT_DMA_R1_ERROR_B_INT_DLY 348
-#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 349
-#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_0 353
-#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_1 354
-#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_2 355
-#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_3 356
-#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_0 385
-#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_1 386
-#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_2 387
-#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_3 388
-#define CMDQ_EVENT_VDEC_EVENT_0 416
-#define CMDQ_EVENT_VDEC_EVENT_1 417
-#define CMDQ_EVENT_VDEC_EVENT_2 418
-#define CMDQ_EVENT_VDEC_EVENT_3 419
-#define CMDQ_EVENT_VDEC_EVENT_4 420
-#define CMDQ_EVENT_VDEC_EVENT_5 421
-#define CMDQ_EVENT_VDEC_EVENT_6 422
-#define CMDQ_EVENT_VDEC_EVENT_7 423
-#define CMDQ_EVENT_VDEC_EVENT_8 424
-#define CMDQ_EVENT_VDEC_EVENT_9 425
-#define CMDQ_EVENT_VDEC_EVENT_10 426
-#define CMDQ_EVENT_VDEC_EVENT_11 427
-#define CMDQ_EVENT_VDEC_EVENT_12 428
-#define CMDQ_EVENT_VDEC_EVENT_13 429
-#define CMDQ_EVENT_VDEC_EVENT_14 430
-#define CMDQ_EVENT_VDEC_EVENT_15 431
-#define CMDQ_EVENT_FDVT_DONE 449
-#define CMDQ_EVENT_FE_DONE 450
-#define CMDQ_EVENT_RSC_EOF 451
-#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 452
-#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 453
-#define CMDQ_EVENT_DSI0_TE_INFRA 898
-
-#endif
diff --git a/include/dt-bindings/gpio/nvidia,tegra264-gpio.h b/include/dt-bindings/gpio/nvidia,tegra264-gpio.h
new file mode 100644
index 000000000000..25fb66f9710a
--- /dev/null
+++ b/include/dt-bindings/gpio/nvidia,tegra264-gpio.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/* Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra264-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA264_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA264_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA264_MAIN_GPIO_PORT_T 0
+#define TEGRA264_MAIN_GPIO_PORT_U 1
+#define TEGRA264_MAIN_GPIO_PORT_V 2
+#define TEGRA264_MAIN_GPIO_PORT_W 3
+#define TEGRA264_MAIN_GPIO_PORT_AL 4
+#define TEGRA264_MAIN_GPIO_PORT_Y 5
+#define TEGRA264_MAIN_GPIO_PORT_Z 6
+#define TEGRA264_MAIN_GPIO_PORT_X 7
+#define TEGRA264_MAIN_GPIO_PORT_H 8
+#define TEGRA264_MAIN_GPIO_PORT_J 9
+#define TEGRA264_MAIN_GPIO_PORT_K 10
+#define TEGRA264_MAIN_GPIO_PORT_L 11
+#define TEGRA264_MAIN_GPIO_PORT_M 12
+#define TEGRA264_MAIN_GPIO_PORT_P 13
+#define TEGRA264_MAIN_GPIO_PORT_Q 14
+#define TEGRA264_MAIN_GPIO_PORT_R 15
+#define TEGRA264_MAIN_GPIO_PORT_S 16
+#define TEGRA264_MAIN_GPIO_PORT_F 17
+#define TEGRA264_MAIN_GPIO_PORT_G 18
+
+#define TEGRA264_MAIN_GPIO(port, offset) \
+ ((TEGRA264_MAIN_GPIO_PORT_##port * 8) + (offset))
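+
+/*
+ * Illustrative DTS fragment (hypothetical consumer property and phandle
+ * label): the first cell selects the GPIO, the second carries flags from
+ * gpio.h:
+ *
+ *	reset-gpios = <&gpio_main TEGRA264_MAIN_GPIO(T, 2) GPIO_ACTIVE_LOW>;
+ */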
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA264_AON_GPIO_PORT_AA 0
+#define TEGRA264_AON_GPIO_PORT_BB 1
+#define TEGRA264_AON_GPIO_PORT_CC 2
+#define TEGRA264_AON_GPIO_PORT_DD 3
+#define TEGRA264_AON_GPIO_PORT_EE 4
+
+#define TEGRA264_AON_GPIO(port, offset) \
+ ((TEGRA264_AON_GPIO_PORT_##port * 8) + (offset))
+
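+/* GPIOs implemented by UPHY GPIO controller */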
+#define TEGRA264_UPHY_GPIO_PORT_A 0
+#define TEGRA264_UPHY_GPIO_PORT_B 1
+#define TEGRA264_UPHY_GPIO_PORT_C 2
+#define TEGRA264_UPHY_GPIO_PORT_D 3
+#define TEGRA264_UPHY_GPIO_PORT_E 4
+
+#define TEGRA264_UPHY_GPIO(port, offset) \
+ ((TEGRA264_UPHY_GPIO_PORT_##port * 8) + (offset))
+
+#endif
diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h
deleted file mode 100644
index 3fb438a96e35..000000000000
--- a/include/dt-bindings/memory/mt6779-larb-port.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 MediaTek Inc.
- * Author: Chao Hao <chao.hao@mediatek.com>
- */
-
-#ifndef _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_
-#define _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_
-
-#include <dt-bindings/memory/mtk-memory-port.h>
-
-#define M4U_LARB0_ID 0
-#define M4U_LARB1_ID 1
-#define M4U_LARB2_ID 2
-#define M4U_LARB3_ID 3
-#define M4U_LARB4_ID 4
-#define M4U_LARB5_ID 5
-#define M4U_LARB6_ID 6
-#define M4U_LARB7_ID 7
-#define M4U_LARB8_ID 8
-#define M4U_LARB9_ID 9
-#define M4U_LARB10_ID 10
-#define M4U_LARB11_ID 11
-
-/* larb0 */
-#define M4U_PORT_DISP_POSTMASK0 MTK_M4U_ID(M4U_LARB0_ID, 0)
-#define M4U_PORT_DISP_OVL0_HDR MTK_M4U_ID(M4U_LARB0_ID, 1)
-#define M4U_PORT_DISP_OVL1_HDR MTK_M4U_ID(M4U_LARB0_ID, 2)
-#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 3)
-#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
-#define M4U_PORT_DISP_PVRIC0 MTK_M4U_ID(M4U_LARB0_ID, 5)
-#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6)
-#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 7)
-#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 8)
-
-/* larb1 */
-#define M4U_PORT_DISP_OVL0_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 0)
-#define M4U_PORT_DISP_OVL1_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 1)
-#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB1_ID, 2)
-#define M4U_PORT_DISP_OVL1_2L MTK_M4U_ID(M4U_LARB1_ID, 3)
-#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 4)
-#define M4U_PORT_MDP_PVRIC0 MTK_M4U_ID(M4U_LARB1_ID, 5)
-#define M4U_PORT_MDP_PVRIC1 MTK_M4U_ID(M4U_LARB1_ID, 6)
-#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB1_ID, 7)
-#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 8)
-#define M4U_PORT_MDP_WROT0_R MTK_M4U_ID(M4U_LARB1_ID, 9)
-#define M4U_PORT_MDP_WROT0_W MTK_M4U_ID(M4U_LARB1_ID, 10)
-#define M4U_PORT_MDP_WROT1_R MTK_M4U_ID(M4U_LARB1_ID, 11)
-#define M4U_PORT_MDP_WROT1_W MTK_M4U_ID(M4U_LARB1_ID, 12)
-#define M4U_PORT_DISP_FAKE1 MTK_M4U_ID(M4U_LARB1_ID, 13)
-
-/* larb2-VDEC */
-#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
-#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
-#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
-#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
-#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
-#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
-#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
-#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 7)
-#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB2_ID, 8)
-#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 9)
-#define M4U_PORT_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(M4U_LARB2_ID, 10)
-#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB2_ID, 11)
-
-/* larb3-VENC */
-#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
-#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
-#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
-#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
-#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
-#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
-#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 6)
-#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
-#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
-#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB3_ID, 9)
-#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
-#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
-#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 12)
-#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 13)
-#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 14)
-#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 15)
-#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 16)
-#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 17)
-#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 18)
-
-/* larb4-dummy */
-
-/* larb5-IMG */
-#define M4U_PORT_IMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 0)
-#define M4U_PORT_IMGBI_D1 MTK_M4U_ID(M4U_LARB5_ID, 1)
-#define M4U_PORT_DMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 2)
-#define M4U_PORT_DEPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 3)
-#define M4U_PORT_LCEI_D1 MTK_M4U_ID(M4U_LARB5_ID, 4)
-#define M4U_PORT_SMTI_D1 MTK_M4U_ID(M4U_LARB5_ID, 5)
-#define M4U_PORT_SMTO_D2 MTK_M4U_ID(M4U_LARB5_ID, 6)
-#define M4U_PORT_SMTO_D1 MTK_M4U_ID(M4U_LARB5_ID, 7)
-#define M4U_PORT_CRZO_D1 MTK_M4U_ID(M4U_LARB5_ID, 8)
-#define M4U_PORT_IMG3O_D1 MTK_M4U_ID(M4U_LARB5_ID, 9)
-#define M4U_PORT_VIPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 10)
-#define M4U_PORT_WPE_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 11)
-#define M4U_PORT_WPE_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 12)
-#define M4U_PORT_WPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 13)
-#define M4U_PORT_TIMGO_D1 MTK_M4U_ID(M4U_LARB5_ID, 14)
-#define M4U_PORT_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 15)
-#define M4U_PORT_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 16)
-#define M4U_PORT_MFB_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 17)
-#define M4U_PORT_MFB_RDMA3 MTK_M4U_ID(M4U_LARB5_ID, 18)
-#define M4U_PORT_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 19)
-#define M4U_PORT_RESERVE1 MTK_M4U_ID(M4U_LARB5_ID, 20)
-#define M4U_PORT_RESERVE2 MTK_M4U_ID(M4U_LARB5_ID, 21)
-#define M4U_PORT_RESERVE3 MTK_M4U_ID(M4U_LARB5_ID, 22)
-#define M4U_PORT_RESERVE4 MTK_M4U_ID(M4U_LARB5_ID, 23)
-#define M4U_PORT_RESERVE5 MTK_M4U_ID(M4U_LARB5_ID, 24)
-#define M4U_PORT_RESERVE6 MTK_M4U_ID(M4U_LARB5_ID, 25)
-
-/* larb6-IMG-VPU */
-#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB6_ID, 0)
-#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB6_ID, 1)
-#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB6_ID, 2)
-
-/* larb7-DVS */
-#define M4U_PORT_DVS_RDMA MTK_M4U_ID(M4U_LARB7_ID, 0)
-#define M4U_PORT_DVS_WDMA MTK_M4U_ID(M4U_LARB7_ID, 1)
-#define M4U_PORT_DVP_RDMA MTK_M4U_ID(M4U_LARB7_ID, 2)
-#define M4U_PORT_DVP_WDMA MTK_M4U_ID(M4U_LARB7_ID, 3)
-
-/* larb8-IPESYS */
-#define M4U_PORT_FDVT_RDA MTK_M4U_ID(M4U_LARB8_ID, 0)
-#define M4U_PORT_FDVT_RDB MTK_M4U_ID(M4U_LARB8_ID, 1)
-#define M4U_PORT_FDVT_WRA MTK_M4U_ID(M4U_LARB8_ID, 2)
-#define M4U_PORT_FDVT_WRB MTK_M4U_ID(M4U_LARB8_ID, 3)
-#define M4U_PORT_FE_RD0 MTK_M4U_ID(M4U_LARB8_ID, 4)
-#define M4U_PORT_FE_RD1 MTK_M4U_ID(M4U_LARB8_ID, 5)
-#define M4U_PORT_FE_WR0 MTK_M4U_ID(M4U_LARB8_ID, 6)
-#define M4U_PORT_FE_WR1 MTK_M4U_ID(M4U_LARB8_ID, 7)
-#define M4U_PORT_RSC_RDMA0 MTK_M4U_ID(M4U_LARB8_ID, 8)
-#define M4U_PORT_RSC_WDMA MTK_M4U_ID(M4U_LARB8_ID, 9)
-
-/* larb9-CAM */
-#define M4U_PORT_CAM_IMGO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 0)
-#define M4U_PORT_CAM_RRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 1)
-#define M4U_PORT_CAM_LSCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 2)
-#define M4U_PORT_CAM_BPCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 3)
-#define M4U_PORT_CAM_YUVO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 4)
-#define M4U_PORT_CAM_UFDI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 5)
-#define M4U_PORT_CAM_RAWI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 6)
-#define M4U_PORT_CAM_RAWI_R5_C MTK_M4U_ID(M4U_LARB9_ID, 7)
-#define M4U_PORT_CAM_CAMSV_1 MTK_M4U_ID(M4U_LARB9_ID, 8)
-#define M4U_PORT_CAM_CAMSV_2 MTK_M4U_ID(M4U_LARB9_ID, 9)
-#define M4U_PORT_CAM_CAMSV_3 MTK_M4U_ID(M4U_LARB9_ID, 10)
-#define M4U_PORT_CAM_CAMSV_4 MTK_M4U_ID(M4U_LARB9_ID, 11)
-#define M4U_PORT_CAM_CAMSV_5 MTK_M4U_ID(M4U_LARB9_ID, 12)
-#define M4U_PORT_CAM_CAMSV_6 MTK_M4U_ID(M4U_LARB9_ID, 13)
-#define M4U_PORT_CAM_AAO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 14)
-#define M4U_PORT_CAM_AFO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 15)
-#define M4U_PORT_CAM_FLKO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 16)
-#define M4U_PORT_CAM_LCESO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 17)
-#define M4U_PORT_CAM_CRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 18)
-#define M4U_PORT_CAM_LTMSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 19)
-#define M4U_PORT_CAM_RSSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 20)
-#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB9_ID, 21)
-#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB9_ID, 22)
-#define M4U_PORT_CAM_FAKE MTK_M4U_ID(M4U_LARB9_ID, 23)
-
-/* larb10-CAM_A */
-#define M4U_PORT_CAM_IMGO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 0)
-#define M4U_PORT_CAM_RRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 1)
-#define M4U_PORT_CAM_LSCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 2)
-#define M4U_PORT_CAM_BPCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 3)
-#define M4U_PORT_CAM_YUVO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 4)
-#define M4U_PORT_CAM_UFDI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 5)
-#define M4U_PORT_CAM_RAWI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 6)
-#define M4U_PORT_CAM_RAWI_R5_A MTK_M4U_ID(M4U_LARB10_ID, 7)
-#define M4U_PORT_CAM_IMGO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 8)
-#define M4U_PORT_CAM_RRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 9)
-#define M4U_PORT_CAM_LSCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 10)
-#define M4U_PORT_CAM_BPCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 11)
-#define M4U_PORT_CAM_YUVO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 12)
-#define M4U_PORT_CAM_UFDI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 13)
-#define M4U_PORT_CAM_RAWI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 14)
-#define M4U_PORT_CAM_RAWI_R5_B MTK_M4U_ID(M4U_LARB10_ID, 15)
-#define M4U_PORT_CAM_CAMSV_0 MTK_M4U_ID(M4U_LARB10_ID, 16)
-#define M4U_PORT_CAM_AAO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 17)
-#define M4U_PORT_CAM_AFO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 18)
-#define M4U_PORT_CAM_FLKO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 19)
-#define M4U_PORT_CAM_LCESO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 20)
-#define M4U_PORT_CAM_CRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 21)
-#define M4U_PORT_CAM_AAO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 22)
-#define M4U_PORT_CAM_AFO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 23)
-#define M4U_PORT_CAM_FLKO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 24)
-#define M4U_PORT_CAM_LCESO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 25)
-#define M4U_PORT_CAM_CRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 26)
-#define M4U_PORT_CAM_LTMSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 27)
-#define M4U_PORT_CAM_RSSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 28)
-#define M4U_PORT_CAM_LTMSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 29)
-#define M4U_PORT_CAM_RSSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 30)
-
-/* larb11-CAM-VPU */
-#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB11_ID, 0)
-#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB11_ID, 1)
-#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB11_ID, 2)
-#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB11_ID, 3)
-#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB11_ID, 4)
-
-#endif
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
deleted file mode 100644
index b0b1091aad6d..000000000000
--- a/include/dt-bindings/mux/ti-serdes.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for SERDES MUX for TI SoCs
- */
-
-#ifndef _DT_BINDINGS_MUX_TI_SERDES
-#define _DT_BINDINGS_MUX_TI_SERDES
-
-/*
- * These bindings are deprecated, because they do not match the actual
- * concept of bindings but rather contain pure constants values used only
- * in DTS board files.
- * Instead include the header in the DTS source directory.
- */
-#warning "These bindings are deprecated. Instead, use the header in the DTS source directory."
-
-/* J721E */
-
-#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
-#define J721E_SERDES0_LANE0_PCIE0_LANE0 0x1
-#define J721E_SERDES0_LANE0_USB3_0_SWAP 0x2
-#define J721E_SERDES0_LANE0_IP4_UNUSED 0x3
-
-#define J721E_SERDES0_LANE1_QSGMII_LANE2 0x0
-#define J721E_SERDES0_LANE1_PCIE0_LANE1 0x1
-#define J721E_SERDES0_LANE1_USB3_0 0x2
-#define J721E_SERDES0_LANE1_IP4_UNUSED 0x3
-
-#define J721E_SERDES1_LANE0_QSGMII_LANE3 0x0
-#define J721E_SERDES1_LANE0_PCIE1_LANE0 0x1
-#define J721E_SERDES1_LANE0_USB3_1_SWAP 0x2
-#define J721E_SERDES1_LANE0_SGMII_LANE0 0x3
-
-#define J721E_SERDES1_LANE1_QSGMII_LANE4 0x0
-#define J721E_SERDES1_LANE1_PCIE1_LANE1 0x1
-#define J721E_SERDES1_LANE1_USB3_1 0x2
-#define J721E_SERDES1_LANE1_SGMII_LANE1 0x3
-
-#define J721E_SERDES2_LANE0_IP1_UNUSED 0x0
-#define J721E_SERDES2_LANE0_PCIE2_LANE0 0x1
-#define J721E_SERDES2_LANE0_USB3_1_SWAP 0x2
-#define J721E_SERDES2_LANE0_SGMII_LANE0 0x3
-
-#define J721E_SERDES2_LANE1_IP1_UNUSED 0x0
-#define J721E_SERDES2_LANE1_PCIE2_LANE1 0x1
-#define J721E_SERDES2_LANE1_USB3_1 0x2
-#define J721E_SERDES2_LANE1_SGMII_LANE1 0x3
-
-#define J721E_SERDES3_LANE0_IP1_UNUSED 0x0
-#define J721E_SERDES3_LANE0_PCIE3_LANE0 0x1
-#define J721E_SERDES3_LANE0_USB3_0_SWAP 0x2
-#define J721E_SERDES3_LANE0_IP4_UNUSED 0x3
-
-#define J721E_SERDES3_LANE1_IP1_UNUSED 0x0
-#define J721E_SERDES3_LANE1_PCIE3_LANE1 0x1
-#define J721E_SERDES3_LANE1_USB3_0 0x2
-#define J721E_SERDES3_LANE1_IP4_UNUSED 0x3
-
-#define J721E_SERDES4_LANE0_EDP_LANE0 0x0
-#define J721E_SERDES4_LANE0_IP2_UNUSED 0x1
-#define J721E_SERDES4_LANE0_QSGMII_LANE5 0x2
-#define J721E_SERDES4_LANE0_IP4_UNUSED 0x3
-
-#define J721E_SERDES4_LANE1_EDP_LANE1 0x0
-#define J721E_SERDES4_LANE1_IP2_UNUSED 0x1
-#define J721E_SERDES4_LANE1_QSGMII_LANE6 0x2
-#define J721E_SERDES4_LANE1_IP4_UNUSED 0x3
-
-#define J721E_SERDES4_LANE2_EDP_LANE2 0x0
-#define J721E_SERDES4_LANE2_IP2_UNUSED 0x1
-#define J721E_SERDES4_LANE2_QSGMII_LANE7 0x2
-#define J721E_SERDES4_LANE2_IP4_UNUSED 0x3
-
-#define J721E_SERDES4_LANE3_EDP_LANE3 0x0
-#define J721E_SERDES4_LANE3_IP2_UNUSED 0x1
-#define J721E_SERDES4_LANE3_QSGMII_LANE8 0x2
-#define J721E_SERDES4_LANE3_IP4_UNUSED 0x3
-
-/* J7200 */
-
-#define J7200_SERDES0_LANE0_QSGMII_LANE3 0x0
-#define J7200_SERDES0_LANE0_PCIE1_LANE0 0x1
-#define J7200_SERDES0_LANE0_IP3_UNUSED 0x2
-#define J7200_SERDES0_LANE0_IP4_UNUSED 0x3
-
-#define J7200_SERDES0_LANE1_QSGMII_LANE4 0x0
-#define J7200_SERDES0_LANE1_PCIE1_LANE1 0x1
-#define J7200_SERDES0_LANE1_IP3_UNUSED 0x2
-#define J7200_SERDES0_LANE1_IP4_UNUSED 0x3
-
-#define J7200_SERDES0_LANE2_QSGMII_LANE1 0x0
-#define J7200_SERDES0_LANE2_PCIE1_LANE2 0x1
-#define J7200_SERDES0_LANE2_IP3_UNUSED 0x2
-#define J7200_SERDES0_LANE2_IP4_UNUSED 0x3
-
-#define J7200_SERDES0_LANE3_QSGMII_LANE2 0x0
-#define J7200_SERDES0_LANE3_PCIE1_LANE3 0x1
-#define J7200_SERDES0_LANE3_USB 0x2
-#define J7200_SERDES0_LANE3_IP4_UNUSED 0x3
-
-/* AM64 */
-
-#define AM64_SERDES0_LANE0_PCIE0 0x0
-#define AM64_SERDES0_LANE0_USB 0x1
-
-/* J721S2 */
-
-#define J721S2_SERDES0_LANE0_EDP_LANE0 0x0
-#define J721S2_SERDES0_LANE0_PCIE1_LANE0 0x1
-#define J721S2_SERDES0_LANE0_IP3_UNUSED 0x2
-#define J721S2_SERDES0_LANE0_IP4_UNUSED 0x3
-
-#define J721S2_SERDES0_LANE1_EDP_LANE1 0x0
-#define J721S2_SERDES0_LANE1_PCIE1_LANE1 0x1
-#define J721S2_SERDES0_LANE1_USB 0x2
-#define J721S2_SERDES0_LANE1_IP4_UNUSED 0x3
-
-#define J721S2_SERDES0_LANE2_EDP_LANE2 0x0
-#define J721S2_SERDES0_LANE2_PCIE1_LANE2 0x1
-#define J721S2_SERDES0_LANE2_IP3_UNUSED 0x2
-#define J721S2_SERDES0_LANE2_IP4_UNUSED 0x3
-
-#define J721S2_SERDES0_LANE3_EDP_LANE3 0x0
-#define J721S2_SERDES0_LANE3_PCIE1_LANE3 0x1
-#define J721S2_SERDES0_LANE3_USB 0x2
-#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
-
-/* J784S4 */
-
-#define J784S4_SERDES0_LANE0_IP1_UNUSED 0x0
-#define J784S4_SERDES0_LANE0_PCIE1_LANE0 0x1
-#define J784S4_SERDES0_LANE0_IP3_UNUSED 0x2
-#define J784S4_SERDES0_LANE0_IP4_UNUSED 0x3
-
-#define J784S4_SERDES0_LANE1_IP1_UNUSED 0x0
-#define J784S4_SERDES0_LANE1_PCIE1_LANE1 0x1
-#define J784S4_SERDES0_LANE1_IP3_UNUSED 0x2
-#define J784S4_SERDES0_LANE1_IP4_UNUSED 0x3
-
-#define J784S4_SERDES0_LANE2_PCIE3_LANE0 0x0
-#define J784S4_SERDES0_LANE2_PCIE1_LANE2 0x1
-#define J784S4_SERDES0_LANE2_IP3_UNUSED 0x2
-#define J784S4_SERDES0_LANE2_IP4_UNUSED 0x3
-
-#define J784S4_SERDES0_LANE3_PCIE3_LANE1 0x0
-#define J784S4_SERDES0_LANE3_PCIE1_LANE3 0x1
-#define J784S4_SERDES0_LANE3_USB 0x2
-#define J784S4_SERDES0_LANE3_IP4_UNUSED 0x3
-
-#define J784S4_SERDES1_LANE0_QSGMII_LANE3 0x0
-#define J784S4_SERDES1_LANE0_PCIE0_LANE0 0x1
-#define J784S4_SERDES1_LANE0_IP3_UNUSED 0x2
-#define J784S4_SERDES1_LANE0_IP4_UNUSED 0x3
-
-#define J784S4_SERDES1_LANE1_QSGMII_LANE4 0x0
-#define J784S4_SERDES1_LANE1_PCIE0_LANE1 0x1
-#define J784S4_SERDES1_LANE1_IP3_UNUSED 0x2
-#define J784S4_SERDES1_LANE1_IP4_UNUSED 0x3
-
-#define J784S4_SERDES1_LANE2_QSGMII_LANE1 0x0
-#define J784S4_SERDES1_LANE2_PCIE0_LANE2 0x1
-#define J784S4_SERDES1_LANE2_PCIE2_LANE0 0x2
-#define J784S4_SERDES1_LANE2_IP4_UNUSED 0x3
-
-#define J784S4_SERDES1_LANE3_QSGMII_LANE2 0x0
-#define J784S4_SERDES1_LANE3_PCIE0_LANE3 0x1
-#define J784S4_SERDES1_LANE3_PCIE2_LANE1 0x2
-#define J784S4_SERDES1_LANE3_IP4_UNUSED 0x3
-
-#define J784S4_SERDES2_LANE0_QSGMII_LANE5 0x0
-#define J784S4_SERDES2_LANE0_IP2_UNUSED 0x1
-#define J784S4_SERDES2_LANE0_IP3_UNUSED 0x2
-#define J784S4_SERDES2_LANE0_IP4_UNUSED 0x3
-
-#define J784S4_SERDES2_LANE1_QSGMII_LANE6 0x0
-#define J784S4_SERDES2_LANE1_IP2_UNUSED 0x1
-#define J784S4_SERDES2_LANE1_IP3_UNUSED 0x2
-#define J784S4_SERDES2_LANE1_IP4_UNUSED 0x3
-
-#define J784S4_SERDES2_LANE2_QSGMII_LANE7 0x0
-#define J784S4_SERDES2_LANE2_QSGMII_LANE1 0x1
-#define J784S4_SERDES2_LANE2_IP3_UNUSED 0x2
-#define J784S4_SERDES2_LANE2_IP4_UNUSED 0x3
-
-#define J784S4_SERDES2_LANE3_QSGMII_LANE8 0x0
-#define J784S4_SERDES2_LANE3_QSGMII_LANE2 0x1
-#define J784S4_SERDES2_LANE3_IP3_UNUSED 0x2
-#define J784S4_SERDES2_LANE3_IP4_UNUSED 0x3
-
-#endif /* _DT_BINDINGS_MUX_TI_SERDES */
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 6b901b342348..f8d4094f0880 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -24,4 +24,8 @@
#define PHY_TYPE_CPHY 11
#define PHY_TYPE_USXGMII 12
+#define PHY_POL_NORMAL 0
+#define PHY_POL_INVERT 1
+#define PHY_POL_AUTO 2
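+
+/*
+ * Illustrative DTS use (hypothetical property name; the property that
+ * consumes these polarity constants is defined by the individual PHY
+ * binding):
+ *
+ *	lane-polarity = <PHY_POL_INVERT>;
+ */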
+
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
deleted file mode 100644
index f393fbd68905..000000000000
--- a/include/dt-bindings/pinctrl/mt6397-pinfunc.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DTS_MT6397_PINFUNC_H
-#define __DTS_MT6397_PINFUNC_H
-
-#include <dt-bindings/pinctrl/mt65xx.h>
-
-#define MT6397_PIN_0_INT__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
-#define MT6397_PIN_0_INT__FUNC_INT (MTK_PIN_NO(0) | 1)
-
-#define MT6397_PIN_1_SRCVOLTEN__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
-#define MT6397_PIN_1_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(1) | 1)
-#define MT6397_PIN_1_SRCVOLTEN__FUNC_TEST_CK1 (MTK_PIN_NO(1) | 6)
-
-#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
-#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_SRCLKEN_PERI (MTK_PIN_NO(2) | 1)
-#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_TEST_CK2 (MTK_PIN_NO(2) | 6)
-
-#define MT6397_PIN_3_RTC_32K1V8__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
-#define MT6397_PIN_3_RTC_32K1V8__FUNC_RTC_32K1V8 (MTK_PIN_NO(3) | 1)
-#define MT6397_PIN_3_RTC_32K1V8__FUNC_TEST_CK3 (MTK_PIN_NO(3) | 6)
-
-#define MT6397_PIN_4_WRAP_EVENT__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
-#define MT6397_PIN_4_WRAP_EVENT__FUNC_WRAP_EVENT (MTK_PIN_NO(4) | 1)
-
-#define MT6397_PIN_5_SPI_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
-#define MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK (MTK_PIN_NO(5) | 1)
-
-#define MT6397_PIN_6_SPI_CSN__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
-#define MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN (MTK_PIN_NO(6) | 1)
-
-#define MT6397_PIN_7_SPI_MOSI__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
-#define MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI (MTK_PIN_NO(7) | 1)
-
-#define MT6397_PIN_8_SPI_MISO__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
-#define MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO (MTK_PIN_NO(8) | 1)
-
-#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
-#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(9) | 1)
-#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_IN0 (MTK_PIN_NO(9) | 6)
-#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_OUT0 (MTK_PIN_NO(9) | 7)
-
-#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
-#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_AUD_MISO (MTK_PIN_NO(10) | 1)
-#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_IN1 (MTK_PIN_NO(10) | 6)
-#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_OUT1 (MTK_PIN_NO(10) | 7)
-
-#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
-#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_AUD_MOSI (MTK_PIN_NO(11) | 1)
-#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_IN2 (MTK_PIN_NO(11) | 6)
-#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_OUT2 (MTK_PIN_NO(11) | 7)
-
-#define MT6397_PIN_12_COL0__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
-#define MT6397_PIN_12_COL0__FUNC_COL0_USBDL (MTK_PIN_NO(12) | 1)
-#define MT6397_PIN_12_COL0__FUNC_EINT10_1X (MTK_PIN_NO(12) | 2)
-#define MT6397_PIN_12_COL0__FUNC_PWM1_3X (MTK_PIN_NO(12) | 3)
-#define MT6397_PIN_12_COL0__FUNC_TEST_IN3 (MTK_PIN_NO(12) | 6)
-#define MT6397_PIN_12_COL0__FUNC_TEST_OUT3 (MTK_PIN_NO(12) | 7)
-
-#define MT6397_PIN_13_COL1__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
-#define MT6397_PIN_13_COL1__FUNC_COL1 (MTK_PIN_NO(13) | 1)
-#define MT6397_PIN_13_COL1__FUNC_EINT11_1X (MTK_PIN_NO(13) | 2)
-#define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3)
-#define MT6397_PIN_13_COL1__FUNC_TEST_IN4 (MTK_PIN_NO(13) | 6)
-#define MT6397_PIN_13_COL1__FUNC_TEST_OUT4 (MTK_PIN_NO(13) | 7)
-
-#define MT6397_PIN_14_COL2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
-#define MT6397_PIN_14_COL2__FUNC_COL2 (MTK_PIN_NO(14) | 1)
-#define MT6397_PIN_14_COL2__FUNC_EINT12_1X (MTK_PIN_NO(14) | 2)
-#define MT6397_PIN_14_COL2__FUNC_SDA0_2X (MTK_PIN_NO(14) | 3)
-#define MT6397_PIN_14_COL2__FUNC_TEST_IN5 (MTK_PIN_NO(14) | 6)
-#define MT6397_PIN_14_COL2__FUNC_TEST_OUT5 (MTK_PIN_NO(14) | 7)
-
-#define MT6397_PIN_15_COL3__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
-#define MT6397_PIN_15_COL3__FUNC_COL3 (MTK_PIN_NO(15) | 1)
-#define MT6397_PIN_15_COL3__FUNC_EINT13_1X (MTK_PIN_NO(15) | 2)
-#define MT6397_PIN_15_COL3__FUNC_SCL1_2X (MTK_PIN_NO(15) | 3)
-#define MT6397_PIN_15_COL3__FUNC_TEST_IN6 (MTK_PIN_NO(15) | 6)
-#define MT6397_PIN_15_COL3__FUNC_TEST_OUT6 (MTK_PIN_NO(15) | 7)
-
-#define MT6397_PIN_16_COL4__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
-#define MT6397_PIN_16_COL4__FUNC_COL4 (MTK_PIN_NO(16) | 1)
-#define MT6397_PIN_16_COL4__FUNC_EINT14_1X (MTK_PIN_NO(16) | 2)
-#define MT6397_PIN_16_COL4__FUNC_SDA1_2X (MTK_PIN_NO(16) | 3)
-#define MT6397_PIN_16_COL4__FUNC_TEST_IN7 (MTK_PIN_NO(16) | 6)
-#define MT6397_PIN_16_COL4__FUNC_TEST_OUT7 (MTK_PIN_NO(16) | 7)
-
-#define MT6397_PIN_17_COL5__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
-#define MT6397_PIN_17_COL5__FUNC_COL5 (MTK_PIN_NO(17) | 1)
-#define MT6397_PIN_17_COL5__FUNC_EINT15_1X (MTK_PIN_NO(17) | 2)
-#define MT6397_PIN_17_COL5__FUNC_SCL2_2X (MTK_PIN_NO(17) | 3)
-#define MT6397_PIN_17_COL5__FUNC_TEST_IN8 (MTK_PIN_NO(17) | 6)
-#define MT6397_PIN_17_COL5__FUNC_TEST_OUT8 (MTK_PIN_NO(17) | 7)
-
-#define MT6397_PIN_18_COL6__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
-#define MT6397_PIN_18_COL6__FUNC_COL6 (MTK_PIN_NO(18) | 1)
-#define MT6397_PIN_18_COL6__FUNC_EINT16_1X (MTK_PIN_NO(18) | 2)
-#define MT6397_PIN_18_COL6__FUNC_SDA2_2X (MTK_PIN_NO(18) | 3)
-#define MT6397_PIN_18_COL6__FUNC_GPIO32K_0 (MTK_PIN_NO(18) | 4)
-#define MT6397_PIN_18_COL6__FUNC_GPIO26M_0 (MTK_PIN_NO(18) | 5)
-#define MT6397_PIN_18_COL6__FUNC_TEST_IN9 (MTK_PIN_NO(18) | 6)
-#define MT6397_PIN_18_COL6__FUNC_TEST_OUT9 (MTK_PIN_NO(18) | 7)
-
-#define MT6397_PIN_19_COL7__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
-#define MT6397_PIN_19_COL7__FUNC_COL7 (MTK_PIN_NO(19) | 1)
-#define MT6397_PIN_19_COL7__FUNC_EINT17_1X (MTK_PIN_NO(19) | 2)
-#define MT6397_PIN_19_COL7__FUNC_PWM2_3X (MTK_PIN_NO(19) | 3)
-#define MT6397_PIN_19_COL7__FUNC_GPIO32K_1 (MTK_PIN_NO(19) | 4)
-#define MT6397_PIN_19_COL7__FUNC_GPIO26M_1 (MTK_PIN_NO(19) | 5)
-#define MT6397_PIN_19_COL7__FUNC_TEST_IN10 (MTK_PIN_NO(19) | 6)
-#define MT6397_PIN_19_COL7__FUNC_TEST_OUT10 (MTK_PIN_NO(19) | 7)
-
-#define MT6397_PIN_20_ROW0__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
-#define MT6397_PIN_20_ROW0__FUNC_ROW0 (MTK_PIN_NO(20) | 1)
-#define MT6397_PIN_20_ROW0__FUNC_EINT18_1X (MTK_PIN_NO(20) | 2)
-#define MT6397_PIN_20_ROW0__FUNC_SCL0_3X (MTK_PIN_NO(20) | 3)
-#define MT6397_PIN_20_ROW0__FUNC_TEST_IN11 (MTK_PIN_NO(20) | 6)
-#define MT6397_PIN_20_ROW0__FUNC_TEST_OUT11 (MTK_PIN_NO(20) | 7)
-
-#define MT6397_PIN_21_ROW1__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
-#define MT6397_PIN_21_ROW1__FUNC_ROW1 (MTK_PIN_NO(21) | 1)
-#define MT6397_PIN_21_ROW1__FUNC_EINT19_1X (MTK_PIN_NO(21) | 2)
-#define MT6397_PIN_21_ROW1__FUNC_SDA0_3X (MTK_PIN_NO(21) | 3)
-#define MT6397_PIN_21_ROW1__FUNC_AUD_TSTCK (MTK_PIN_NO(21) | 4)
-#define MT6397_PIN_21_ROW1__FUNC_TEST_IN12 (MTK_PIN_NO(21) | 6)
-#define MT6397_PIN_21_ROW1__FUNC_TEST_OUT12 (MTK_PIN_NO(21) | 7)
-
-#define MT6397_PIN_22_ROW2__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
-#define MT6397_PIN_22_ROW2__FUNC_ROW2 (MTK_PIN_NO(22) | 1)
-#define MT6397_PIN_22_ROW2__FUNC_EINT20_1X (MTK_PIN_NO(22) | 2)
-#define MT6397_PIN_22_ROW2__FUNC_SCL1_3X (MTK_PIN_NO(22) | 3)
-#define MT6397_PIN_22_ROW2__FUNC_TEST_IN13 (MTK_PIN_NO(22) | 6)
-#define MT6397_PIN_22_ROW2__FUNC_TEST_OUT13 (MTK_PIN_NO(22) | 7)
-
-#define MT6397_PIN_23_ROW3__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
-#define MT6397_PIN_23_ROW3__FUNC_ROW3 (MTK_PIN_NO(23) | 1)
-#define MT6397_PIN_23_ROW3__FUNC_EINT21_1X (MTK_PIN_NO(23) | 2)
-#define MT6397_PIN_23_ROW3__FUNC_SDA1_3X (MTK_PIN_NO(23) | 3)
-#define MT6397_PIN_23_ROW3__FUNC_TEST_IN14 (MTK_PIN_NO(23) | 6)
-#define MT6397_PIN_23_ROW3__FUNC_TEST_OUT14 (MTK_PIN_NO(23) | 7)
-
-#define MT6397_PIN_24_ROW4__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
-#define MT6397_PIN_24_ROW4__FUNC_ROW4 (MTK_PIN_NO(24) | 1)
-#define MT6397_PIN_24_ROW4__FUNC_EINT22_1X (MTK_PIN_NO(24) | 2)
-#define MT6397_PIN_24_ROW4__FUNC_SCL2_3X (MTK_PIN_NO(24) | 3)
-#define MT6397_PIN_24_ROW4__FUNC_TEST_IN15 (MTK_PIN_NO(24) | 6)
-#define MT6397_PIN_24_ROW4__FUNC_TEST_OUT15 (MTK_PIN_NO(24) | 7)
-
-#define MT6397_PIN_25_ROW5__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
-#define MT6397_PIN_25_ROW5__FUNC_ROW5 (MTK_PIN_NO(25) | 1)
-#define MT6397_PIN_25_ROW5__FUNC_EINT23_1X (MTK_PIN_NO(25) | 2)
-#define MT6397_PIN_25_ROW5__FUNC_SDA2_3X (MTK_PIN_NO(25) | 3)
-#define MT6397_PIN_25_ROW5__FUNC_TEST_IN16 (MTK_PIN_NO(25) | 6)
-#define MT6397_PIN_25_ROW5__FUNC_TEST_OUT16 (MTK_PIN_NO(25) | 7)
-
-#define MT6397_PIN_26_ROW6__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
-#define MT6397_PIN_26_ROW6__FUNC_ROW6 (MTK_PIN_NO(26) | 1)
-#define MT6397_PIN_26_ROW6__FUNC_EINT24_1X (MTK_PIN_NO(26) | 2)
-#define MT6397_PIN_26_ROW6__FUNC_PWM3_3X (MTK_PIN_NO(26) | 3)
-#define MT6397_PIN_26_ROW6__FUNC_GPIO32K_2 (MTK_PIN_NO(26) | 4)
-#define MT6397_PIN_26_ROW6__FUNC_GPIO26M_2 (MTK_PIN_NO(26) | 5)
-#define MT6397_PIN_26_ROW6__FUNC_TEST_IN17 (MTK_PIN_NO(26) | 6)
-#define MT6397_PIN_26_ROW6__FUNC_TEST_OUT17 (MTK_PIN_NO(26) | 7)
-
-#define MT6397_PIN_27_ROW7__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
-#define MT6397_PIN_27_ROW7__FUNC_ROW7 (MTK_PIN_NO(27) | 1)
-#define MT6397_PIN_27_ROW7__FUNC_EINT3_1X (MTK_PIN_NO(27) | 2)
-#define MT6397_PIN_27_ROW7__FUNC_CBUS (MTK_PIN_NO(27) | 3)
-#define MT6397_PIN_27_ROW7__FUNC_GPIO32K_3 (MTK_PIN_NO(27) | 4)
-#define MT6397_PIN_27_ROW7__FUNC_GPIO26M_3 (MTK_PIN_NO(27) | 5)
-#define MT6397_PIN_27_ROW7__FUNC_TEST_IN18 (MTK_PIN_NO(27) | 6)
-#define MT6397_PIN_27_ROW7__FUNC_TEST_OUT18 (MTK_PIN_NO(27) | 7)
-
-#define MT6397_PIN_28_PWM1__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
-#define MT6397_PIN_28_PWM1__FUNC_PWM1 (MTK_PIN_NO(28) | 1)
-#define MT6397_PIN_28_PWM1__FUNC_EINT4_1X (MTK_PIN_NO(28) | 2)
-#define MT6397_PIN_28_PWM1__FUNC_GPIO32K_4 (MTK_PIN_NO(28) | 4)
-#define MT6397_PIN_28_PWM1__FUNC_GPIO26M_4 (MTK_PIN_NO(28) | 5)
-#define MT6397_PIN_28_PWM1__FUNC_TEST_IN19 (MTK_PIN_NO(28) | 6)
-#define MT6397_PIN_28_PWM1__FUNC_TEST_OUT19 (MTK_PIN_NO(28) | 7)
-
-#define MT6397_PIN_29_PWM2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
-#define MT6397_PIN_29_PWM2__FUNC_PWM2 (MTK_PIN_NO(29) | 1)
-#define MT6397_PIN_29_PWM2__FUNC_EINT5_1X (MTK_PIN_NO(29) | 2)
-#define MT6397_PIN_29_PWM2__FUNC_GPIO32K_5 (MTK_PIN_NO(29) | 4)
-#define MT6397_PIN_29_PWM2__FUNC_GPIO26M_5 (MTK_PIN_NO(29) | 5)
-#define MT6397_PIN_29_PWM2__FUNC_TEST_IN20 (MTK_PIN_NO(29) | 6)
-#define MT6397_PIN_29_PWM2__FUNC_TEST_OUT20 (MTK_PIN_NO(29) | 7)
-
-#define MT6397_PIN_30_PWM3__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
-#define MT6397_PIN_30_PWM3__FUNC_PWM3 (MTK_PIN_NO(30) | 1)
-#define MT6397_PIN_30_PWM3__FUNC_EINT6_1X (MTK_PIN_NO(30) | 2)
-#define MT6397_PIN_30_PWM3__FUNC_COL0 (MTK_PIN_NO(30) | 3)
-#define MT6397_PIN_30_PWM3__FUNC_GPIO32K_6 (MTK_PIN_NO(30) | 4)
-#define MT6397_PIN_30_PWM3__FUNC_GPIO26M_6 (MTK_PIN_NO(30) | 5)
-#define MT6397_PIN_30_PWM3__FUNC_TEST_IN21 (MTK_PIN_NO(30) | 6)
-#define MT6397_PIN_30_PWM3__FUNC_TEST_OUT21 (MTK_PIN_NO(30) | 7)
-
-#define MT6397_PIN_31_SCL0__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
-#define MT6397_PIN_31_SCL0__FUNC_SCL0 (MTK_PIN_NO(31) | 1)
-#define MT6397_PIN_31_SCL0__FUNC_EINT7_1X (MTK_PIN_NO(31) | 2)
-#define MT6397_PIN_31_SCL0__FUNC_PWM1_2X (MTK_PIN_NO(31) | 3)
-#define MT6397_PIN_31_SCL0__FUNC_TEST_IN22 (MTK_PIN_NO(31) | 6)
-#define MT6397_PIN_31_SCL0__FUNC_TEST_OUT22 (MTK_PIN_NO(31) | 7)
-
-#define MT6397_PIN_32_SDA0__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
-#define MT6397_PIN_32_SDA0__FUNC_SDA0 (MTK_PIN_NO(32) | 1)
-#define MT6397_PIN_32_SDA0__FUNC_EINT8_1X (MTK_PIN_NO(32) | 2)
-#define MT6397_PIN_32_SDA0__FUNC_TEST_IN23 (MTK_PIN_NO(32) | 6)
-#define MT6397_PIN_32_SDA0__FUNC_TEST_OUT23 (MTK_PIN_NO(32) | 7)
-
-#define MT6397_PIN_33_SCL1__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
-#define MT6397_PIN_33_SCL1__FUNC_SCL1 (MTK_PIN_NO(33) | 1)
-#define MT6397_PIN_33_SCL1__FUNC_EINT9_1X (MTK_PIN_NO(33) | 2)
-#define MT6397_PIN_33_SCL1__FUNC_PWM2_2X (MTK_PIN_NO(33) | 3)
-#define MT6397_PIN_33_SCL1__FUNC_TEST_IN24 (MTK_PIN_NO(33) | 6)
-#define MT6397_PIN_33_SCL1__FUNC_TEST_OUT24 (MTK_PIN_NO(33) | 7)
-
-#define MT6397_PIN_34_SDA1__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
-#define MT6397_PIN_34_SDA1__FUNC_SDA1 (MTK_PIN_NO(34) | 1)
-#define MT6397_PIN_34_SDA1__FUNC_EINT0_1X (MTK_PIN_NO(34) | 2)
-#define MT6397_PIN_34_SDA1__FUNC_TEST_IN25 (MTK_PIN_NO(34) | 6)
-#define MT6397_PIN_34_SDA1__FUNC_TEST_OUT25 (MTK_PIN_NO(34) | 7)
-
-#define MT6397_PIN_35_SCL2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
-#define MT6397_PIN_35_SCL2__FUNC_SCL2 (MTK_PIN_NO(35) | 1)
-#define MT6397_PIN_35_SCL2__FUNC_EINT1_1X (MTK_PIN_NO(35) | 2)
-#define MT6397_PIN_35_SCL2__FUNC_PWM3_2X (MTK_PIN_NO(35) | 3)
-#define MT6397_PIN_35_SCL2__FUNC_TEST_IN26 (MTK_PIN_NO(35) | 6)
-#define MT6397_PIN_35_SCL2__FUNC_TEST_OUT26 (MTK_PIN_NO(35) | 7)
-
-#define MT6397_PIN_36_SDA2__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
-#define MT6397_PIN_36_SDA2__FUNC_SDA2 (MTK_PIN_NO(36) | 1)
-#define MT6397_PIN_36_SDA2__FUNC_EINT2_1X (MTK_PIN_NO(36) | 2)
-#define MT6397_PIN_36_SDA2__FUNC_TEST_IN27 (MTK_PIN_NO(36) | 6)
-#define MT6397_PIN_36_SDA2__FUNC_TEST_OUT27 (MTK_PIN_NO(36) | 7)
-
-#define MT6397_PIN_37_HDMISD__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
-#define MT6397_PIN_37_HDMISD__FUNC_HDMISD (MTK_PIN_NO(37) | 1)
-#define MT6397_PIN_37_HDMISD__FUNC_TEST_IN28 (MTK_PIN_NO(37) | 6)
-#define MT6397_PIN_37_HDMISD__FUNC_TEST_OUT28 (MTK_PIN_NO(37) | 7)
-
-#define MT6397_PIN_38_HDMISCK__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
-#define MT6397_PIN_38_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(38) | 1)
-#define MT6397_PIN_38_HDMISCK__FUNC_TEST_IN29 (MTK_PIN_NO(38) | 6)
-#define MT6397_PIN_38_HDMISCK__FUNC_TEST_OUT29 (MTK_PIN_NO(38) | 7)
-
-#define MT6397_PIN_39_HTPLG__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
-#define MT6397_PIN_39_HTPLG__FUNC_HTPLG (MTK_PIN_NO(39) | 1)
-#define MT6397_PIN_39_HTPLG__FUNC_TEST_IN30 (MTK_PIN_NO(39) | 6)
-#define MT6397_PIN_39_HTPLG__FUNC_TEST_OUT30 (MTK_PIN_NO(39) | 7)
-
-#define MT6397_PIN_40_CEC__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
-#define MT6397_PIN_40_CEC__FUNC_CEC (MTK_PIN_NO(40) | 1)
-#define MT6397_PIN_40_CEC__FUNC_TEST_IN31 (MTK_PIN_NO(40) | 6)
-#define MT6397_PIN_40_CEC__FUNC_TEST_OUT31 (MTK_PIN_NO(40) | 7)
-
-#endif /* __DTS_MT6397_PINFUNC_H */
diff --git a/include/dt-bindings/regulator/samsung,s2mpg10-regulator.h b/include/dt-bindings/regulator/samsung,s2mpg10-regulator.h
new file mode 100644
index 000000000000..d9c16bba4d85
--- /dev/null
+++ b/include/dt-bindings/regulator/samsung,s2mpg10-regulator.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2021 Google LLC
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Device Tree binding constants for the Samsung S2MPG1x PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_SAMSUNG_S2MPG10_H
+#define _DT_BINDINGS_REGULATOR_SAMSUNG_S2MPG10_H
+
+/*
+ * Several regulators may be controlled via external signals instead of via
+ * software. These constants describe the possible signals for such regulators
+ * and generally correspond to the respective on-chip pins.
+ *
+ * S2MPG10 regulators supporting these are:
+ * - buck1m .. buck7m buck10m
+ * - ldo3m .. ldo19m
+ *
+ * ldo20m supports external control, but using a different set of control
+ * signals.
+ *
+ * S2MPG11 regulators supporting these are:
+ * - buck1s .. buck3s buck5s buck8s buck9s bucka buckd
+ * - ldo1s ldo2s ldo8s ldo13s
+ */
+#define S2MPG10_EXTCTRL_PWREN 0 /* PWREN pin */
+#define S2MPG10_EXTCTRL_PWREN_MIF 1 /* PWREN_MIF pin */
+#define S2MPG10_EXTCTRL_AP_ACTIVE_N 2 /* ~AP_ACTIVE_N pin */
+#define S2MPG10_EXTCTRL_CPUCL1_EN 3 /* CPUCL1_EN pin */
+#define S2MPG10_EXTCTRL_CPUCL1_EN2 4 /* CPUCL1_EN & PWREN pins */
+#define S2MPG10_EXTCTRL_CPUCL2_EN 5 /* CPUCL2_EN pin */
+#define S2MPG10_EXTCTRL_CPUCL2_EN2	6	/* CPUCL2_EN & PWREN pins */
+#define S2MPG10_EXTCTRL_TPU_EN 7 /* TPU_EN pin */
+#define S2MPG10_EXTCTRL_TPU_EN2 8 /* TPU_EN & ~AP_ACTIVE_N pins */
+#define S2MPG10_EXTCTRL_TCXO_ON 9 /* TCXO_ON pin */
+#define S2MPG10_EXTCTRL_TCXO_ON2 10 /* TCXO_ON & ~AP_ACTIVE_N pins */
+
+#define S2MPG10_EXTCTRL_LDO20M_EN2 11 /* VLDO20M_EN & LDO20M_SFR */
+#define S2MPG10_EXTCTRL_LDO20M_EN 12 /* VLDO20M_EN pin */
+
+#define S2MPG11_EXTCTRL_PWREN 0 /* PWREN pin */
+#define S2MPG11_EXTCTRL_PWREN_MIF 1 /* PWREN_MIF pin */
+#define S2MPG11_EXTCTRL_AP_ACTIVE_N 2 /* ~AP_ACTIVE_N pin */
+#define S2MPG11_EXTCTRL_G3D_EN 3 /* G3D_EN pin */
+#define S2MPG11_EXTCTRL_G3D_EN2 4 /* G3D_EN & ~AP_ACTIVE_N pins */
+#define S2MPG11_EXTCTRL_AOC_VDD 5 /* AOC_VDD pin */
+#define S2MPG11_EXTCTRL_AOC_RET 6 /* AOC_RET pin */
+#define S2MPG11_EXTCTRL_UFS_EN 7 /* UFS_EN pin */
+#define S2MPG11_EXTCTRL_LDO13S_EN 8 /* VLDO13S_EN pin */
+
+#endif /* _DT_BINDINGS_REGULATOR_SAMSUNG_S2MPG10_H */
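For orientation, a consumer driver would typically receive one of these selector values via a device tree property and validate it before programming the PMIC's enable-control field. A minimal sketch follows; the property name and helper are assumptions for illustration, not taken from the real s2mpg10 driver:

/*
 * Hypothetical sketch only: the property name below is invented and the
 * real binding/driver may differ. Requires <linux/of.h>.
 */
static int s2mpg10_parse_extctrl(struct device_node *np, u32 *sel)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(np, "samsung,ext-control", &val);
	if (ret)
		return ret;

	/* S2MPG10 selectors 0..12 are defined above. */
	if (val > S2MPG10_EXTCTRL_LDO20M_EN)
		return -EINVAL;

	*sel = val;
	return 0;
}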
diff --git a/include/dt-bindings/reset/bcm6318-reset.h b/include/dt-bindings/reset/bcm6318-reset.h
deleted file mode 100644
index f882662505ea..000000000000
--- a/include/dt-bindings/reset/bcm6318-reset.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-
-#ifndef __DT_BINDINGS_RESET_BCM6318_H
-#define __DT_BINDINGS_RESET_BCM6318_H
-
-#define BCM6318_RST_SPI 0
-#define BCM6318_RST_EPHY 1
-#define BCM6318_RST_SAR 2
-#define BCM6318_RST_ENETSW 3
-#define BCM6318_RST_USBD 4
-#define BCM6318_RST_USBH 5
-#define BCM6318_RST_PCIE_CORE 6
-#define BCM6318_RST_PCIE 7
-#define BCM6318_RST_PCIE_EXT 8
-#define BCM6318_RST_PCIE_HARD 9
-#define BCM6318_RST_ADSL 10
-#define BCM6318_RST_PHYMIPS 11
-#define BCM6318_RST_HOSTMIPS 12
-
-#endif /* __DT_BINDINGS_RESET_BCM6318_H */
diff --git a/include/dt-bindings/reset/imx8ulp-pcc-reset.h b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
deleted file mode 100644
index e99a4735c3c4..000000000000
--- a/include/dt-bindings/reset/imx8ulp-pcc-reset.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2021 NXP
- */
-
-#ifndef DT_BINDING_PCC_RESET_IMX8ULP_H
-#define DT_BINDING_PCC_RESET_IMX8ULP_H
-
-/* PCC3 */
-#define PCC3_WDOG3_SWRST 0
-#define PCC3_WDOG4_SWRST 1
-#define PCC3_LPIT1_SWRST 2
-#define PCC3_TPM4_SWRST 3
-#define PCC3_TPM5_SWRST 4
-#define PCC3_FLEXIO1_SWRST 5
-#define PCC3_I3C2_SWRST 6
-#define PCC3_LPI2C4_SWRST 7
-#define PCC3_LPI2C5_SWRST 8
-#define PCC3_LPUART4_SWRST 9
-#define PCC3_LPUART5_SWRST 10
-#define PCC3_LPSPI4_SWRST 11
-#define PCC3_LPSPI5_SWRST 12
-
-/* PCC4 */
-#define PCC4_FLEXSPI2_SWRST 0
-#define PCC4_TPM6_SWRST 1
-#define PCC4_TPM7_SWRST 2
-#define PCC4_LPI2C6_SWRST 3
-#define PCC4_LPI2C7_SWRST 4
-#define PCC4_LPUART6_SWRST 5
-#define PCC4_LPUART7_SWRST 6
-#define PCC4_SAI4_SWRST 7
-#define PCC4_SAI5_SWRST 8
-#define PCC4_USDHC0_SWRST 9
-#define PCC4_USDHC1_SWRST 10
-#define PCC4_USDHC2_SWRST 11
-#define PCC4_USB0_SWRST 12
-#define PCC4_USB0_PHY_SWRST 13
-#define PCC4_USB1_SWRST 14
-#define PCC4_USB1_PHY_SWRST 15
-#define PCC4_ENET_SWRST 16
-
-/* PCC5 */
-#define PCC5_TPM8_SWRST 0
-#define PCC5_SAI6_SWRST 1
-#define PCC5_SAI7_SWRST 2
-#define PCC5_SPDIF_SWRST 3
-#define PCC5_ISI_SWRST 4
-#define PCC5_CSI_REGS_SWRST 5
-#define PCC5_CSI_SWRST 6
-#define PCC5_DSI_SWRST 7
-#define PCC5_WDOG5_SWRST 8
-#define PCC5_EPDC_SWRST 9
-#define PCC5_PXP_SWRST 10
-#define PCC5_GPU2D_SWRST 11
-#define PCC5_GPU3D_SWRST 12
-#define PCC5_DC_NANO_SWRST 13
-
-#endif /*DT_BINDING_RESET_IMX8ULP_H */
diff --git a/include/dt-bindings/reset/oxsemi,ox810se.h b/include/dt-bindings/reset/oxsemi,ox810se.h
deleted file mode 100644
index e943187e6527..000000000000
--- a/include/dt-bindings/reset/oxsemi,ox810se.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef DT_RESET_OXSEMI_OX810SE_H
-#define DT_RESET_OXSEMI_OX810SE_H
-
-#define RESET_ARM 0
-#define RESET_COPRO 1
-/* Reserved 2 */
-/* Reserved 3 */
-#define RESET_USBHS 4
-#define RESET_USBHSPHY 5
-#define RESET_MAC 6
-#define RESET_PCI 7
-#define RESET_DMA 8
-#define RESET_DPE 9
-#define RESET_DDR 10
-#define RESET_SATA 11
-#define RESET_SATA_LINK 12
-#define RESET_SATA_PHY 13
- /* Reserved 14 */
-#define RESET_NAND 15
-#define RESET_GPIO 16
-#define RESET_UART1 17
-#define RESET_UART2 18
-#define RESET_MISC 19
-#define RESET_I2S 20
-#define RESET_AHB_MON 21
-#define RESET_UART3 22
-#define RESET_UART4 23
-#define RESET_SGDMA 24
-/* Reserved 25 */
-/* Reserved 26 */
-/* Reserved 27 */
-/* Reserved 28 */
-/* Reserved 29 */
-/* Reserved 30 */
-#define RESET_BUS 31
-
-#endif /* DT_RESET_OXSEMI_OX810SE_H */
diff --git a/include/dt-bindings/reset/oxsemi,ox820.h b/include/dt-bindings/reset/oxsemi,ox820.h
deleted file mode 100644
index 54b58e09c1c0..000000000000
--- a/include/dt-bindings/reset/oxsemi,ox820.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef DT_RESET_OXSEMI_OX820_H
-#define DT_RESET_OXSEMI_OX820_H
-
-#define RESET_SCU 0
-#define RESET_LEON 1
-#define RESET_ARM0 2
-#define RESET_ARM1 3
-#define RESET_USBHS 4
-#define RESET_USBPHYA 5
-#define RESET_MAC 6
-#define RESET_PCIEA 7
-#define RESET_SGDMA 8
-#define RESET_CIPHER 9
-#define RESET_DDR 10
-#define RESET_SATA 11
-#define RESET_SATA_LINK 12
-#define RESET_SATA_PHY 13
-#define RESET_PCIEPHY 14
-#define RESET_NAND 15
-#define RESET_GPIO 16
-#define RESET_UART1 17
-#define RESET_UART2 18
-#define RESET_MISC 19
-#define RESET_I2S 20
-#define RESET_SD 21
-#define RESET_MAC_2 22
-#define RESET_PCIEB 23
-#define RESET_VIDEO 24
-#define RESET_DDR_PHY 25
-#define RESET_USBPHYB 26
-#define RESET_USBDEV 27
-/* Reserved 29 */
-#define RESET_ARMDBG 29
-#define RESET_PLLA 30
-#define RESET_PLLB 31
-
-#endif /* DT_RESET_OXSEMI_OX820_H */
diff --git a/include/dt-bindings/reset/spacemit,k3-resets.h b/include/dt-bindings/reset/spacemit,k3-resets.h
new file mode 100644
index 000000000000..79ac1c22b7b5
--- /dev/null
+++ b/include/dt-bindings/reset/spacemit,k3-resets.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 SpacemiT Technology Co. Ltd
+ */
+
+#ifndef _DT_BINDINGS_RESET_SPACEMIT_K3_RESETS_H_
+#define _DT_BINDINGS_RESET_SPACEMIT_K3_RESETS_H_
+
+/* MPMU resets */
+#define RESET_MPMU_WDT 0
+#define RESET_MPMU_RIPC 1
+
+/* APBC resets */
+#define RESET_APBC_UART0 0
+#define RESET_APBC_UART2 1
+#define RESET_APBC_UART3 2
+#define RESET_APBC_UART4 3
+#define RESET_APBC_UART5 4
+#define RESET_APBC_UART6 5
+#define RESET_APBC_UART7 6
+#define RESET_APBC_UART8 7
+#define RESET_APBC_UART9 8
+#define RESET_APBC_UART10 9
+#define RESET_APBC_GPIO 10
+#define RESET_APBC_PWM0 11
+#define RESET_APBC_PWM1 12
+#define RESET_APBC_PWM2 13
+#define RESET_APBC_PWM3 14
+#define RESET_APBC_PWM4 15
+#define RESET_APBC_PWM5 16
+#define RESET_APBC_PWM6 17
+#define RESET_APBC_PWM7 18
+#define RESET_APBC_PWM8 19
+#define RESET_APBC_PWM9 20
+#define RESET_APBC_PWM10 21
+#define RESET_APBC_PWM11 22
+#define RESET_APBC_PWM12 23
+#define RESET_APBC_PWM13 24
+#define RESET_APBC_PWM14 25
+#define RESET_APBC_PWM15 26
+#define RESET_APBC_PWM16 27
+#define RESET_APBC_PWM17 28
+#define RESET_APBC_PWM18 29
+#define RESET_APBC_PWM19 30
+#define RESET_APBC_SPI0 31
+#define RESET_APBC_SPI1 32
+#define RESET_APBC_SPI3 33
+#define RESET_APBC_RTC 34
+#define RESET_APBC_TWSI0 35
+#define RESET_APBC_TWSI1 36
+#define RESET_APBC_TWSI2 37
+#define RESET_APBC_TWSI4 38
+#define RESET_APBC_TWSI5 39
+#define RESET_APBC_TWSI6 40
+#define RESET_APBC_TWSI8 41
+#define RESET_APBC_TIMERS0 42
+#define RESET_APBC_TIMERS1 43
+#define RESET_APBC_TIMERS2 44
+#define RESET_APBC_TIMERS3 45
+#define RESET_APBC_TIMERS4 46
+#define RESET_APBC_TIMERS5 47
+#define RESET_APBC_TIMERS6 48
+#define RESET_APBC_TIMERS7 49
+#define RESET_APBC_AIB 50
+#define RESET_APBC_ONEWIRE 51
+#define RESET_APBC_I2S0 52
+#define RESET_APBC_I2S1 53
+#define RESET_APBC_I2S2 54
+#define RESET_APBC_I2S3 55
+#define RESET_APBC_I2S4 56
+#define RESET_APBC_I2S5 57
+#define RESET_APBC_DRO 58
+#define RESET_APBC_IR0 59
+#define RESET_APBC_IR1 60
+#define RESET_APBC_TSEN 61
+#define RESET_IPC_AP2AUD 62
+#define RESET_APBC_CAN0 63
+#define RESET_APBC_CAN1 64
+#define RESET_APBC_CAN2 65
+#define RESET_APBC_CAN3 66
+#define RESET_APBC_CAN4 67
+
+/* APMU resets */
+#define RESET_APMU_CSI 0
+#define RESET_APMU_CCIC2PHY 1
+#define RESET_APMU_CCIC3PHY 2
+#define RESET_APMU_ISP_CIBUS 3
+#define RESET_APMU_DSI_ESC 4
+#define RESET_APMU_LCD 5
+#define RESET_APMU_V2D 6
+#define RESET_APMU_LCD_MCLK 7
+#define RESET_APMU_LCD_DSCCLK 8
+#define RESET_APMU_SC2_HCLK 9
+#define RESET_APMU_CCIC_4X 10
+#define RESET_APMU_CCIC1_PHY 11
+#define RESET_APMU_SDH_AXI 12
+#define RESET_APMU_SDH0 13
+#define RESET_APMU_SDH1 14
+#define RESET_APMU_SDH2 15
+#define RESET_APMU_USB2 16
+#define RESET_APMU_USB3_PORTA 17
+#define RESET_APMU_USB3_PORTB 18
+#define RESET_APMU_USB3_PORTC 19
+#define RESET_APMU_USB3_PORTD 20
+#define RESET_APMU_QSPI 21
+#define RESET_APMU_QSPI_BUS 22
+#define RESET_APMU_DMA 23
+#define RESET_APMU_AES_WTM 24
+#define RESET_APMU_MCB_DCLK 25
+#define RESET_APMU_MCB_ACLK 26
+#define RESET_APMU_VPU 27
+#define RESET_APMU_DTC 28
+#define RESET_APMU_GPU 29
+#define RESET_APMU_ALZO 30
+#define RESET_APMU_MC 31
+#define RESET_APMU_CPU0_POP 32
+#define RESET_APMU_CPU0_SW 33
+#define RESET_APMU_CPU1_POP 34
+#define RESET_APMU_CPU1_SW 35
+#define RESET_APMU_CPU2_POP 36
+#define RESET_APMU_CPU2_SW 37
+#define RESET_APMU_CPU3_POP 38
+#define RESET_APMU_CPU3_SW 39
+#define RESET_APMU_C0_MPSUB_SW 40
+#define RESET_APMU_CPU4_POP 41
+#define RESET_APMU_CPU4_SW 42
+#define RESET_APMU_CPU5_POP 43
+#define RESET_APMU_CPU5_SW 44
+#define RESET_APMU_CPU6_POP 45
+#define RESET_APMU_CPU6_SW 46
+#define RESET_APMU_CPU7_POP 47
+#define RESET_APMU_CPU7_SW 48
+#define RESET_APMU_C1_MPSUB_SW 49
+#define RESET_APMU_MPSUB_DBG 50
+#define RESET_APMU_UCIE 51
+#define RESET_APMU_RCPU 52
+#define RESET_APMU_DSI4LN2_ESCCLK 53
+#define RESET_APMU_DSI4LN2_LCD_SW 54
+#define RESET_APMU_DSI4LN2_LCD_MCLK 55
+#define RESET_APMU_DSI4LN2_LCD_DSCCLK 56
+#define RESET_APMU_DSI4LN2_DPU_ACLK 57
+#define RESET_APMU_DPU_ACLK 58
+#define RESET_APMU_UFS_ACLK 59
+#define RESET_APMU_EDP0 60
+#define RESET_APMU_EDP1 61
+#define RESET_APMU_PCIE_PORTA 62
+#define RESET_APMU_PCIE_PORTB 63
+#define RESET_APMU_PCIE_PORTC 64
+#define RESET_APMU_PCIE_PORTD 65
+#define RESET_APMU_PCIE_PORTE 66
+#define RESET_APMU_EMAC0 67
+#define RESET_APMU_EMAC1 68
+#define RESET_APMU_EMAC2 69
+#define RESET_APMU_ESPI_MCLK 70
+#define RESET_APMU_ESPI_SCLK 71
+
+/* DCIU resets */
+#define RESET_DCIU_HDMA 0
+#define RESET_DCIU_DMA350 1
+#define RESET_DCIU_DMA350_0 2
+#define RESET_DCIU_DMA350_1 3
+#define RESET_DCIU_AXIDMA0 4
+#define RESET_DCIU_AXIDMA1 5
+#define RESET_DCIU_AXIDMA2 6
+#define RESET_DCIU_AXIDMA3 7
+#define RESET_DCIU_AXIDMA4 8
+#define RESET_DCIU_AXIDMA5 9
+#define RESET_DCIU_AXIDMA6 10
+#define RESET_DCIU_AXIDMA7 11
+
+#endif /* _DT_BINDINGS_RESET_SPACEMIT_K3_RESETS_H_ */
diff --git a/include/dt-bindings/sound/audio-jack-events.h b/include/dt-bindings/sound/audio-jack-events.h
deleted file mode 100644
index 1b29b295126a..000000000000
--- a/include/dt-bindings/sound/audio-jack-events.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __AUDIO_JACK_EVENTS_H
-#define __AUDIO_JACK_EVENTS_H
-
-#define JACK_HEADPHONE 1
-#define JACK_MICROPHONE 2
-#define JACK_LINEOUT 3
-#define JACK_LINEIN 4
-
-#endif /* __AUDIO_JACK_EVENTS_H */
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
index ddc7302a510a..350f98178b26 100644
--- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -7,6 +7,9 @@
#ifndef __MEDIATEK_LVTS_DT_H
#define __MEDIATEK_LVTS_DT_H
+#define MT7987_CPU 0
+#define MT7987_ETH2P5G 1
+
#define MT7988_CPU_0 0
#define MT7988_CPU_1 1
#define MT7988_ETH2P5G_0 2
@@ -80,4 +83,30 @@
#define MT8192_AP_MD1 15
#define MT8192_AP_MD2 16
+#define MT8196_MCU_MEDIUM_CPU6_0 0
+#define MT8196_MCU_MEDIUM_CPU6_1 1
+#define MT8196_MCU_DSU2 2
+#define MT8196_MCU_DSU3 3
+#define MT8196_MCU_LITTLE_CPU3 4
+#define MT8196_MCU_LITTLE_CPU0 5
+#define MT8196_MCU_LITTLE_CPU1 6
+#define MT8196_MCU_LITTLE_CPU2 7
+#define MT8196_MCU_MEDIUM_CPU4_0 8
+#define MT8196_MCU_MEDIUM_CPU4_1 9
+#define MT8196_MCU_MEDIUM_CPU5_0 10
+#define MT8196_MCU_MEDIUM_CPU5_1 11
+#define MT8196_MCU_DSU0 12
+#define MT8196_MCU_DSU1 13
+#define MT8196_MCU_BIG_CPU7_0 14
+#define MT8196_MCU_BIG_CPU7_1 15
+
+#define MT8196_AP_TOP0 0
+#define MT8196_AP_TOP1 1
+#define MT8196_AP_TOP2 2
+#define MT8196_AP_TOP3 3
+#define MT8196_AP_BOT0 4
+#define MT8196_AP_BOT1 5
+#define MT8196_AP_BOT2 6
+#define MT8196_AP_BOT3 7
+
#endif /* __MEDIATEK_LVTS_DT_H */
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
index 4eb64548a74f..03527162613f 100644
--- a/include/keys/trusted-type.h
+++ b/include/keys/trusted-type.h
@@ -19,7 +19,11 @@
#define MIN_KEY_SIZE 32
#define MAX_KEY_SIZE 128
-#define MAX_BLOB_SIZE 512
+#if IS_ENABLED(CONFIG_TRUSTED_KEYS_PKWM)
+#define MAX_BLOB_SIZE 1152
+#else
+#define MAX_BLOB_SIZE 512
+#endif
#define MAX_PCRINFO_SIZE 64
#define MAX_DIGEST_SIZE 64
@@ -46,6 +50,7 @@ struct trusted_key_options {
uint32_t policydigest_len;
unsigned char policydigest[MAX_DIGEST_SIZE];
uint32_t policyhandle;
+ void *private;
};
struct trusted_key_ops {
diff --git a/include/keys/trusted_pkwm.h b/include/keys/trusted_pkwm.h
new file mode 100644
index 000000000000..4035b9776394
--- /dev/null
+++ b/include/keys/trusted_pkwm.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PKWM_TRUSTED_KEY_H
+#define __PKWM_TRUSTED_KEY_H
+
+#include <keys/trusted-type.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+extern struct trusted_key_ops pkwm_trusted_key_ops;
+
+struct trusted_pkwm_options {
+ u16 wrap_flags;
+};
+
+static inline void dump_options(struct trusted_key_options *o)
+{
+ const struct trusted_pkwm_options *pkwm;
+ bool sb_audit_or_enforce_bit;
+ bool sb_enforce_bit;
+
+ pkwm = o->private;
+ sb_audit_or_enforce_bit = pkwm->wrap_flags & BIT(0);
+ sb_enforce_bit = pkwm->wrap_flags & BIT(1);
+
+	if (sb_audit_or_enforce_bit)
+		pr_debug("secure boot mode required: audit or enforce\n");
+	else if (sb_enforce_bit)
+		pr_debug("secure boot mode required: enforce\n");
+	else
+		pr_debug("secure boot mode required: disabled\n");
+}
+
+#endif
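A caller-side sketch of how the wrap_flags bits decoded by dump_options() might be populated; the values are illustrative only:

/* Illustrative only: request the "enforce" secure boot mode (bit 1). */
static void pkwm_options_example(void)
{
	struct trusted_pkwm_options pkwm = { .wrap_flags = BIT(1) };
	struct trusted_key_options opts = { .private = &pkwm };

	dump_options(&opts);
}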
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 5ec5182b5e57..9cd1594ab697 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -906,7 +906,8 @@ do { \
}; \
\
_KUNIT_SAVE_LOC(test); \
- if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
+ if (likely(!IS_ERR_OR_NULL(__left) && !IS_ERR_OR_NULL(__right) && \
+ (strcmp(__left, __right) op 0))) \
break; \
\
\
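With this change, the KUnit string assertions treat ERR_PTR() values like NULL: the assertion fails cleanly instead of strcmp() dereferencing a poisoned pointer. A minimal sketch of a test that benefits; lookup_name() is a hypothetical helper:

/* Hypothetical: lookup_name() may return ERR_PTR(-ENOENT) on failure. */
static void name_lookup_test(struct kunit *test)
{
	const char *name = lookup_name(42);

	/* Fails (rather than crashes) if name is NULL or an ERR_PTR(). */
	KUNIT_EXPECT_STREQ(test, name, "widget");
}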
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fbf0c3a65f59..3a412dcebc29 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -107,6 +107,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_GIC_V5,
ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index d4ed5622cf2b..17bb3374f4ca 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -27,12 +27,15 @@ int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+struct fwnode_handle *iort_iwb_handle(u32 iwb_id);
#ifdef CONFIG_ACPI_IORT
u32 iort_msi_map_id(struct device *dev, u32 id);
+u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node);
+int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
+int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa);
void acpi_configure_pmsi_domain(struct device *dev);
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head);
@@ -46,9 +49,15 @@ phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
+static inline u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node)
+{ return id; }
+static inline int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
+{ return -ENODEV; }
static inline struct irq_domain *iort_get_device_domain(
struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
+static inline int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa)
+{ return -ENODEV; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
static inline
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 02940be66324..df0f5c382286 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -56,12 +56,14 @@ struct aer_capability_regs {
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
int pcie_aer_is_native(struct pci_dev *dev);
+void pci_aer_unmask_internal_errors(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
+static inline void pci_aer_unmask_internal_errors(struct pci_dev *dev) { }
#endif
void pci_print_aer(struct pci_dev *dev, int aer_severity,
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
index 06d7d83196ca..0c4fec98822e 100644
--- a/include/linux/array_size.h
+++ b/include/linux/array_size.h
@@ -10,4 +10,10 @@
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+/**
+ * ARRAY_END - get a pointer to one past the last element in array @arr
+ * @arr: array
+ */
+#define ARRAY_END(arr) (&(arr)[ARRAY_SIZE(arr)])
+
#endif /* _LINUX_ARRAY_SIZE_H */
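ARRAY_END() pairs naturally with pointer-based iteration. A small self-contained sketch:

static int sum_table(void)
{
	static const int table[] = { 1, 2, 3, 5, 8 };
	const int *p;
	int total = 0;

	/* Walk from the first element up to one past the last. */
	for (p = table; p != ARRAY_END(table); p++)
		total += *p;

	return total;
}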
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 54b416e26995..8fd48bcb2a46 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -26,10 +26,7 @@ enum {
ATA_MAX_DEVICES = 2, /* per bus/port */
ATA_MAX_PRD = 256, /* we could make these 256/256 */
ATA_SECT_SIZE = 512,
- ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
- ATA_MAX_SECTORS_1024 = 1024,
- ATA_MAX_SECTORS_8191 = 8191,
ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 2f9d36b72bd8..cdc25f8979f7 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2121,7 +2121,7 @@ raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -2155,7 +2155,7 @@ raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -2189,7 +2189,7 @@ raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -2222,7 +2222,7 @@ raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -4247,7 +4247,7 @@ raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -4281,7 +4281,7 @@ raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -4315,7 +4315,7 @@ raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -4348,7 +4348,7 @@ raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// b565db590afeeff0d7c9485ccbca5bb6e155749f
+// 206314f82b8b73a5c3aa69cf7f35ac9e7b5d6b58
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 37ab6314a9f7..feb3b5dc3e96 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1269,7 +1269,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -1292,7 +1292,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -1314,7 +1314,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -1337,7 +1337,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -2847,7 +2847,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -2870,7 +2870,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -2892,7 +2892,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -2915,7 +2915,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4425,7 +4425,7 @@ atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -4448,7 +4448,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -4470,7 +4470,7 @@ atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -4493,7 +4493,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// f618ac667f868941a84ce0ab2242f1786e049ed4
+// 9dd948d3012b22c4e75933a5172983f912e46439
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index f86b29d90877..6a4e47d2db35 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1449,7 +1449,7 @@ raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -1473,7 +1473,7 @@ raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -1497,7 +1497,7 @@ raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -1521,7 +1521,7 @@ raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// eadf183c3600b8b92b91839dd3be6bcc560c752d
+// 4b882bf19018602c10816c52f8b4ae280adc887b
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index b3643de9931d..fa6520e192be 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -36,7 +36,7 @@ attribute_container_set_no_classdevs(struct attribute_container *atc)
atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
}
-int attribute_container_register(struct attribute_container *cont);
+void attribute_container_register(struct attribute_container *cont);
int __must_check attribute_container_unregister(struct attribute_container *cont);
void attribute_container_create_device(struct device *dev,
int (*fn)(struct attribute_container *,
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 536f8ee8da81..b642b5faca65 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -13,7 +13,6 @@
#include <linux/ptrace.h>
#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
-#include <uapi/linux/netfilter/nf_tables.h>
#include <uapi/linux/fanotify.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
@@ -128,12 +127,6 @@ enum audit_nfcfgop {
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
-/* only for compat system calls */
-extern unsigned compat_write_class[];
-extern unsigned compat_read_class[];
-extern unsigned compat_dir_class[];
-extern unsigned compat_chattr_class[];
-extern unsigned compat_signal_class[];
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -195,6 +188,8 @@ extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
+extern int audit_log_nf_skb(struct audit_buffer *ab,
+ const struct sk_buff *skb, u8 nfproto);
extern int audit_update_lsm_rules(void);
@@ -272,6 +267,12 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
+static inline int audit_log_nf_skb(struct audit_buffer *ab,
+ const struct sk_buff *skb, u8 nfproto)
+{
+ return 0;
+}
+
static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
{
return INVALID_UID;
@@ -316,7 +317,6 @@ extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
-extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
@@ -380,12 +380,6 @@ static inline void audit_syscall_exit(void *pt_regs)
__audit_syscall_exit(success, return_code);
}
}
-static inline struct filename *audit_reusename(const __user char *name)
-{
- if (unlikely(!audit_dummy_context()))
- return __audit_reusename(name);
- return NULL;
-}
static inline void audit_getname(struct filename *name)
{
if (unlikely(!audit_dummy_context()))
@@ -624,10 +618,6 @@ static inline struct audit_context *audit_context(void)
{
return NULL;
}
-static inline struct filename *audit_reusename(const __user char *name)
-{
- return NULL;
-}
static inline void audit_getname(struct filename *name)
{ }
static inline void audit_inode(struct filename *name,
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
index 0e34d673ef17..2b8153791e6a 100644
--- a/include/linux/audit_arch.h
+++ b/include/linux/audit_arch.h
@@ -23,4 +23,11 @@ enum auditsc_class_t {
extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
#endif
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0217c1073735..c88fd4d37d1f 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -46,7 +46,6 @@ enum wb_reason {
WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
- WB_REASON_LAPTOP_TIMER,
WB_REASON_FS_FREE_SPACE,
/*
* There is no bdi forker thread any more and works are done
@@ -204,8 +203,6 @@ struct backing_dev_info {
char dev_name[64];
struct device *owner;
- struct timer_list laptop_mode_wb_timer;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
#endif
diff --git a/include/linux/balloon.h b/include/linux/balloon.h
new file mode 100644
index 000000000000..ca5b15150f42
--- /dev/null
+++ b/include/linux/balloon.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common interface for implementing a memory balloon, including support
+ * for migration of pages inflated in a memory balloon.
+ *
+ * Balloon page migration makes use of the general "movable_ops page migration"
+ * feature.
+ *
+ * page->private is used to reference the responsible balloon device.
+ * That these pages have movable_ops, and which movable_ops apply,
+ * is derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
+ *
+ * Once the page type and the PG_movable_ops flag are set, migration code
+ * can initiate page isolation by invoking the
+ * movable_operations()->isolate_page() callback.
+ *
+ * As long as page->private is set, the page is either on the balloon list
+ * or isolated for migration. If page->private is not set, the page is
+ * either still being inflated, or was already deflated and will soon be
+ * freed by the balloon driver. Isolation is impossible in either case.
+ *
+ * Because the page isolation scan a compaction thread performs is a lockless
+ * procedure (from a page standpoint), it can race with balloon page
+ * migration. To sort out these racy scenarios and perform balloon page
+ * migration safely, we must always ensure that the following simple rules
+ * are observed:
+ *
+ * i. Inflation/deflation must set/clear page->private under the
+ *    balloon_pages_lock.
+ *
+ * ii. The isolation or dequeueing procedure must remove the page from the
+ *     balloon device page list under the balloon_pages_lock.
+ *
+ * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
+ */
+#ifndef _LINUX_BALLOON_H
+#define _LINUX_BALLOON_H
+#include <linux/pagemap.h>
+#include <linux/page-flags.h>
+#include <linux/migrate.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/list.h>
+
+/*
+ * Balloon device information descriptor.
+ * This struct is used to allow the common balloon page migration interface
+ * procedures to find the proper balloon device holding memory pages they'll
+ * have to cope for page migration, as well as it serves the balloon driver as
+ * a page book-keeper for its registered balloon devices.
+ */
+struct balloon_dev_info {
+ unsigned long isolated_pages; /* # of isolated pages for migration */
+	struct list_head pages;		/* Pages enqueued & handed to Host */
+ int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+ bool adjust_managed_page_count;
+};
+
+struct page *balloon_page_alloc(void);
+void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
+ struct page *page);
+struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
+size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages);
+size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages, size_t n_req_pages);
+
+static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
+{
+ balloon->isolated_pages = 0;
+ INIT_LIST_HEAD(&balloon->pages);
+ balloon->migratepage = NULL;
+ balloon->adjust_managed_page_count = false;
+}
+#endif /* _LINUX_BALLOON_H */
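A condensed sketch of the inflate path a balloon driver might build on this interface, assuming balloon_devinfo_init() was called once at probe time and that the core serializes list manipulation under the balloon_pages_lock per the rules above:

static struct balloon_dev_info vb_dev_info;	/* initialized at probe */

static int example_inflate_one(void)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;

	/* Queue the freshly allocated page towards the host. */
	balloon_page_enqueue(&vb_dev_info, page);
	return 0;
}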
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
deleted file mode 100644
index 7cfe48769239..000000000000
--- a/include/linux/balloon_compaction.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/linux/balloon_compaction.h
- *
- * Common interface definitions for making balloon pages movable by compaction.
- *
- * Balloon page migration makes use of the general "movable_ops page migration"
- * feature.
- *
- * page->private is used to reference the responsible balloon device.
- * That these pages have movable_ops, and which movable_ops apply,
- * is derived from the page type (PageOffline()) combined with the
- * PG_movable_ops flag (PageMovableOps()).
- *
- * As the page isolation scanning step a compaction thread does is a lockless
- * procedure (from a page standpoint), it might bring some racy situations while
- * performing balloon page compaction. In order to sort out these racy scenarios
- * and safely perform balloon's page compaction and migration we must, always,
- * ensure following these simple rules:
- *
- * i. Setting the PG_movable_ops flag and page->private with the following
- * lock order
- * +-page_lock(page);
- * +--spin_lock_irq(&b_dev_info->pages_lock);
- *
- * ii. isolation or dequeueing procedure must remove the page from balloon
- * device page list under b_dev_info->pages_lock.
- *
- * The functions provided by this interface are placed to help on coping with
- * the aforementioned balloon page corner case, as well as to ensure the simple
- * set of exposed rules are satisfied while we are dealing with balloon pages
- * compaction / migration.
- *
- * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
- */
-#ifndef _LINUX_BALLOON_COMPACTION_H
-#define _LINUX_BALLOON_COMPACTION_H
-#include <linux/pagemap.h>
-#include <linux/page-flags.h>
-#include <linux/migrate.h>
-#include <linux/gfp.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-
-/*
- * Balloon device information descriptor.
- * This struct is used to allow the common balloon compaction interface
- * procedures to find the proper balloon device holding memory pages they'll
- * have to cope for page compaction / migration, as well as it serves the
- * balloon driver as a page book-keeper for its registered balloon devices.
- */
-struct balloon_dev_info {
- unsigned long isolated_pages; /* # of isolated pages for migration */
- spinlock_t pages_lock; /* Protection to pages list */
- struct list_head pages; /* Pages enqueued & handled to Host */
- int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
- struct page *page, enum migrate_mode mode);
-};
-
-extern struct page *balloon_page_alloc(void);
-extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
- struct page *page);
-extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
-extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
- struct list_head *pages);
-extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
- struct list_head *pages, size_t n_req_pages);
-
-static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
-{
- balloon->isolated_pages = 0;
- spin_lock_init(&balloon->pages_lock);
- INIT_LIST_HEAD(&balloon->pages);
- balloon->migratepage = NULL;
-}
-
-#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct movable_operations balloon_mops;
-/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
- */
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
-{
- return (struct balloon_dev_info *)page_private(page);
-}
-#endif /* CONFIG_BALLOON_COMPACTION */
-
-/*
- * balloon_page_insert - insert a page into the balloon's page list and make
- * the page->private assignment accordingly.
- * @balloon : pointer to balloon device
- * @page : page to be assigned as a 'balloon page'
- *
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before inserting a page into the balloon device.
- */
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageOffline(page);
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
- SetPageMovableOps(page);
- set_page_private(page, (unsigned long)balloon);
- }
- list_add(&page->lru, &balloon->pages);
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
- return GFP_HIGHUSER_MOVABLE;
- return GFP_HIGHUSER;
-}
-
-/*
- * balloon_page_finalize - prepare a balloon page that was removed from the
- * balloon list for release to the page allocator
- * @page: page to be released to the page allocator
- *
- * Caller must ensure that the page is locked.
- */
-static inline void balloon_page_finalize(struct page *page)
-{
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
- set_page_private(page, 0);
- /* PageOffline is sticky until the page is freed to the buddy. */
-}
-
-/*
- * balloon_page_push - insert a page into a page list.
- * @head : pointer to list
- * @page : page to be added
- *
- * Caller must ensure the page is private and protect the list.
- */
-static inline void balloon_page_push(struct list_head *pages, struct page *page)
-{
- list_add(&page->lru, pages);
-}
-
-/*
- * balloon_page_pop - remove a page from a page list.
- * @head : pointer to list
- * @page : page to be added
- *
- * Caller must ensure the page is private and protect the list.
- */
-static inline struct page *balloon_page_pop(struct list_head *pages)
-{
- struct page *page = list_first_entry_or_null(pages, struct page, lru);
-
- if (!page)
- return NULL;
-
- list_del(&page->lru);
- return page;
-}
-#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index c75a9b3672aa..36a3f2275ecd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -256,12 +256,6 @@ static inline struct folio *bio_first_folio_all(struct bio *bio)
return page_folio(bio_first_page_all(bio));
}
-static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
-{
- WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
- return &bio->bi_io_vec[bio->bi_vcnt - 1];
-}
-
/**
* struct folio_iter - State for iterating all folios in a bio.
* @folio: The current folio we're iterating. NULL after the last folio.
@@ -403,6 +397,29 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
return iov_iter_npages(iter, max_segs);
}
+/**
+ * bio_iov_bounce_nr_vecs - calculate number of bvecs for a bounce bio
+ * @iter: iter to bounce from
+ * @op: REQ_OP_* for the bio
+ *
+ * Calculates how many bvecs are needed for the next bio to bounce from/to
+ * @iter.
+ */
+static inline unsigned short
+bio_iov_bounce_nr_vecs(struct iov_iter *iter, blk_opf_t op)
+{
+ /*
+	 * Bvec iters still need to be bounced, so unlike in
+	 * bio_iov_vecs_to_alloc they are not special-cased here.
+	 *
+	 * Reads need an extra vector for the bounce buffer itself; account
+	 * for that here.
+ */
+ if (op_is_write(op))
+ return iov_iter_npages(iter, BIO_MAX_VECS);
+ return iov_iter_npages(iter, BIO_MAX_VECS - 1) + 1;
+}
+
struct request_queue;
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
@@ -414,6 +431,7 @@ static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
}
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
+void bio_reuse(struct bio *bio, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
@@ -456,6 +474,9 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
+int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
+void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty);
+
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
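A sketch of how the new bounce helpers could combine on the submission side; the wrapper and its error handling are assumptions, not an existing kernel API:

/* Hypothetical wrapper: allocate a bio sized for the bounce path. */
static struct bio *alloc_bounce_bio(struct block_device *bdev,
				    struct iov_iter *iter, blk_opf_t op)
{
	struct bio *bio;

	bio = bio_alloc(bdev, bio_iov_bounce_nr_vecs(iter, op), op,
			GFP_KERNEL);
	if (!bio)
		return NULL;

	if (bio_iov_iter_bounce(bio, iter) < 0) {
		bio_put(bio);
		return NULL;
	}
	return bio;
}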
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index c0989b5b0407..7869a6e59b6a 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -7,6 +7,18 @@
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <asm/processor.h> /* for cpu_relax() */
+
+/*
+ * For static context analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake context that we can cast to with the __bitlock(bitnum, addr) macro
+ * below, which will give us unique instances for each (bit, addr) pair that the
+ * static analysis can use.
+ */
+context_lock_struct(__context_bitlock) { };
+#define __bitlock(bitnum, addr) ((struct __context_bitlock *)((bitnum) + (addr)))
+
/*
* bit-based spin_lock()
*
@@ -14,6 +26,7 @@
* are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+ __acquires(__bitlock(bitnum, addr))
{
/*
* Assuming the lock is uncontended, this never enters
@@ -32,13 +45,14 @@ static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
}
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+ __cond_acquires(true, __bitlock(bitnum, addr))
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -47,7 +61,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
return 0;
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
return 1;
}
@@ -55,6 +69,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
* bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -63,7 +78,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
@@ -72,6 +87,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -80,7 +96,7 @@ static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
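
A sketch of what the per-(bit, addr) tokens buy, assuming Clang-style
capability analysis as used elsewhere in the kernel; the function below is
purely illustrative:

	static void frob_flags(unsigned long *flags)
	{
		bit_spin_lock(PG_locked, flags);   /* acquires __bitlock(PG_locked, flags) */
		/* ... critical section ... */
		bit_spin_unlock(PG_locked, flags); /* releases the same token */
	}

A path that returns with the token still held would now be diagnosed per
(bit, addr) pair instead of against a single shared "bitlock" token.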
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 126dc5b380af..54aeeef1f0ec 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -8,6 +8,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/typecheck.h>
#include <asm/byteorder.h>
@@ -243,7 +244,7 @@ __MAKE_OP(64)
#define __field_prep(mask, val) \
({ \
- __auto_type __mask = (mask); \
+ auto __mask = (mask); \
typeof(__mask) __val = (val); \
unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
__ffs(__mask) : __ffs64(__mask); \
@@ -252,7 +253,7 @@ __MAKE_OP(64)
#define __field_get(mask, reg) \
({ \
- __auto_type __mask = (mask); \
+ auto __mask = (mask); \
typeof(__mask) __reg = (reg); \
unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
__ffs(__mask) : __ffs64(__mask); \
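
The auto keyword is the C23 spelling of the GCC __auto_type extension, so
behaviour is unchanged. As a worked example, assuming these macros keep the
usual FIELD_PREP/FIELD_GET semantics in the elided lines:

	u32 reg = 0xabcd;
	u32 val = __field_get(GENMASK(15, 8), reg);	/* __ffs(0xff00) == 8, so val == 0xab */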
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 58b0c5254a67..f7c3cb4a342f 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -132,6 +132,11 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
return bio->bi_crypt_context;
}
+static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
+{
+ return bio->bi_crypt_context;
+}
+
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
gfp_t gfp_mask);
@@ -169,8 +174,35 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
return false;
}
+static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
+{
+ return NULL;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+bool __blk_crypto_submit_bio(struct bio *bio);
+
+/**
+ * blk_crypto_submit_bio - Submit a bio that may have a crypto context
+ * @bio: bio to submit
+ *
+ * If @bio has no crypto context, or the crypt context attached to @bio is
+ * supported by the underlying device's inline encryption hardware, just submit
+ * @bio.
+ *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. For writes this means submitting newly allocated bios
+ * for the encrypted payload and holding back the source bio until they
+ * complete; for reads the decryption happens in place via a hooked-in
+ * completion handler.
+ */
+static inline void blk_crypto_submit_bio(struct bio *bio)
+{
+ if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
+ submit_bio(bio);
+}
+
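Caller-side, the conversion the kernel-doc above implies is a sketch like
this; bio_crypt_set_ctx() is the existing helper declared earlier in this
header:

	bio_crypt_set_ctx(bio, key, dun, GFP_KERNEL);
	blk_crypto_submit_bio(bio);	/* direct submit, or crypto API fallback */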
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
/**
* bio_crypt_clone - clone bio encryption context
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index a6b84206eb94..c15b1ac62765 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -91,7 +91,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
}
-static inline bool blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(const struct request *rq)
{
return rq->cmd_flags & REQ_INTEGRITY;
}
@@ -168,9 +168,9 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
{
return 0;
}
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(const struct request *rq)
{
- return 0;
+ return false;
}
static inline struct bio_vec rq_integrity_vec(struct request *rq)
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index cb88fc791fbd..214c181ff2c9 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -28,7 +28,7 @@ struct blk_dma_iter {
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter);
+ struct blk_dma_iter *iter);
/**
* blk_rq_dma_map_coalesce - were all segments coalesced?
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index cae9e857aea4..18a2388ba581 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,6 +13,7 @@
struct blk_mq_tags;
struct blk_flush_queue;
+struct io_comp_batch;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128
@@ -22,7 +23,8 @@ enum rq_end_io_ret {
RQ_END_IO_FREE,
};
-typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
+ const struct io_comp_batch *);
/*
 * request flags
 */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5dc061d318a4..d59553324a84 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -232,6 +232,8 @@ struct bio {
atomic_t __bi_remaining;
+ /* The actual vec list, preserved by bio_reset() */
+ struct bio_vec *bi_io_vec;
struct bvec_iter bi_iter;
union {
@@ -275,13 +277,12 @@ struct bio {
atomic_t __bi_cnt; /* pin count */
- struct bio_vec *bi_io_vec; /* the actual vec list */
-
struct bio_set *bi_pool;
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
-#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+#define BIO_MAX_SIZE UINT_MAX /* max value of bi_iter.bi_size */
+#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> SECTOR_SHIFT)
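
With SECTOR_SHIFT == 9 the numeric limit is unchanged: BIO_MAX_SECTORS ==
0xffffffff >> 9 == 8388607 sectors, i.e. just under 4 GiB of 512-byte
sectors, matching BIO_MAX_SIZE as the cap on bi_iter.bi_size.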
static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 72e34acd439c..99ef8cd7673c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -340,14 +340,13 @@ typedef unsigned int __bitwise blk_features_t;
/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES ((__force blk_features_t)(1u << 14))
+
/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
((__force blk_features_t)(1u << 15))
-/* atomic writes enabled */
-#define BLK_FEAT_ATOMIC_WRITES \
- ((__force blk_features_t)(1u << 16))
-
/*
* Flags automatically inherited when stacking limits.
*/
@@ -551,7 +550,8 @@ struct request_queue {
/*
* queue settings
*/
- unsigned long nr_requests; /* Max # of requests */
+ unsigned int nr_requests; /* Max # of requests */
+ unsigned int async_depth; /* Max # of async requests */
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
@@ -681,7 +681,7 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q) \
((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
@@ -1026,7 +1026,7 @@ extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-/* Helper to convert REQ_OP_XXX to its string format XXX */
+/* Convert a request operation REQ_OP_name into the string "name" */
extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
@@ -1044,7 +1044,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
return bdev->bd_queue; /* this is never NULL */
}
-/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
+/* Convert a zone condition BLK_ZONE_COND_name into the string "name" */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
static inline unsigned int bio_zone_no(struct bio *bio)
@@ -1462,9 +1462,14 @@ bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}
+static inline bool bdev_rot(struct block_device *bdev)
+{
+ return blk_queue_rot(bdev_get_queue(bdev));
+}
+
static inline bool bdev_nonrot(struct block_device *bdev)
{
- return blk_queue_nonrot(bdev_get_queue(bdev));
+ return !bdev_rot(bdev);
}
static inline bool bdev_synchronous(struct block_device *bdev)
@@ -1822,6 +1827,7 @@ struct io_comp_batch {
struct rq_list req_list;
bool need_ts;
void (*complete)(struct io_comp_batch *);
+ void *poll_ctx;
};
static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
diff --git a/include/linux/bnge/hsi.h b/include/linux/bnge/hsi.h
new file mode 100644
index 000000000000..8ea13d5407ee
--- /dev/null
+++ b/include/linux/bnge/hsi.h
@@ -0,0 +1,12609 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2026 Broadcom */
+
+/* DO NOT MODIFY!!! This file is automatically generated. */
+
+#ifndef _BNGE_HSI_H_
+#define _BNGE_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
+ __le16 size;
+ __le64 req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_PSP_CFG 0x72UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
+ #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
+ #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_ADV 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QUIESCE 0xc5UL
+ #define HWRM_FW_STATE_BACKUP 0xc6UL
+ #define HWRM_FW_STATE_RESTORE 0xc7UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
+ #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
+ #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_START 0xe4UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
+ #define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
+ #define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_PROF_QUERY 0x1c3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_FUNC_PTP_QCFG 0x1c5UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_PORT_PHY_FDRSTAT 0x232UL
+ #define HWRM_PORT_PHY_DBG 0x23aUL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_QCFG 0x264UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_QCFG 0x265UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
+ #define HWRM_TFC_SPR_BA_SET 0x3a1UL
+ #define HWRM_TFC_SPR_BA_GET 0x3a2UL
+ #define HWRM_MGMT_FILTER_ALLOC 0x3e8UL
+ #define HWRM_MGMT_FILTER_FREE 0x3e9UL
+ #define HWRM_MGMT_FILTER_CFG 0x3eaUL
+ #define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_DBG_TOKEN_QUERY_AUTH_IDS 0xff2fUL
+ #define HWRM_DBG_TOKEN_CFG 0xff30UL
+ #define HWRM_DBG_TRACE_TRIGGER 0xff31UL
+ #define HWRM_DBG_TRACE_TRIGGER_STATUS 0xff32UL
+ #define HWRM_NVM_SET_PROFILE 0xffe9UL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 704
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 15
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 1
+#define HWRM_VERSION_STR "1.15.1.1"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1472b/184B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_DEBUG_TOKEN_SUPPORTED 0x20000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_PSP_SUPPORTED 0x40000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ROCE_COUNTERSET_SUPPORTED 0x80000UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ char active_pkg_name[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ __le16 max_req_timeout;
+ __le16 max_psp_supported_pfs;
+ __le16 max_psp_supported_vfs;
+ __le16 max_roce_countersets;
+ __le16 max_ext_req_timeout;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
+ __le16 len;
+ __le32 opaque;
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PNO_HOST_DMA_COMPLETE 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_GID_UPDATE 0x52UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ADPTV_QOS 0x53UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FABRIC_NEXT_HOP_IP_UPDATED 0x54UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x55UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PSP_SM_KEY_ROTATE_NOTIFY 0x56UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
+};
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
+};
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
+};
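+
+/*
+ * Illustrative sketch, not part of the generated interface: multi-bit
+ * fields such as PF_ID and VF_ID are recovered by masking first, then
+ * shifting right by the matching *_SFT constant. Helper name is
+ * hypothetical.
+ */
+static inline u16
+bnxt_def_vnic_change_vf_id(const struct hwrm_async_event_cmpl_default_vnic_change *ev)
+{
+	u32 data1 = le32_to_cpu(ev->event_data1);
+
+	return (data1 & ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK) >>
+	       ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT;
+}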
+
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
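+
+/*
+ * Illustrative sketch, not part of the generated interface: the PPS
+ * timestamp is split across the two data words, with the upper 16 bits
+ * carried in event_data2 and the lower 32 bits in event_data1, so a
+ * 48-bit value is reassembled as below. Helper name is hypothetical.
+ */
+static inline u64
+bnxt_pps_event_ts(const struct hwrm_async_event_cmpl_pps_timestamp *ev)
+{
+	u64 upper = (le32_to_cpu(ev->event_data2) &
+		     ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
+		    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT;
+
+	return (upper << 32) | le32_to_cpu(ev->event_data1);
+}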
+
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE 0xcUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPRT_TRACE 0xdUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RERT_TRACE 0xeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPC_MSG_TRACE 0xfUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPC_CMPL_TRACE 0x10UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPC_CMPL_TRACE
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
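+
+/*
+ * Illustrative sketch, not part of the generated interface: event_data2
+ * carries the error severity; a driver would plausibly treat
+ * SEVERITY_FATAL as grounds for recovery action. Helper name is
+ * hypothetical.
+ */
+static inline bool
+bnxt_hwrm_error_is_fatal(const struct hwrm_async_event_cmpl_hwrm_error *ev)
+{
+	u32 sev = le32_to_cpu(ev->event_data2) &
+		  ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK;
+
+	return sev == ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL;
+}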
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUP_UDCC_SES 0x7UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+};
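+
+/*
+ * Illustrative sketch, not part of the generated interface: all error
+ * reports share event_id 0x45, and the low byte of event_data1 selects
+ * which of the specialized layouts below (pause storm, NVM, thermal, ...)
+ * applies, so a handler would typically switch on this value. Helper
+ * name is hypothetical.
+ */
+static inline u8
+bnxt_error_report_type(const struct hwrm_async_event_cmpl_error_report_base *ev)
+{
+	return le32_to_cpu(ev->event_data1) &
+	       ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK;
+}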
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
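+
+/*
+ * Illustrative sketch, not part of the generated interface: a thermal
+ * report packs the measured and threshold temperatures into event_data2
+ * and the crossing direction into event_data1. Helper name is
+ * hypothetical.
+ */
+static inline void
+bnxt_thermal_report_decode(const struct hwrm_async_event_cmpl_error_report_thermal *ev,
+			   u8 *temp, u8 *threshold, bool *increasing)
+{
+	u32 data2 = le32_to_cpu(ev->event_data2);
+
+	*temp = data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK;
+	*threshold = (data2 &
+		      ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>
+		     ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT;
+	*increasing = !!(le32_to_cpu(ev->event_data1) &
+			 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR);
+}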
+
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+};
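+
+/*
+ * Illustrative sketch, not part of the generated interface: HWRM request
+ * fields are only honored when the matching bit in 'enables' is set, so
+ * a caller sets a field and its enable bit together, as shown here for
+ * the MTU. Helper name is hypothetical.
+ */
+static inline void
+bnxt_vf_cfg_set_mtu(struct hwrm_func_vf_cfg_input *req, u16 mtu)
+{
+	req->mtu = cpu_to_le16(mtu);
+	req->enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_MTU);
+}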
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcaps_output (size:1152b/144B) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ __le16 max_tso_segs;
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_CHANGE_UDP_SRCPORT_SUPPORT 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_DATA_SUPPORTED 0x800UL
+ __le16 max_roce_vfs;
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
+ u8 valid;
+};
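+
+/*
+ * Illustrative sketch, not part of the generated interface: capability
+ * bits in the qcaps response reduce to simple flag tests after
+ * byte-swapping, as shown here for PTP support. Helper name is
+ * hypothetical.
+ */
+static inline bool
+bnxt_qcaps_ptp_supported(const struct hwrm_func_qcaps_output *resp)
+{
+	return !!(le32_to_cpu(resp->flags) & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED);
+}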
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcfg_output (size:1408b/176B) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ __le16 alloc_msix;
+ __le16 registered_vfs;
+ __le16 l2_doorbell_bar_size_kb;
+ u8 active_endpoints;
+ u8 always_1;
+ __le32 reset_addr_poll;
+ __le16 legacy_l2_db_size_kb;
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ __le16 roce_vnic_id;
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
+ __le16 stag_vid;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 mirror_vnic_id;
+ u8 max_link_width;
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_LAST FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16
+ u8 max_link_speed;
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_LAST FUNC_QCFG_RESP_MAX_LINK_SPEED_G5
+ u8 negotiated_link_width;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16
+ u8 negotiated_link_speed;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5
+ u8 unused_7[2];
+ u8 pcie_compliance;
+ u8 unused_8;
+ __le16 l2_db_multi_page_size_kb;
+ u8 unused_9[5];
+ u8 valid;
+};
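+
+/*
+ * Illustrative sketch, not part of the generated interface: min_bw and
+ * max_bw each encode a 28-bit value, a scale bit (bits vs. bytes) and a
+ * 3-bit unit in one 32-bit word; decoding separates them as below.
+ * Helper names are hypothetical.
+ */
+static inline u32
+bnxt_qcfg_min_bw_value(const struct hwrm_func_qcfg_output *resp)
+{
+	return le32_to_cpu(resp->min_bw) & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK;
+}
+
+static inline bool
+bnxt_qcfg_min_bw_is_bytes(const struct hwrm_func_qcfg_output *resp)
+{
+	return (le32_to_cpu(resp->min_bw) & FUNC_QCFG_RESP_MIN_BW_SCALE) ==
+	       FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES;
+}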
+
+/* hwrm_func_cfg_input (size:1280b/160B) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_msix;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
+ __le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ #define FUNC_CFG_REQ_ENABLES2_PCIE_COMPLIANCE 0x1000UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ __le16 physical_slot_number;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
+ u8 pcie_compliance;
+ u8 unused_2;
+};
+
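The min_bw/max_bw words above pack a 28-bit bandwidth value (bits 27:0), a scale selector (bit 28) and a 3-bit unit code (bits 31:29); the same layout recurs in partition_min_bw/partition_max_bw, and db_page_size is simply a power-of-two code (4 KiB << code, so 0x8 is 1 MiB). A minimal sketch of composing such a word from the masks defined above; the helper name is illustrative, not part of the interface, and <linux/types.h> is assumed for the __le32/cpu_to_le32 plumbing:

/* Illustrative helper: build a min_bw/max_bw word from its three
 * bitfields. 'scale' and 'unit' are passed as the pre-shifted
 * FUNC_CFG_REQ_MIN_BW_SCALE_* / ..._BW_VALUE_UNIT_* constants.
 */
static inline __le32 bnxt_encode_min_bw(u32 value, u32 scale, u32 unit)
{
	u32 bw = (value << FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT) &
		 FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK;

	return cpu_to_le32(bw | scale | unit);
}

For example, bnxt_encode_min_bw(100, FUNC_CFG_REQ_MIN_BW_SCALE_BITS, FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA) would request a 100 Mbit/s floor.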
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
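Like every HWRM completion in this file, hwrm_func_cfg_output ends in a valid byte. The response is DMA'd into host memory, so drivers conventionally poll that final byte and only trust the preceding fields once it reads non-zero. A hedged sketch of the check (the helper name is illustrative; READ_ONCE from <linux/compiler.h> keeps the compiler from caching the DMA'd byte):

/* Illustrative: the response is complete once its final byte is set;
 * everything before it is then safe to read.
 */
static inline bool hwrm_func_cfg_resp_ready(const struct hwrm_func_cfg_output *resp)
{
	return READ_ONCE(resp->valid) != 0;
}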
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_NPAR_PARTITION_DOWN_FAILED 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_RES_ARRAY_ALLOC_FAILED 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED 0x5UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_RES_UPDATE_FAILED 0x6UL
+ #define FUNC_CFG_CMD_ERR_CODE_APPLY_MAX_BW_FAILED 0x7UL
+ #define FUNC_CFG_CMD_ERR_CODE_ENABLE_EVB_FAILED 0x8UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_ASSET_TEST_FAILED 0x9UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_RES_UPDATE_FAILED 0xaUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_ASSET_TEST_FAILED 0xbUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_RES_UPDATE_FAILED 0xcUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_ASSET_TEST_FAILED 0xdUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_RES_UPDATE_FAILED 0xeUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_ASSET_TEST_FAILED 0xfUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_RES_UPDATE_FAILED 0x10UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_ASSET_TEST_FAILED 0x11UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_RES_UPDATE_FAILED 0x12UL
+ #define FUNC_CFG_CMD_ERR_CODE_FAILED_TO_START_STATS_THREAD 0x13UL
+ #define FUNC_CFG_CMD_ERR_CODE_RDMA_SRIOV_DISABLED 0x14UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_DISABLED 0x15UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_ASSET_TEST_FAILED 0x16UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_RES_UPDATE_FAILED 0x17UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_DISABLED 0x18UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_ASSET_TEST_FAILED 0x19UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_RES_UPDATE_FAILED 0x1aUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_DISABLED 0x1bUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_ASSET_TEST_FAILED 0x1cUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_RES_UPDATE_FAILED 0x1dUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_DISABLED 0x1eUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_ASSET_TEST_FAILED 0x1fUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_RES_UPDATE_FAILED 0x20UL
+ #define FUNC_CFG_CMD_ERR_CODE_INVALID_KDNET_MODE 0x21UL
+ #define FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL 0x22UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 clear_seq;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+};
+
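vf_req_fwd and async_event_fwd above are 256-bit bitmaps stored as eight little-endian 32-bit words; a driver asks the firmware to forward a given async event by setting the bit whose index equals the event id. A minimal sketch under that bit-per-event reading (the helper name is illustrative):

/* Illustrative: request forwarding of async event 'event_id' (0..255). */
static inline void hwrm_fwd_async_event(struct hwrm_func_drv_rgtr_input *req,
					unsigned int event_id)
{
	req->async_event_fwd[event_id / 32] |=
		cpu_to_le32(1U << (event_id % 32));
}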
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
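The REQ_BUF_PAGE_SIZE_* codes above are log2 encodings of the page size in bytes: 0x4 is 16 B, 0xc is 4 KiB, 0x10 is 64 KiB and 0x1e is 1 GiB, so the byte count falls out of a shift. A small sketch under that reading (the helper name is illustrative):

/* Illustrative: REQ_BUF_PAGE_SIZE_* encodes log2(bytes), e.g. 0xc -> 4096. */
static inline u64 hwrm_req_buf_page_bytes(u16 page_size_code)
{
	return 1ULL << page_size_code;
}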
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
+};
+
+/* hwrm_func_drv_qver_output (size:256b/32B) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 max_tx_scheduler_inputs;
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ __le16 mrav_num_entries_units;
+ u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
+ u8 valid;
+};
+
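ctx_init_mask above advertises, per context type, whether the firmware honours ctx_kind_initializer; the matching *_init_offset byte then tells the driver where inside each entry that initializer sits. A hedged sketch of the capability check (this reading is inferred from the field names, and the helper name is illustrative):

/* Illustrative: does this device support QP context initialization? */
static inline bool bstore_qp_init_supported(const struct hwrm_func_backing_store_qcaps_output *resp)
{
	return le16_to_cpu(resp->ctx_init_mask) &
	       FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP;
}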
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
+};
+
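Every *_pg_size_*_lvl byte in this section, including tqm_ring_pg_size_tqm_ring_lvl above, packs the page-table indirection level into the low nibble and the page-size code into the high nibble. A minimal packing sketch using the masks above (the helper name is illustrative; pg_size is passed as one of the pre-shifted ..._PG_SIZE_PG_* constants):

/* Illustrative: low nibble = indirection level, high nibble = page size. */
static inline u8 tqm_ring_cfg_byte(u8 lvl, u8 pg_size)
{
	return (lvl & TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK) |
	       (pg_size & TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK);
}

So a ring with two levels of indirection and 8 KiB pages would use tqm_ring_cfg_byte(TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2, TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K).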
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ __le16 qp_num_fast_qpmd_entries;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
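+
+/*
+ * Editorial note (not part of the generated HSI spec): each *_reg field
+ * above packs an address-space selector into bits [1:0] and a
+ * 4-byte-aligned register offset into bits [31:2].  A consumer would
+ * decode it roughly as follows (illustrative sketch):
+ *
+ *	u32 reg = le32_to_cpu(resp->fw_health_status_reg);
+ *	u32 space = reg &
+ *		ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
+ *	u32 offset = reg &
+ *		ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK;
+ *
+ * with space selecting PCIe config space, GRC, BAR0 or BAR1.
+ */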
+
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
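+
+/*
+ * Editorial note: this command is sent by the driver in response to an
+ * echo_request async event, copying event_data1 and event_data2 from
+ * that event back to the firmware unchanged.
+ */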
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0;
+ u8 valid;
+};
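+
+/*
+ * Editorial note: state is a bitmask with one *_ENABLED bit per TSIO
+ * pin, while the pinN_usage fields are enums.  An illustrative check
+ * for pin 0:
+ *
+ *	if ((resp->state & FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED) &&
+ *	    resp->pin0_usage == FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT)
+ *		pin 0 is enabled and driving a PPS output
+ */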
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0[4];
+};
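+
+/*
+ * Editorial note: as with other HWRM _cfg commands, only the fields
+ * whose bits are set in enables are applied.  To configure pin 0 as a
+ * PPS input, for example (illustrative sketch):
+ *
+ *	req->enables = cpu_to_le32(FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
+ *				   FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE);
+ *	req->pin0_state = FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED;
+ *	req->pin0_usage = FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN;
+ */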
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ttx_pacing_rate_prof_query_input (size:192b/24B) */
+struct hwrm_func_ttx_pacing_rate_prof_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ttx_pacing_rate_prof_query_output (size:128b/16B) */
+struct hwrm_func_ttx_pacing_rate_prof_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 start_prof_id;
+ u8 end_prof_id;
+ u8 active_prof_id;
+ #define FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_60M 0x0UL
+ #define FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_50G 0x1UL
+ #define FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_LAST FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_50G
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_func_ttx_pacing_rate_query_input (size:192b/24B) */
+struct hwrm_func_ttx_pacing_rate_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 profile_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_func_ttx_pacing_rate_query_output (size:4224b/528B) */
+struct hwrm_func_ttx_pacing_rate_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 rates[128];
+ u8 profile_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_func_key_ctx_alloc_input (size:384b/48B) */
+struct hwrm_func_key_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_key_ctxs;
+ __le32 dma_bufr_size_bytes;
+ u8 key_ctx_type;
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_TX 0x0UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_RX 0x1UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_TX 0x2UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_RX 0x3UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_LAST FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_RX
+ u8 unused_0[7];
+ __le64 host_dma_addr;
+ __le32 partition_start_xid;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_key_ctx_alloc_output (size:192b/24B) */
+struct hwrm_func_key_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 num_key_ctxs_allocated;
+ u8 flags;
+ #define FUNC_KEY_CTX_ALLOC_RESP_FLAGS_KEY_CTXS_CONTIGUOUS 0x1UL
+ u8 unused_0;
+ __le32 partition_start_xid;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_func_key_ctx_free_input (size:256b/32B) */
+struct hwrm_func_key_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 key_ctx_type;
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_TX 0x0UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_RX 0x1UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_TX 0x2UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_RX 0x3UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_LAST FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_RX
+ u8 unused_0;
+ __le32 partition_start_xid;
+ __le16 num_entries;
+ u8 unused_1[6];
+};
+
+/* hwrm_func_key_ctx_free_output (size:128b/16B) */
+struct hwrm_func_key_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
+};
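+
+/*
+ * Editorial note: page_size_pbl_level packs the PBL indirection level
+ * into bits [3:0] and the backing page size into bits [7:4].  A 2-level
+ * PBL of 4K pages, for instance, is encoded as (illustrative):
+ *
+ *	req->page_size_pbl_level =
+ *		FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 |
+ *		FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K;
+ */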
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_cmd_err (size:64b/8B) */
+struct hwrm_func_backing_store_cfg_v2_cmd_err {
+ u8 code;
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_QP_FAIL 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SRQ_FAIL 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_CQ_FAIL 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_VNIC_FAIL 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_STAT_FAIL 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TQM_SPR_FAIL 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TQM_FPR_FAIL 0x7UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_MRAV_FAIL 0x8UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TIM_FAIL 0x9UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_V2TRACE_FAIL 0xaUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TXCK_FAIL 0xbUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_RXCK_FAIL 0xcUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_MPC_FAIL 0xdUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SHADOW_DB_FAIL 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_XID_FAIL 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TFC_FAIL 0x10UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TTXPACE_FAIL 0x11UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_CDU_ENABLE_FAIL 0x12UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SCHQ_ALLOC_FAIL 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_LAST FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SCHQ_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ERR_QPC_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 locked;
+ __le32 rsvd2[2];
+};
+
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mr_split_entries (size:128b/16B) */
+struct mr_split_entries {
+ __le32 mr_num_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* av_split_entries (size:128b/16B) */
+struct av_split_entries {
+ __le32 av_num_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* sq_split_entries (size:128b/16B) */
+struct sq_split_entries {
+ __le32 sq_num_l2_entries;
+ __le32 rsvd2;
+ __le32 rsvd3[2];
+};
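+
+/*
+ * Editorial note: the *_split_entries structures above describe how the
+ * split_entry_0..split_entry_3 words of the backing-store cfg/qcfg v2
+ * messages are interpreted for a given context type; subtype_valid_cnt
+ * indicates how many of the four words carry valid sub-counts.
+ */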
+
+/* hwrm_func_backing_store_qcfg_v2_cmd_err (size:64b/8B) */
+struct hwrm_func_backing_store_qcfg_v2_cmd_err {
+ u8 code;
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_SHDDB_FAIL 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_XID_FAIL 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_TXPAC_RING_FAIL 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_INVALID_FIELD 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_LAST FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_INVALID_FIELD
+ u8 unused_0[7];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_PHYSICAL_PBL_PREFERRED 0x80UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le16 max_instance_count;
+ u8 rsvd3;
+ u8 valid;
+};
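+
+/*
+ * Editorial note: next_valid_type lets a caller walk the supported
+ * backing-store types without probing every enum value, along these
+ * lines (illustrative sketch):
+ *
+ *	u16 type = FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP;
+ *	do {
+ *		query the current type and consume the response
+ *		type = le16_to_cpu(resp->next_valid_type);
+ *	} while (type != FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID);
+ */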
+
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
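+
+/*
+ * Editorial note: dbr_stat_db_fifo_reg uses the same two-bit
+ * address-space plus 4-byte-aligned-offset packing as the
+ * error-recovery registers above.  The watermark and FIFO-room values
+ * are extracted from the register contents with the paired mask and
+ * shift fields, e.g. (illustrative, where fifo_val is the value read
+ * from the doorbell FIFO register):
+ *
+ *	room = (fifo_val &
+ *		le32_to_cpu(resp->dbr_stat_db_fifo_reg_fifo_room_mask)) >>
+ *		resp->dbr_stat_db_fifo_reg_fifo_room_shift;
+ */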
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_ENABLE 0x800000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_DISABLE 0x1000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_ENABLE 0x2000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_DISABLE 0x4000000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
+ u8 unused_2[6];
+};
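+
+/*
+ * Editorial note: link-speed values in this message are in units of
+ * 100 Mbps (0xa = 1 Gbps, 0x64 = 10 Gbps, 0x3e8 = 100 Gbps), with
+ * 0xffff reserved as a special value for 10 Mbps.  In the *_SPEEDS2
+ * fields the NRZ value is incremented by 1 for the PAM4-56 variant and
+ * by 2 for the PAM4-112 variant of the same nominal speed
+ * (0x3e8/0x3e9/0x3ea for 100 Gbps).
+ */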
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_LINK_TRAINING 0x8UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_PRECODING 0x10UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_TX_LASER_DISABLED 0x20UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
+ u8 valid;
+};
+
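+/*
+ * Illustrative sketch, not part of the generated firmware interface:
+ * the link_speed encoding above is in units of 100 Mb/s (1GB == 0xa ==
+ * 10), with 0xffff reserved as a special value for 10 Mb/s.  A
+ * hypothetical decode helper:
+ */
+static inline u32 hwrm_link_speed_to_mbps(__le16 link_speed)
+{
+ u16 fw_speed = le16_to_cpu(link_speed);
+
+ if (fw_speed == PORT_PHY_QCFG_RESP_LINK_SPEED_10MB)
+ return 10;
+ return (u32)fw_speed * 100; /* firmware units are 100 Mb/s */
+}
+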
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+ __s32 ptp_freq_adj_ppb;
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+ __s64 ptp_adj_phase;
+};
+
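+/*
+ * Illustrative sketch (hypothetical helper; the req_type/resp_addr
+ * transport header is assumed to be filled by the caller's HWRM
+ * plumbing): enabling PTP timestamp capture on both paths with
+ * hwrm_port_mac_cfg.  The flags field carries the on/off request
+ * directly, without a corresponding enables bit.
+ */
+static inline void hwrm_port_mac_cfg_enable_ptp_ts(struct hwrm_port_mac_cfg_input *req,
+ u16 port_id)
+{
+ req->port_id = cpu_to_le16(port_id);
+ req->flags = cpu_to_le32(PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE);
+}
+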
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
+ u8 unused_1[7];
+ u8 valid;
+};
+
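+/*
+ * Illustrative sketch (helper name invented here): the flags above
+ * select how the host may read PTP timestamps.  The register offsets in
+ * this response are assumed to be meaningful only when firmware reports
+ * direct (or partial direct) access.
+ */
+static inline bool hwrm_ptp_has_direct_access(const struct hwrm_port_mac_ptp_qcfg_output *resp)
+{
+ return resp->flags & (PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS |
+ PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK);
+}
+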
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
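+/*
+ * Illustrative sketch: hwrm_port_qstats is assumed to DMA a struct
+ * tx_port_stats and a struct rx_port_stats into the host buffers named
+ * in the request; tx_stat_size/rx_stat_size in the response report how
+ * many bytes were written, so older firmware may return fewer counters.
+ * A hypothetical helper giving the number of valid 64-bit RX counters:
+ */
+static inline int hwrm_port_qstats_valid_rx_ctrs(const struct hwrm_port_qstats_output *resp)
+{
+ return le16_to_cpu(resp->rx_stat_size) / sizeof(__le64);
+}
+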
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ __le16 total_active_cos_queues;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+ u8 valid;
+};
+
+/* hwrm_port_qstats_ext_pfc_wd_input (size:256b/32B) */
+struct hwrm_port_qstats_ext_pfc_wd_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 pfc_wd_stat_size;
+ u8 unused_0[4];
+ __le64 pfc_wd_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_pfc_wd_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_pfc_wd_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfc_wd_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
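+/*
+ * Illustrative sketch (hypothetical helper): the eight ECN mark counters
+ * above form a contiguous per-CoS array of __le64, so they can be walked
+ * as one:
+ */
+static inline u64 port_stats_ecn_total(const struct port_stats_ecn *ecn)
+{
+ const __le64 *cnt = &ecn->mark_cnt_cos0; /* counters are contiguous */
+ u64 total = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ total += le64_to_cpu(cnt[i]);
+ return total;
+}
+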
+/* port_stats_ext_pfc_adv (size:1536b/192B) */
+struct port_stats_ext_pfc_adv {
+ __le64 pfc_min_duration_time[8];
+ __le64 pfc_max_duration_time[8];
+ __le64 pfc_weighted_duration_time[8];
+};
+
+/* hwrm_port_qstats_ext_pfc_adv_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_pfc_adv_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 pfc_adv_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_PFC_ADV_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 tx_pfc_adv_stat_host_addr;
+ __le64 rx_pfc_adv_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_pfc_adv_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_pfc_adv_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfc_adv_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
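+/*
+ * Illustrative sketch (hypothetical helper, transport omitted): asking
+ * firmware for the current time rather than a packet timestamp is a
+ * matter of setting the CURRENT_TIME flag; ptp_msg_ts in the response is
+ * then assumed to carry the clock value, and the TX/RX path bit is
+ * irrelevant.
+ */
+static inline void hwrm_port_ts_query_current_time(struct hwrm_port_ts_query_input *req,
+ u16 port_id)
+{
+ req->port_id = cpu_to_le16(port_id);
+ req->flags = cpu_to_le32(PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME);
+}
+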
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_ADV_STATS_SUPPORTED 0x20UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
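+/*
+ * Illustrative sketch (hypothetical check): consumers should gate any
+ * use of the supported_speeds2_{force,auto}_mode fields above on the
+ * SPEEDS2_SUPPORTED capability bit in flags2.
+ */
+static inline bool hwrm_phy_qcaps_has_speeds2(const struct hwrm_port_phy_qcaps_output *resp)
+{
+ return le16_to_cpu(resp->flags2) & PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
+}
+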
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
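+/*
+ * Illustrative sketch (hypothetical helper, transport omitted): reading
+ * a module EEPROM window over hwrm_port_phy_i2c_read.  data_length is in
+ * bytes and must fit the response data area (data[16] of __le32, i.e. at
+ * most 64 bytes); the PAGE_OFFSET enables bit tells firmware that the
+ * page_offset field is valid.
+ */
+static inline void hwrm_i2c_read_sfp(struct hwrm_port_phy_i2c_read_input *req,
+ u16 port_id, u16 offset, u8 len)
+{
+ req->port_id = cpu_to_le16(port_id);
+ req->i2c_slave_addr = 0xa0; /* conventional SFP EEPROM (A0h) address */
+ req->page_number = cpu_to_le16(0);
+ req->page_offset = cpu_to_le16(offset);
+ req->data_length = len; /* caller keeps this <= 64 */
+ req->enables = cpu_to_le32(PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
+}
+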
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
+
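+/*
+ * Illustrative sketch (hypothetical helper, transport omitted): a
+ * clause-45 MDIO register read.  A nonzero cl45_mdio is assumed to
+ * select clause-45 framing, in which case dev_addr carries the MMD
+ * device address alongside the 16-bit register address.
+ */
+static inline void hwrm_mdio_read_c45(struct hwrm_port_phy_mdio_read_input *req,
+ u16 port_id, u8 phy, u8 mmd, u16 reg)
+{
+ req->port_id = cpu_to_le16(port_id);
+ req->phy_addr = phy;
+ req->dev_addr = mmd;
+ req->reg_addr = cpu_to_le16(reg);
+ req->cl45_mdio = 1;
+}
+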
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
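+/*
+ * Illustrative sketch (hypothetical helper, transport omitted): making
+ * LED0 blink for physical port identification.  Each configured
+ * attribute is assumed to need its enables bit set; blink durations are
+ * in firmware-defined units.
+ */
+static inline void hwrm_port_led0_blink(struct hwrm_port_led_cfg_input *req,
+ u16 port_id, u16 on_off_dur)
+{
+ req->port_id = cpu_to_le16(port_id);
+ req->num_leds = 1;
+ req->led0_id = 0; /* first LED instance */
+ req->led0_state = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
+ req->led0_blink_on = cpu_to_le16(on_off_dur);
+ req->led0_blink_off = cpu_to_le16(on_off_dur);
+ req->enables = cpu_to_le32(PORT_LED_CFG_REQ_ENABLES_LED0_ID |
+ PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF);
+}
+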
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
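+/*
+ * Minimal sketch, not part of the generated interface: testing the
+ * per-LED capability bits above. The helper name is hypothetical and
+ * the usual kernel byte-order helpers are assumed.
+ */
+static inline bool bnxt_example_led0_can_blink(const struct hwrm_port_led_qcaps_output *resp)
+{
+	u16 caps = le16_to_cpu(resp->led0_state_caps);
+
+	return (caps & PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED) &&
+	       (caps & PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED);
+}
+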
+/* hwrm_port_phy_fdrstat_input (size:192b/24B) */
+struct hwrm_port_phy_fdrstat_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 rsvd[2];
+ __le16 ops;
+ #define PORT_PHY_FDRSTAT_REQ_OPS_START 0x0UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_STOP 0x1UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_CLEAR 0x2UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_COUNTER 0x3UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_LAST PORT_PHY_FDRSTAT_REQ_OPS_COUNTER
+};
+
+/* hwrm_port_phy_fdrstat_output (size:3072b/384B) */
+struct hwrm_port_phy_fdrstat_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 start_time;
+ __le64 end_time;
+ __le64 cmic_start_time;
+ __le64 cmic_end_time;
+ __le64 accumulated_uncorrected_codewords;
+ __le64 accumulated_corrected_codewords;
+ __le64 accumulated_total_codewords;
+ __le64 accumulated_symbol_errors;
+ __le64 accumulated_codewords_err_s0;
+ __le64 accumulated_codewords_err_s1;
+ __le64 accumulated_codewords_err_s2;
+ __le64 accumulated_codewords_err_s3;
+ __le64 accumulated_codewords_err_s4;
+ __le64 accumulated_codewords_err_s5;
+ __le64 accumulated_codewords_err_s6;
+ __le64 accumulated_codewords_err_s7;
+ __le64 accumulated_codewords_err_s8;
+ __le64 accumulated_codewords_err_s9;
+ __le64 accumulated_codewords_err_s10;
+ __le64 accumulated_codewords_err_s11;
+ __le64 accumulated_codewords_err_s12;
+ __le64 accumulated_codewords_err_s13;
+ __le64 accumulated_codewords_err_s14;
+ __le64 accumulated_codewords_err_s15;
+ __le64 accumulated_codewords_err_s16;
+ __le64 uncorrected_codewords;
+ __le64 corrected_codewords;
+ __le64 total_codewords;
+ __le64 symbol_errors;
+ __le64 codewords_err_s0;
+ __le64 codewords_err_s1;
+ __le64 codewords_err_s2;
+ __le64 codewords_err_s3;
+ __le64 codewords_err_s4;
+ __le64 codewords_err_s5;
+ __le64 codewords_err_s6;
+ __le64 codewords_err_s7;
+ __le64 codewords_err_s8;
+ __le64 codewords_err_s9;
+ __le64 codewords_err_s10;
+ __le64 codewords_err_s11;
+ __le64 codewords_err_s12;
+ __le64 codewords_err_s13;
+ __le64 codewords_err_s14;
+ __le64 codewords_err_s15;
+ __le64 codewords_err_s16;
+ __le32 window_size;
+ __le16 unused_0[1];
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_fdrstat_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_fdrstat_cmd_err {
+ u8 code;
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED 0x1UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_LAST PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED
+ u8 unused_0[7];
+};
+
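+/*
+ * Minimal sketch (hypothetical helper): the snapshot above exposes both
+ * accumulated and per-window FEC codeword counters; one plausible
+ * derived figure is the uncorrected-codeword rate in ppm. div64_u64()
+ * is the kernel helper from <linux/math64.h>; the scaling below assumes
+ * counts small enough that the multiply by 1000000 does not overflow.
+ */
+static inline u64 bnxt_example_fdrstat_uncorrected_ppm(const struct hwrm_port_phy_fdrstat_output *resp)
+{
+	u64 total = le64_to_cpu(resp->total_codewords);
+
+	if (!total)
+		return 0;
+	return div64_u64(le64_to_cpu(resp->uncorrected_codewords) * 1000000ULL, total);
+}
+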
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
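+/*
+ * Minimal sketch (hypothetical helper) flagging the asymmetric polarity
+ * of the two capability bits above: local loopback is reported as NOT
+ * supported, remote loopback as supported.
+ */
+static inline bool bnxt_example_local_lpbk_supported(const struct hwrm_port_mac_qcaps_output *resp)
+{
+	return !(resp->flags & PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED);
+}
+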
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
+};
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
+
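+/*
+ * Minimal sketch, assuming the repeating two-byte {queue_idN,
+ * queue_idN_service_profile} layout visible above: the eight pairs can
+ * be walked as a flat array starting at queue_id0. Helper name is
+ * hypothetical; i must be below max_configurable_queues (at most 8).
+ */
+static inline u8 bnxt_example_qportcfg_profile(const struct hwrm_queue_qportcfg_output *resp, int i)
+{
+	const u8 *q = &resp->queue_id0;
+
+	return q[2 * i + 1];	/* service profile of entry i */
+}
+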
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
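+/*
+ * Minimal sketch (hypothetical helper) of the enables convention used
+ * by the request above: firmware only consumes fields whose bit is set
+ * in enables, so dflt_len is left untouched here and only the service
+ * profile is configured.
+ */
+static inline void bnxt_example_queue_set_lossless(struct hwrm_queue_cfg_input *req, u32 qid)
+{
+	req->flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
+	req->enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+	req->queue_id = cpu_to_le32(qid);
+	req->service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+}
+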
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
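+/*
+ * Minimal sketch (hypothetical helper): per the defines above, bits 0-7
+ * of flags are the per-priority PFC enables and bits 8-15 the matching
+ * watchdog enables, so a priority bitmap falls out directly.
+ */
+static inline u8 bnxt_example_pfc_pri_bitmap(const struct hwrm_queue_pfcenable_qcfg_output *resp)
+{
+	return le32_to_cpu(resp->flags) & 0xff;
+}
+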
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
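+/*
+ * Minimal sketch, assuming the consecutive pri0..pri7_cos_queue_id
+ * layout above so the mapping can be indexed as an array. Helper name
+ * is hypothetical; pri must be in the range 0-7.
+ */
+static inline u8 bnxt_example_pri2cos_queue(const struct hwrm_queue_pri2cos_qcfg_output *resp, int pri)
+{
+	return (&resp->pri0_cos_queue_id)[pri];
+}
+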
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2[4];
+ u8 valid;
+};
+
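+/*
+ * Minimal sketch of how the packed *_min_bw/*_max_bw words above
+ * decompose, using the queue 0 masks (all eight queues share the same
+ * layout): bits 0-27 carry the value, bit 28 selects bits vs bytes
+ * scale, and bits 29-31 the unit. Helper name is hypothetical.
+ */
+static inline u32 bnxt_example_cos2bw_bw_value(__le32 bw)
+{
+	u32 v = le32_to_cpu(bw);
+
+	return (v & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
+	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
+}
+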
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
+};
+
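+ /*
+  * Editor's note, not part of the generated interface: each *_min_bw /
+  * *_max_bw word above packs three fields: a 28-bit value (BW_VALUE_MASK
+  * / _SFT), a scale select at bit 28 (SCALE_BITS vs. SCALE_BYTES) and a
+  * 3-bit unit at bits 29-31 (UNIT_*). A minimal sketch of encoding one,
+  * assuming 'mbps' is a caller-supplied u32 and 'req' a request struct
+  * prepared by the driver's usual HWRM helpers:
+  *
+  *   u32 bw = ((mbps << QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT) &
+  *             QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK) |
+  *            QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS |
+  *            QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA;
+  *   req->queue_id2_max_bw = cpu_to_le32(bw);
+  */
+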
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
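+ /*
+  * Editor's note: every *_output structure in this file shares the same
+  * 8-byte header (error_code/req_type/seq_id/resp_len) and ends with a
+  * 'valid' byte. The response is DMAed into host memory with the final
+  * byte written last, so a caller would typically poll it before
+  * trusting the rest; a sketch, assuming 'resp' points at the buffer:
+  *
+  *   if (!READ_ONCE(resp->valid))
+  *           return -EAGAIN;  // response not fully DMAed yet
+  *   if (le16_to_cpu(resp->error_code))
+  *           return -EIO;
+  */
+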
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
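+ /*
+  * Editor's note: as elsewhere in this file, optional fields in a _cfg
+  * request are only consumed by firmware when the matching 'enables' bit
+  * is set. A sketch of programming the fallback priority, assuming 'pri'
+  * is a caller-chosen value:
+  *
+  *   req->port_id = port_id;
+  *   req->default_pri = pri;
+  *   req->enables = cpu_to_le32(QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI);
+  */
+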
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
+ __le16 virtio_net_fid;
+ __le16 vnic_id;
+};
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
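+ /*
+  * Editor's sketch: hwrm_vnic_update follows the same enables convention,
+  * e.g. changing only the MRU of an existing VNIC (assuming 'new_mru' is
+  * the caller's value):
+  *
+  *   req->vnic_id = cpu_to_le32(vnic_id);
+  *   req->enables = cpu_to_le32(VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+  *   req->mru = cpu_to_le16(new_mru);
+  */
+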
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
+ #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
+ __le16 queue_id;
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ __le32 raw_qp_id;
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
+ u8 valid;
+};
+
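+ /*
+  * Editor's sketch: the qcaps flags gate which optional request fields a
+  * driver may use; e.g. only program tnl_tpa_en_bitmap in
+  * hwrm_vnic_tpa_cfg below when the corresponding capability is
+  * advertised:
+  *
+  *   u32 caps = le32_to_cpu(resp->flags);
+  *   bool tnl_tpa = caps & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP;
+  */
+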
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
+};
+
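+ /*
+  * Editor's sketch: enabling hardware TPA for VXLAN and Geneve tunnels,
+  * assuming the HW_TUNNEL_TPA_CAP capability checked above is present:
+  *
+  *   req->flags |= cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
+  *                             VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA);
+  *   req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+  *   req->tnl_tpa_en_bitmap =
+  *           cpu_to_le32(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN |
+  *                       VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE);
+  */
+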
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
+};
+
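+ /*
+  * Editor's sketch: a typical 4-tuple RSS configuration over IPv4/IPv6
+  * with the default Toeplitz ring selection (the DMA table addresses are
+  * assumed to have been set up by the caller):
+  *
+  *   req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+  *                                VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+  *                                VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+  *                                VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
+  *   req->ring_select_mode = VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ;
+  */
+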
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNABLE_TO_GET_RSS_CFG 0x2UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED 0x3UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_ERR 0x4UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_MODE_FAIL 0x5UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RING_GRP_TABLE_ALLOC_ERR 0x6UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_KEY_ALLOC_ERR 0x7UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_DMA_FAILED 0x8UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RX_RING_ALLOC_ERR 0x9UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CMPL_RING_ALLOC_ERR 0xaUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HW_SET_RSS_FAILED 0xbUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CTX_INVALID 0xcUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID 0xdUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID 0xeUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID
+ u8 unused_0[7];
+};
+
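+ /*
+  * Editor's note: when the command fails, firmware may return this
+  * command-specific error structure in the response buffer; a sketch of
+  * mapping a couple of the codes, assuming 'err' points at it:
+  *
+  *   switch (err->code) {
+  *   case VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED:
+  *           return -EOPNOTSUPP;
+  *   case VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID:
+  *           return -ENODEV;
+  *   default:
+  *           return -EIO;
+  *   }
+  */
+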
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 max_bds;
+ u8 unused_0[4];
+};
+
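+ /*
+  * Editor's sketch: requesting header-data split for IPv4/IPv6 packets,
+  * with 'hds_thresh' a caller-chosen threshold value:
+  *
+  *   req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+  *                            VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+  *                            VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+  *   req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+  *                              VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+  *   req->hds_threshold = cpu_to_le16(hds_thresh);
+  */
+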
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:768b/96B) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ #define RING_ALLOC_REQ_ENABLES_DPI_VALID 0x2000UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ __le16 schq_id;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 steering_tag;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
+ __le64 cq_handle;
+ __le16 dpi;
+ __le16 unused_5[3];
+};
+
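+ /*
+  * Editor's sketch: the _MASK/_SFT pairs above compose multi-bit fields;
+  * e.g. selecting WFQ arbitration with a caller-chosen 'weight' in the
+  * 8-bit policy parameter:
+  *
+  *   u16 arb = RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ |
+  *             ((weight << RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT) &
+  *              RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK);
+  *   req->ring_arb_cfg = cpu_to_le16(arb);
+  *   req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_RING_ARB_CFG);
+  */
+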
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:256b/32B) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
+ __le16 ring_id;
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
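+ /*
+  * Editor's sketch: a caller would typically keep its coalescing settings
+  * inside the min/max ranges reported here, e.g.:
+  *
+  *   u16 tmr = clamp_t(u16, wanted_tmr,
+  *                     le16_to_cpu(resp->int_lat_tmr_max_min),
+  *                     le16_to_cpu(resp->int_lat_tmr_max_max));
+  */
+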
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
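+ /*
+  * Editor's note: the terse field names above are, as used by the bnxt
+  * driver, the completion ring (cr), receive ring (rr), aggregation ring
+  * (ar) and statistics context (sc) that make up one ring group.
+  */
+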
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
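+ /*
+  * Editor's note: the four IDs below sit at the top of the 32-bit flow-id
+  * space and appear to act as reserved sentinels (default traffic and the
+  * RoCE v1/v2/CNP classes) rather than ordinary per-flow identifiers.
+  */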
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
+ u8 l2_addr[6];
+ u8 num_vlans;
+ u8 t_num_vlans;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
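+ /*
+  * Editor's sketch: unpacking the flow_id returned above into its value,
+  * internal/external type and RX/TX direction components:
+  *
+  *   u32 fid = le32_to_cpu(resp->flow_id);
+  *   u32 val = fid & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK;
+  *   bool ext = fid & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE;
+  *   bool tx  = fid & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR;
+  */
+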
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_MAX_VLAN_TAGS 0x2UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_VNIC_ID 0x3UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION 0x4UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION
+ u8 unused_0[7];
+};
+
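+/* Illustrative sketch, not part of the generated interface: issuing
+ * HWRM_CFA_L2_SET_RX_MASK via the bnxt_en driver's hwrm_req_* helpers,
+ * assumed here. All multi-byte request fields are little-endian:
+ *
+ *   struct hwrm_cfa_l2_set_rx_mask_input *req;
+ *   int rc;
+ *
+ *   rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
+ *   if (rc)
+ *           return rc;
+ *   req->vnic_id = cpu_to_le32(vnic_id);
+ *   req->mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
+ *                           CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+ *   rc = hwrm_req_send(bp, req);
+ */
+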
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_MAC 0x65UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_BC_MC_MAC 0x66UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_VNIC 0x67UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_PF_FID 0x68UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L2_CTXT_ID 0x69UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_CTXT_CFG 0x6aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_DATA_FLD 0x6bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_CFA_LAYOUT 0x6cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_CTXT_ALLOC_FAIL 0x6dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ROCE_FLOW_ERR 0x6eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_OWNER_FID 0x6fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_REF_CNT 0x70UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_FLOW_TYPE 0x71UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_IVLAN 0x72UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_MAX_VLAN_ID 0x73UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_TNL_REQ 0x74UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_ADDR 0x75UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_IVLAN 0x76UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR 0x77UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR_TYPE 0x78UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_T_L3_ADDR_TYPE 0x79UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DST_VNIC_ID 0x7aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VNI 0x7bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_DST_ID 0x7cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_FAIL_ROCE_L2_FLOW 0x7dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_NPAR_VLAN 0x7eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ATSP_ADD 0x7fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DFLT_VLAN_FAIL 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L3_TYPE 0x81UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW 0x82UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[4];
+};
+
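+/* Illustrative sketch, not part of the generated interface: unlike most
+ * HWRM fields, tunnel_dst_port_val is declared __be16, i.e. it carries
+ * the UDP destination port in network byte order:
+ *
+ *   req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ *   req->tunnel_dst_port_val = cpu_to_be16(4789);
+ *
+ * 4789 is the IANA-assigned VXLAN port; req is an assumed pointer to a
+ * prepared hwrm_tunnel_dst_port_alloc_input request.
+ */
+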
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_cmd_err (size:64b/8B) */
+struct hwrm_tunnel_dst_port_alloc_cmd_err {
+ u8 code;
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_UNKNOWN 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_TUNNEL_ALLOC_ERR 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_ACCESS_DENIED 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_GET_PORT_FAILED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_PORT_NUM_ERR 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_CUSTOM_TNL_PORT_ERR 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_TUNNEL_QUERY_ERR 0x6UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_GRE_MODE_UNSUPPORTED 0x7UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_GRE_ALREADY_ALLOC 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_TUNNEL_TYPE_INVALID 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_UPAR_ERR 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_LAST TUNNEL_DST_PORT_ALLOC_CMD_ERR_UPAR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_UNKNOWN 0x0UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_INVALID_FID 0x65UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_INVALID_CTX_ID 0x66UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_INVALID_PAYLOAD 0x67UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_CTX_STAT_RETRIEVAL_FAIL 0x68UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_RES_NOT_ALLOCATED 0x69UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_LAST HWRM_STAT_COMMON_CMD_ERR_CODE_RES_NOT_ALLOCATED
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+};
+
+/* ctx_eng_stats (size:512b/64B) */
+struct ctx_eng_stats {
+ __le64 eng_bytes_in;
+ __le64 eng_bytes_out;
+ __le64 aux_bytes_in;
+ __le64 aux_bytes_out;
+ __le64 commands;
+ __le64 error_commands;
+ __le64 cce_engine_usage;
+ __le64 cdd_engine_usage;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+ u8 unused_0;
+ __le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
+};
+
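+/* Illustrative sketch, not part of the generated interface: the firmware
+ * periodically DMAs a ctx_hw_stats (or ctx_hw_stats_ext) block into host
+ * memory, so the caller supplies a coherent buffer and an update period:
+ *
+ *   req->stats_dma_addr = cpu_to_le64(stats_map);
+ *   req->stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+ *   req->update_period_ms = cpu_to_le32(1000);
+ *
+ * stats_map is assumed to be a dma_addr_t from dma_alloc_coherent().
+ */
+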
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc_cmd_err (size:64b/8B) */
+struct hwrm_stat_ctx_alloc_cmd_err {
+ u8 code;
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_INVALID_FID 0x1UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_INVALID_FLAG 0x2UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_INVALID_DMA_ADDR 0x3UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_RES_NOT_AVAIL 0x4UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_RES_POOL_EXHAUSTED 0x5UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_CTX_ALLOC_FAIL 0x6UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_LAST STAT_CTX_ALLOC_CMD_ERR_CODE_CTX_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_free_cmd_err (size:64b/8B) */
+struct hwrm_stat_ctx_free_cmd_err {
+ u8 code;
+ #define STAT_CTX_FREE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_INVALID_CTX_ID 0x1UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_RES_DEALLOC_FAIL 0x2UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_CTX_FREE_FAIL 0x3UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_LAST STAT_CTX_FREE_CMD_ERR_CODE_CTX_FREE_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
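+/* Illustrative note, not part of the generated interface: setting
+ * STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK is assumed to make the firmware
+ * report each counter's roll-over mask instead of its current value, so
+ * a driver can size its software accumulators:
+ *
+ *   req->stat_ctx_id = cpu_to_le32(ctx_id);
+ *   req->flags = STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK;
+ */
+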
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_eng_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */
+struct hwrm_stat_ctx_eng_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 eng_bytes_in;
+ __le64 eng_bytes_out;
+ __le64 aux_bytes_in;
+ __le64 aux_bytes_out;
+ __le64 commands;
+ __le64 error_commands;
+ __le64 cce_engine_usage;
+ __le64 cdd_engine_usage;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
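+/* Illustrative sketch, not part of the generated interface: the qstats
+ * commands share a size handshake - the request carries the size of the
+ * host DMA buffer (assumed here to be in bytes) and the response reports
+ * how much the firmware actually wrote, which may be less on older
+ * firmware:
+ *
+ *   req->pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
+ *   req->pcie_stat_host_addr = cpu_to_le64(map);
+ *   ...
+ *   copied = le16_to_cpu(resp->pcie_stat_size);
+ *
+ * map is an assumed DMA address for the statistics buffer.
+ */
+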
+/* hwrm_pcie_qstats_cmd_err (size:64b/8B) */
+struct hwrm_pcie_qstats_cmd_err {
+ u8 code;
+ #define PCIE_QSTATS_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_LEGACY_INVALID_PF_ID 0x1UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_GENERIC_INVALID_EP_IDX 0x2UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_GENERIC_MEM_ALLOC_FAIL 0x3UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_LAST PCIE_QSTATS_CMD_ERR_CODE_GENERIC_MEM_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4544b/568B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+ __le32 pcie_rd_latency_histogram[12];
+ __le32 pcie_rd_latency_all_normal_count;
+ __le32 unused_2;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_stat_generic_qstats_cmd_err (size:64b/8B) */
+struct hwrm_stat_generic_qstats_cmd_err {
+ u8 code;
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_INVALID_EP_ID 0x1UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_INVALID_STAT_SIZE 0x2UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_INVALID_DMA_ADDR 0x3UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_HOST_NOT_ACTIVE 0x4UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_MEM_ALLOC_FAIL 0x5UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_LAST STAT_GENERIC_QSTATS_CMD_ERR_CODE_MEM_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
+};
+
+/* hwrm_stat_db_error_qstats_input (size:128b/16B) */
+struct hwrm_stat_db_error_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_stat_db_error_qstats_output (size:320b/40B) */
+struct hwrm_stat_db_error_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 tx_db_drop_invalid_qp_state;
+ __le32 rx_db_drop_invalid_rq_state;
+ __le32 tx_db_drop_format_error;
+ __le32 express_db_dropped_misc_error;
+ __le32 express_db_dropped_sq_overflow;
+ __le32 express_db_dropped_rq_overflow;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_query_roce_stats_input (size:256b/32B) */
+struct hwrm_stat_query_roce_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 roce_stat_size;
+ u8 unused_0[6];
+ __le64 roce_stat_host_addr;
+};
+
+/* hwrm_stat_query_roce_stats_output (size:128b/16B) */
+struct hwrm_stat_query_roce_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 roce_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* stat_query_roce_stats_data (size:2944b/368B) */
+struct stat_query_roce_stats_data {
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
+ __le64 xp_sq_overflow_err;
+ __le64 xp_rq_overflow_error;
+};
+
+/* hwrm_stat_query_roce_stats_ext_input (size:256b/32B) */
+struct hwrm_stat_query_roce_stats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 roce_stat_size;
+ u8 unused_0[6];
+ __le64 roce_stat_host_addr;
+};
+
+/* hwrm_stat_query_roce_stats_ext_output (size:128b/16B) */
+struct hwrm_stat_query_roce_stats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 roce_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* stat_query_roce_stats_ext_data (size:2240b/280B) */
+struct stat_query_roce_stats_ext_data {
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_send_req_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_send_req_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_ecn_marked_pkts;
+ __le64 tx_cnp_bytes;
+ __le64 rx_cnp_bytes;
+ __le64 seq_err_naks_rcvd;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 to_retransmit;
+ __le64 dup_req;
+ __le64 rx_dcn_payload_cut;
+ __le64 te_bypassed;
+ __le64 tx_dcn_cnp;
+ __le64 rx_dcn_cnp;
+ __le64 rx_payload_cut;
+ __le64 rx_payload_cut_ignored;
+ __le64 rx_dcn_cnp_ignored;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 host_idx;
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 unused_0[6];
+ u8 valid;
+};
+
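+/* Illustrative sketch, not part of the generated interface: a graceful
+ * whole-chip reset. The selfrst_status in the response tells the driver
+ * whether the firmware resets itself or needs a follow-up PCIe reset:
+ *
+ *   req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ *   req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+ *   ...
+ *   if (resp->selfrst_status == FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST)
+ *           ... the host is assumed to trigger a PCIe reset ...
+ */
+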
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
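+/* Illustrative sketch, not part of the generated interface: populating
+ * hwrm_fw_set_time_input from a struct tm. Only year, millisecond and
+ * zone are 16-bit little-endian fields; the rest are single bytes:
+ *
+ *   req->year = cpu_to_le16(tm.tm_year + 1900);
+ *   req->month = tm.tm_mon + 1;
+ *   req->day = tm.tm_mday;
+ *   req->hour = tm.tm_hour;
+ *   req->minute = tm.tm_min;
+ *   req->second = tm.tm_sec;
+ *   req->zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
+ */
+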
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_get_time_input (size:128b/16B) */
+struct hwrm_fw_get_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_get_time_output (size:192b/24B) */
+struct hwrm_fw_get_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 year;
+ #define FW_GET_TIME_RESP_YEAR_UNKNOWN 0x0UL
+ #define FW_GET_TIME_RESP_YEAR_LAST FW_GET_TIME_RESP_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_GET_TIME_RESP_ZONE_UTC 0
+ #define FW_GET_TIME_RESP_ZONE_UNKNOWN 65535
+ #define FW_GET_TIME_RESP_ZONE_LAST FW_GET_TIME_RESP_ZONE_UNKNOWN
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS 0x190UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS
+ __le16 len;
+ u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_ets (size:256b/32B) */
+struct hwrm_struct_data_dcbx_ets {
+ u8 destination;
+ #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION 0x1UL
+ #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION 0x2UL
+ #define STRUCT_DATA_DCBX_ETS_DESTINATION_LAST STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION
+ u8 max_tcs;
+ __le16 unused1;
+ u8 pri0_to_tc_map;
+ u8 pri1_to_tc_map;
+ u8 pri2_to_tc_map;
+ u8 pri3_to_tc_map;
+ u8 pri4_to_tc_map;
+ u8 pri5_to_tc_map;
+ u8 pri6_to_tc_map;
+ u8 pri7_to_tc_map;
+ u8 tc0_to_bw_map;
+ u8 tc1_to_bw_map;
+ u8 tc2_to_bw_map;
+ u8 tc3_to_bw_map;
+ u8 tc4_to_bw_map;
+ u8 tc5_to_bw_map;
+ u8 tc6_to_bw_map;
+ u8 tc7_to_bw_map;
+ u8 tc0_to_tsa_map;
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP 0x0UL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS 0x1UL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS 0x2UL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_LAST STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC
+ u8 tc1_to_tsa_map;
+ u8 tc2_to_tsa_map;
+ u8 tc3_to_tsa_map;
+ u8 tc4_to_tsa_map;
+ u8 tc5_to_tsa_map;
+ u8 tc6_to_tsa_map;
+ u8 tc7_to_tsa_map;
+ u8 unused_0[4];
+};
+
+/* hwrm_struct_data_dcbx_pfc (size:64b/8B) */
+struct hwrm_struct_data_dcbx_pfc {
+ u8 pfc_priority_bitmap;
+ u8 max_pfc_tcs;
+ u8 mbc;
+ u8 unused_0[5];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_struct_data_dcbx_feature_state (size:64b/8B) */
+struct hwrm_struct_data_dcbx_feature_state {
+ u8 dcbx_mode;
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE
+ u8 ets_state;
+ u8 pfc_state;
+ u8 app_state;
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 0x6UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS
+ u8 unused[3];
+ u8 resets;
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS 0x1UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC 0x2UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP 0x4UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_LAST STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE
+};
+
+/* hwrm_struct_data_lldp (size:64b/8B) */
+struct hwrm_struct_data_lldp {
+ u8 admin_state;
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_TX 0x1UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_RX 0x2UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE 0x3UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_LAST STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE
+ u8 port_description_state;
+ #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_LAST STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE
+ u8 system_name_state;
+ #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE
+ u8 system_desc_state;
+ #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE
+ u8 system_cap_state;
+ #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE
+ u8 mgmt_addr_state;
+ #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_LAST STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE
+ u8 async_event_notification_state;
+ #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_LAST STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE
+ u8 unused_0;
+};
+
+/* hwrm_struct_data_lldp_generic (size:2112b/264B) */
+struct hwrm_struct_data_lldp_generic {
+ u8 tlv_type;
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS 0x1UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT 0x2UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME 0x3UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME 0x5UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_LAST STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION
+ u8 subtype;
+ u8 length;
+ u8 unused1[5];
+ __le32 tlv_value[64];
+};
+
+/* hwrm_struct_data_lldp_device (size:1472b/184B) */
+struct hwrm_struct_data_lldp_device {
+ __le16 ttl;
+ u8 mgmt_addr_len;
+ u8 mgmt_addr_type;
+ u8 unused_3[4];
+ __le32 mgmt_addr[8];
+ __le32 system_caps;
+ u8 intf_num_type;
+ u8 mgmt_addr_oid_length;
+ u8 unused_4[2];
+ __le32 intf_num;
+ u8 unused_5[4];
+ __le32 mgmt_addr_oid[32];
+};
+
+/* hwrm_struct_data_port_description (size:64b/8B) */
+struct hwrm_struct_data_port_description {
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_struct_data_rss_v2 (size:128b/16B) */
+struct hwrm_struct_data_rss_v2 {
+ __le16 flags;
+ #define STRUCT_DATA_RSS_V2_FLAGS_HASH_VALID 0x1UL
+ __le16 rss_ctx_id;
+ __le16 num_ring_groups;
+ __le16 hash_type;
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV4 0x1UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV4 0x2UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV4 0x4UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV6 0x8UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV6 0x10UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV6 0x20UL
+ __le64 hash_key_ring_group_ids;
+};
+
+/* hwrm_struct_data_power_information (size:192b/24B) */
+struct hwrm_struct_data_power_information {
+ __le32 bkup_power_info_ver;
+ __le32 platform_bkup_power_count;
+ __le32 load_milli_watt;
+ __le32 bkup_time_milli_seconds;
+ __le32 bkup_power_status;
+ __le32 bkup_power_charge_time;
+};
+
+/* hwrm_struct_data_peer_mmap (size:1600b/200B) */
+struct hwrm_struct_data_peer_mmap {
+ __le16 fid;
+ __le16 count;
+ __le32 unused_0;
+ __le64 hpa_0;
+ __le64 gpa_0;
+ __le64 size_0;
+ __le64 hpa_1;
+ __le64 gpa_1;
+ __le64 size_1;
+ __le64 hpa_2;
+ __le64 gpa_2;
+ __le64 size_2;
+ __le64 hpa_3;
+ __le64 gpa_3;
+ __le64 size_3;
+ __le64 hpa_4;
+ __le64 gpa_4;
+ __le64 size_4;
+ __le64 hpa_5;
+ __le64 gpa_5;
+ __le64 size_5;
+ __le64 hpa_6;
+ __le64 gpa_6;
+ __le64 size_6;
+ __le64 hpa_7;
+ __le64 gpa_7;
+ __le64 size_7;
+};
+
+/* hwrm_struct_data_peer_mmap_v2 (size:1792b/224B) */
+struct hwrm_struct_data_peer_mmap_v2 {
+ __le16 fid;
+ __le16 count;
+ __le32 unused_0;
+ __le64 hpa_0;
+ __le64 gpa_0;
+ __le64 size_0;
+ __le64 hpa_1;
+ __le64 gpa_1;
+ __le64 size_1;
+ __le64 hpa_2;
+ __le64 gpa_2;
+ __le64 size_2;
+ __le64 hpa_3;
+ __le64 gpa_3;
+ __le64 size_3;
+ __le64 hpa_4;
+ __le64 gpa_4;
+ __le64 size_4;
+ __le64 hpa_5;
+ __le64 gpa_5;
+ __le64 size_5;
+ __le64 hpa_6;
+ __le64 gpa_6;
+ __le64 size_6;
+ __le64 hpa_7;
+ __le64 gpa_7;
+ __le64 size_7;
+ __le16 ds_port;
+ __le16 auth_status;
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_SUCCESS 0x0UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_NONCE_MIS 0xdUL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_SIG_INVALID 0xeUL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_AUTH_FAILED 0xfUL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_CERT_N_VAL 0x10UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_INVA_CMD_CODE 0x11UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_INVALID_HDR 0x12UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_LAST STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_INVALID_HDR
+ __le32 unused_2;
+ __le16 status[8];
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_SUCCESS 0x0UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_HDR_VER_MISMATCH 0x1UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_PKT_SOM_MISSING 0x2UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_OUT_OF_ORDER_PKTS 0x3UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ALREADY_ADDED 0x4UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ALREADY_DELETED 0x5UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NOT_ADDED 0x6UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NOT_DELETED 0x7UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NO_EP_CNTX 0x8UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_INVALID_BUF_SZ 0xaUL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ALLOC_MEM_FAILED 0xbUL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ENTRY_CNT_ERR 0xcUL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NO_RESPONSE 0x13UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_IPC_ERROR 0x14UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_LAST STRUCT_DATA_PEER_MMAP_V2_STATUS_IPC_ERROR
+};
+
+/* hwrm_struct_data_msix_per_vf (size:320b/40B) */
+struct hwrm_struct_data_msix_per_vf {
+ __le16 pf_id;
+ __le16 count;
+ __le32 unused_0;
+ __le16 start_vf_0;
+ __le16 msix_0;
+ __le16 start_vf_1;
+ __le16 msix_1;
+ __le16 start_vf_2;
+ __le16 msix_2;
+ __le16 start_vf_3;
+ __le16 msix_3;
+ __le16 start_vf_4;
+ __le16 msix_4;
+ __le16 start_vf_5;
+ __le16 msix_5;
+ __le16 start_vf_6;
+ __le16 msix_6;
+ __le16 start_vf_7;
+ __le16 msix_7;
+};
+
+/* hwrm_struct_data_dbg_token_claims (size:128b/16B) */
+struct hwrm_struct_data_dbg_token_claims {
+ __s32 claim_number;
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_EXP 4
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_CTI 7
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_AUTH_ID -67000
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_PERSIST -67001
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_SDB_EN -68000
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_DIAGRW_EN -68003
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_FW_CLI -68100
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_LAST STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_FW_CLI
+ __le16 data_type;
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_1_BYTE 0x1UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_2_BYTES 0x2UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_4_BYTES 0x3UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_8_BYTES 0x4UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_BOOLEAN 0x5UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_LAST STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_BOOLEAN
+ __le16 unused_0;
+ u8 claim_data[8];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
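+/* Illustrative sketch, not part of the generated interface: the buffer at
+ * src_data_addr holds hdr_cnt instances of hwrm_struct_hdr, each followed
+ * immediately by its payload. A single DCBX app entry is built roughly as:
+ *
+ *   struct hwrm_struct_hdr *hdr = buf;
+ *   struct hwrm_struct_data_dcbx_app *app = (void *)(hdr + 1);
+ *
+ *   hdr->struct_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ *   hdr->len = cpu_to_le16(sizeof(*app));
+ *   hdr->count = 1;
+ *   ... fill *app, then point the request at the buffer ...
+ *   req->src_data_addr = cpu_to_le64(map);
+ *   req->data_len = cpu_to_le16(sizeof(*hdr) + sizeof(*app));
+ *   req->hdr_cnt = 1;
+ *
+ * buf and map are an assumed coherent DMA buffer and its bus address.
+ */
+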
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_ALREADY_ADDED 0x4UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG 0x5UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_SUPPORTED 0x320UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE 0x321UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE
+ u8 count;
+ u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_ipc_msg_input (size:320b/40B) */
+struct hwrm_fw_ipc_msg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FW_IPC_MSG_REQ_ENABLES_COMMAND_ID 0x1UL
+ #define FW_IPC_MSG_REQ_ENABLES_SRC_PROCESSOR 0x2UL
+ #define FW_IPC_MSG_REQ_ENABLES_DATA_OFFSET 0x4UL
+ #define FW_IPC_MSG_REQ_ENABLES_LENGTH 0x8UL
+ __le16 command_id;
+ #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG 0x1UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_MHB_HOST 0x2UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_DRVR_VERSION 0x3UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_LOG2H 0x4UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_LAST FW_IPC_MSG_REQ_COMMAND_ID_LOG2H
+ u8 src_processor;
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_CFW 0x1UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_BONO 0x2UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_APE 0x3UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG 0x4UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_LAST FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG
+ u8 unused_0;
+ __le32 data_offset;
+ __le16 length;
+ u8 unused_1[2];
+ __le64 opaque;
+};
+
+/* hwrm_fw_ipc_msg_output (size:256b/32B) */
+struct hwrm_fw_ipc_msg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 msg_data_1;
+ __le32 msg_data_2;
+ __le64 reserved64;
+ u8 reserved48[7];
+ u8 valid;
+};
+
+/* hwrm_fw_ipc_mailbox_input (size:256b/32B) */
+struct hwrm_fw_ipc_mailbox_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ u8 unused;
+ u8 event_id;
+ u8 port_id;
+ __le32 event_data1;
+ __le32 event_data2;
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_ipc_mailbox_output (size:128b/16B) */
+struct hwrm_fw_ipc_mailbox_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_ipc_mailbox_cmd_err (size:64b/8B) */
+struct hwrm_fw_ipc_mailbox_cmd_err {
+ u8 code;
+ #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_IPC_MAILBOX_CMD_ERR_CODE_LAST FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_ecn_cfg_input (size:192b/24B) */
+struct hwrm_fw_ecn_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define FW_ECN_CFG_REQ_FLAGS_ENABLE_ECN 0x1UL
+ u8 unused_0[6];
+};
+
+/* hwrm_fw_ecn_cfg_output (size:128b/16B) */
+struct hwrm_fw_ecn_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_ecn_qcfg_input (size:128b/16B) */
+struct hwrm_fw_ecn_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_ecn_qcfg_output (size:128b/16B) */
+struct hwrm_fw_ecn_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define FW_ECN_QCFG_RESP_FLAGS_ENABLE_ECN 0x1UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_health_check_input (size:128b/16B) */
+struct hwrm_fw_health_check_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_health_check_output (size:128b/16B) */
+struct hwrm_fw_health_check_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 fw_status;
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_BOOTED 0x1UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH 0x2UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_BOOTED 0x4UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH 0x8UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_BOOTED 0x10UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH 0x20UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SECOND_RT 0x40UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_FASTBOOTED 0x80UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_BOOTED 0x100UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_MISMATCH 0x200UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_MBR_CORRUPT 0x400UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CFG_MISMATCH 0x800UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_FRU_MISMATCH 0x1000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT2_BOOTED 0x2000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT2_MISMATCH 0x4000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_GXRT_BOOTED 0x8000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_GXRT_MISMATCH 0x10000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT2_BOOTED 0x20000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT2_MISMATCH 0x40000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_ART_MISMATCH 0x80000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_ART_BOOTED 0x100000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
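+/*
+ * Illustrative sketch (not part of the generated interface): issuing the
+ * health check and decoding fw_status bits.  The hwrm_req_*() calls are
+ * the bnxt_en driver's request helpers, HWRM_FW_HEALTH_CHECK is assumed
+ * to be the matching req_type constant, and "bp" plus the warning text
+ * are placeholders.
+ *
+ *	struct hwrm_fw_health_check_output *resp;
+ *	struct hwrm_fw_health_check_input *req;
+ *	u32 fw_status = 0;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FW_HEALTH_CHECK);
+ *	if (rc)
+ *		return rc;
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		fw_status = le32_to_cpu(resp->fw_status);
+ *	hwrm_req_drop(bp, req);
+ *	if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH)
+ *		netdev_warn(bp->dev, "SBI primary/secondary mismatch\n");
+ *	return rc;
+ */
+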
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_MPRT_FW 0x3UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_RERT_FW 0x4UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_AUXRT_FW 0x5UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_AUXRT_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_MPRT_FW 0x3UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_RERT_FW 0x4UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_AUXRT_FW 0x5UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_AUXRT_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
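+/*
+ * Illustrative sketch (not part of the generated interface): activating a
+ * livepatch previously installed in NVM.  Deactivation is the same call
+ * with OPCODE_DEACTIVATE; patch_len/host_addr only matter for
+ * LOADTYPE_MEMORY_DIRECT.  The hwrm_req_*() helpers are assumed from the
+ * bnxt_en driver.
+ *
+ *	struct hwrm_fw_livepatch_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FW_LIVEPATCH);
+ *	if (rc)
+ *		return rc;
+ *	req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ *	req->fw_target = FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW;
+ *	req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
+ *	return hwrm_req_send(bp, req);
+ */
+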
+/* hwrm_fw_sync_input (size:192b/24B) */
+struct hwrm_fw_sync_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 sync_action;
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI 0x1UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT 0x2UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT 0x4UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_DIR_HDR 0x8UL
+ #define FW_SYNC_REQ_SYNC_ACTION_WRITE_MBR 0x10UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CFG 0x20UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_FRU 0x40UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT2 0x80UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_GXRT 0x100UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT2 0x200UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_ART 0x400UL
+ #define FW_SYNC_REQ_SYNC_ACTION_ACTION 0x80000000UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_sync_output (size:128b/16B) */
+struct hwrm_fw_sync_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 sync_status;
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_MASK 0xffUL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SFT 0
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SUCCESS 0x0UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_IN_PROGRESS 0x1UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_TIMEOUT 0x2UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL 0x3UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_LAST FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL
+ #define FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR 0x40000000UL
+ #define FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE 0x80000000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_fw_sync_cmd_err (size:64b/8B) */
+struct hwrm_fw_sync_cmd_err {
+ u8 code;
+ #define FW_SYNC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SYNC_CMD_ERR_CODE_INVALID_LEN 0x1UL
+ #define FW_SYNC_CMD_ERR_CODE_INVALID_CRID 0x2UL
+ #define FW_SYNC_CMD_ERR_CODE_NO_WORKSPACE_MEM 0x2UL
+ #define FW_SYNC_CMD_ERR_CODE_SYNC_FAILED 0x3UL
+ #define FW_SYNC_CMD_ERR_CODE_LAST FW_SYNC_CMD_ERR_CODE_SYNC_FAILED
+ u8 unused_0[7];
+};
+
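+/*
+ * Illustrative sketch (not part of the generated interface): requesting a
+ * sync of the SBI image.  Setting the top ACTION bit alongside the
+ * per-image SYNC_* bits is an assumption about how the command is armed;
+ * completion is reported via SYNC_COMPLETE in sync_status, with
+ * ERR_CODE_IN_PROGRESS meaning the operation is still running.
+ *
+ *	req->sync_action = cpu_to_le32(FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI |
+ *				       FW_SYNC_REQ_SYNC_ACTION_ACTION);
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc) {
+ *		u32 status = le32_to_cpu(resp->sync_status);
+ *
+ *		if (!(status & FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE))
+ *			rc = -EBUSY;
+ *	}
+ *	hwrm_req_drop(bp, req);
+ */
+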
+/* hwrm_fw_state_qcaps_input (size:128b/16B) */
+struct hwrm_fw_state_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_state_qcaps_output (size:256b/32B) */
+struct hwrm_fw_state_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 backup_memory;
+ __le32 quiesce_timeout;
+ __le32 fw_status_blackout;
+ __le32 fw_status_max_wait;
+ u8 unused_0[4];
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_fw_state_quiesce_input (size:192b/24B) */
+struct hwrm_fw_state_quiesce_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define FW_STATE_QUIESCE_REQ_FLAGS_ERROR_RECOVERY 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_state_quiesce_output (size:192b/24B) */
+struct hwrm_fw_state_quiesce_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 quiesce_status;
+ #define FW_STATE_QUIESCE_RESP_QUIESCE_STATUS_INITIATED 0x80000000UL
+ u8 unused_0[4];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_fw_state_unquiesce_input (size:128b/16B) */
+struct hwrm_fw_state_unquiesce_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_state_unquiesce_output (size:192b/24B) */
+struct hwrm_fw_state_unquiesce_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unquiesce_status;
+ #define FW_STATE_UNQUIESCE_RESP_UNQUIESCE_STATUS_COMPLETE 0x80000000UL
+ u8 unused_0[4];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_fw_state_backup_input (size:256b/32B) */
+struct hwrm_fw_state_backup_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 backup_pg_size_backup_lvl;
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_MASK 0xfUL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_SFT 0
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_0 0x0UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_1 0x1UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2 0x2UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LAST FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_MASK 0xf0UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_SFT 4
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_LAST FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G
+ u8 unused_0[7];
+ __le64 backup_page_dir;
+};
+
+/* hwrm_fw_state_backup_output (size:192b/24B) */
+struct hwrm_fw_state_backup_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 backup_status;
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_MASK 0xffUL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SFT 0
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SUCCESS 0x0UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_QUIESCE_ERROR 0x1UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL 0x3UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_LAST FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_RESET_REQUIRED 0x40000000UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_COMPLETE 0x80000000UL
+ u8 unused_0[4];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_fw_state_restore_input (size:256b/32B) */
+struct hwrm_fw_state_restore_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 restore_pg_size_restore_lvl;
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_MASK 0xfUL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_SFT 0
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_0 0x0UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_1 0x1UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2 0x2UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LAST FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_MASK 0xf0UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_SFT 4
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_LAST FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G
+ u8 unused_0[7];
+ __le64 restore_page_dir;
+};
+
+/* hwrm_fw_state_restore_output (size:128b/16B) */
+struct hwrm_fw_state_restore_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 restore_status;
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_MASK 0xffUL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SFT 0
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SUCCESS 0x0UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_GENERAL 0x1UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_FORMAT_PARSE 0x2UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK 0x3UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_LAST FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_FAILURE_ROLLBACK_COMPLETED 0x40000000UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_COMPLETE 0x80000000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_fw_secure_cfg_input (size:256b/32B) */
+struct hwrm_fw_secure_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 enable;
+ #define FW_SECURE_CFG_REQ_ENABLE_NVRAM 0x1UL
+ #define FW_SECURE_CFG_REQ_ENABLE_GRC 0x2UL
+ #define FW_SECURE_CFG_REQ_ENABLE_UART 0x3UL
+ #define FW_SECURE_CFG_REQ_ENABLE_LAST FW_SECURE_CFG_REQ_ENABLE_UART
+ u8 config_mode;
+ #define FW_SECURE_CFG_REQ_CONFIG_MODE_PERSISTENT 0x1UL
+ #define FW_SECURE_CFG_REQ_CONFIG_MODE_RUNTIME 0x2UL
+ u8 nvm_lock_mode;
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_NONE 0x0UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_PARTIAL 0x1UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_FULL 0x2UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_CHIP 0x3UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_LAST FW_SECURE_CFG_REQ_NVM_LOCK_MODE_CHIP
+ u8 nvm_partial_lock_mask;
+ #define FW_SECURE_CFG_REQ_NVM_PARTIAL_LOCK_MASK_EXE 0x1UL
+ #define FW_SECURE_CFG_REQ_NVM_PARTIAL_LOCK_MASK_CFG 0x2UL
+ u8 grc_ctrl;
+ #define FW_SECURE_CFG_REQ_GRC_CTRL_RO 0x0UL
+ #define FW_SECURE_CFG_REQ_GRC_CTRL_RW 0x1UL
+ #define FW_SECURE_CFG_REQ_GRC_CTRL_LAST FW_SECURE_CFG_REQ_GRC_CTRL_RW
+ u8 uart_ctrl;
+ #define FW_SECURE_CFG_REQ_UART_CTRL_DISABLE 0x0UL
+ #define FW_SECURE_CFG_REQ_UART_CTRL_ENABLE 0x1UL
+ #define FW_SECURE_CFG_REQ_UART_CTRL_LAST FW_SECURE_CFG_REQ_UART_CTRL_ENABLE
+ u8 unused_0[2];
+ __le32 unused_1[2];
+};
+
+/* hwrm_fw_secure_cfg_output (size:128b/16B) */
+struct hwrm_fw_secure_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1792b/224B) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[48];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 phy_temp;
+ u8 om_temp;
+ u8 flags;
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
+ u8 valid;
+};
+
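+/*
+ * Illustrative sketch (not part of the generated interface): reading the
+ * device temperature (reported in degrees Celsius), modelled on the
+ * bnxt_en hwmon path.  The flags field gates which of the
+ * temp/phy_temp/om_temp readings are valid.
+ *
+ *	struct hwrm_temp_monitor_query_output *resp;
+ *	struct hwrm_temp_monitor_query_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+ *	if (rc)
+ *		return rc;
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc &&
+ *	    !(resp->flags & TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE))
+ *		*temp = resp->temp;
+ *	hwrm_req_drop(bp, req);
+ *	return rc;
+ */
+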
+/* hwrm_reg_power_query_input (size:128b/16B) */
+struct hwrm_reg_power_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_reg_power_query_output (size:192b/24B) */
+struct hwrm_reg_power_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define REG_POWER_QUERY_RESP_FLAGS_IN_POWER_AVAILABLE 0x1UL
+ #define REG_POWER_QUERY_RESP_FLAGS_OUT_POWER_AVAILABLE 0x2UL
+ __le32 in_power_mw;
+ __le32 out_power_mw;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_core_frequency_query_input (size:128b/16B) */
+struct hwrm_core_frequency_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_core_frequency_query_output (size:128b/16B) */
+struct hwrm_core_frequency_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 core_frequency_hz;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_reg_power_histogram_input (size:192b/24B) */
+struct hwrm_reg_power_histogram_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define REG_POWER_HISTOGRAM_REQ_FLAGS_CLEAR_HISTOGRAM 0x1UL
+ __le32 unused_0;
+};
+
+/* hwrm_reg_power_histogram_output (size:1088b/136B) */
+struct hwrm_reg_power_histogram_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT 0x1UL
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_INPUT 0x0UL
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT 0x1UL
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_LAST REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT
+ u8 unused_0[2];
+ __le32 sampling_period;
+ __le64 sample_count;
+ __le32 power_hist[26];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+#define BUCKET_NO_DATA_FOR_SAMPLE 0x0UL
+#define BUCKET_RANGE_8W_OR_LESS 0x1UL
+#define BUCKET_RANGE_8W_TO_9W 0x2UL
+#define BUCKET_RANGE_9W_TO_10W 0x3UL
+#define BUCKET_RANGE_10W_TO_11W 0x4UL
+#define BUCKET_RANGE_11W_TO_12W 0x5UL
+#define BUCKET_RANGE_12W_TO_13W 0x6UL
+#define BUCKET_RANGE_13W_TO_14W 0x7UL
+#define BUCKET_RANGE_14W_TO_15W 0x8UL
+#define BUCKET_RANGE_15W_TO_16W 0x9UL
+#define BUCKET_RANGE_16W_TO_18W 0xaUL
+#define BUCKET_RANGE_18W_TO_20W 0xbUL
+#define BUCKET_RANGE_20W_TO_22W 0xcUL
+#define BUCKET_RANGE_22W_TO_24W 0xdUL
+#define BUCKET_RANGE_24W_TO_26W 0xeUL
+#define BUCKET_RANGE_26W_TO_28W 0xfUL
+#define BUCKET_RANGE_28W_TO_30W 0x10UL
+#define BUCKET_RANGE_30W_TO_32W 0x11UL
+#define BUCKET_RANGE_32W_TO_34W 0x12UL
+#define BUCKET_RANGE_34W_TO_36W 0x13UL
+#define BUCKET_RANGE_36W_TO_38W 0x14UL
+#define BUCKET_RANGE_38W_TO_40W 0x15UL
+#define BUCKET_RANGE_40W_TO_42W 0x16UL
+#define BUCKET_RANGE_42W_TO_44W 0x17UL
+#define BUCKET_RANGE_44W_TO_50W 0x18UL
+#define BUCKET_RANGE_OVER_50W 0x19UL
+#define BUCKET_LAST BUCKET_RANGE_OVER_50W
+
+/* hwrm_monitor_pax_histogram_start_input (size:448b/56B) */
+struct hwrm_monitor_pax_histogram_start_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_READ 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_WRITE 0x0UL
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_LAST MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_WRITE
+ u8 unused_0[4];
+ __le64 start_addr;
+ __le64 end_addr;
+ __le32 axuser_value;
+ __le32 axuser_mask;
+ u8 lsb_sel;
+ u8 unused_1[7];
+};
+
+/* hwrm_monitor_pax_histogram_start_output (size:192b/24B) */
+struct hwrm_monitor_pax_histogram_start_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 timestamp;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+#define PAX_HISTOGRAM_BUCKET0 0x0UL
+#define PAX_HISTOGRAM_BUCKET1 0x1UL
+#define PAX_HISTOGRAM_BUCKET2 0x2UL
+#define PAX_HISTOGRAM_BUCKET3 0x3UL
+#define PAX_HISTOGRAM_BUCKET4 0x4UL
+#define PAX_HISTOGRAM_BUCKET5 0x5UL
+#define PAX_HISTOGRAM_BUCKET6 0x6UL
+#define PAX_HISTOGRAM_BUCKET7 0x7UL
+#define PAX_HISTOGRAM_BUCKET8 0x8UL
+#define PAX_HISTOGRAM_BUCKET9 0x9UL
+#define PAX_HISTOGRAM_BUCKET10 0xaUL
+#define PAX_HISTOGRAM_BUCKET11 0xbUL
+#define PAX_HISTOGRAM_BUCKET12 0xcUL
+#define PAX_HISTOGRAM_BUCKET13 0xdUL
+#define PAX_HISTOGRAM_BUCKET14 0xeUL
+#define PAX_HISTOGRAM_BUCKET15 0xfUL
+#define PAX_HISTOGRAM_MIN_LATENCY 0x10UL
+#define PAX_HISTOGRAM_MAX_LATENCY 0x11UL
+#define PAX_HISTOGRAM_EVENT_COUNTER 0x12UL
+#define PAX_HISTOGRAM_ACCUMULATOR 0x13UL
+#define PAX_HISTOGRAM_LAST PAX_HISTOGRAM_ACCUMULATOR
+
+/* hwrm_monitor_pax_histogram_collect_input (size:192b/24B) */
+struct hwrm_monitor_pax_histogram_collect_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_READ 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_WRITE 0x0UL
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_LAST MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_WRITE
+ u8 unused_0[4];
+};
+
+/* hwrm_monitor_pax_histogram_collect_output (size:2752b/344B) */
+struct hwrm_monitor_pax_histogram_collect_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 timestamp;
+ __le64 histogram_data_mst0[20];
+ __le64 histogram_data_mst1[20];
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
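+/*
+ * Illustrative sketch (not part of the generated interface): allocating a
+ * magic-packet WoL filter, modelled on the bnxt_en driver's usage.  The
+ * port id, the netdev MAC and the saved filter id are placeholders.
+ *
+ *	struct hwrm_wol_filter_alloc_output *resp;
+ *	struct hwrm_wol_filter_alloc_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
+ *	if (rc)
+ *		return rc;
+ *	req->port_id = cpu_to_le16(bp->pf.port_id);
+ *	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+ *	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+ *	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		bp->wol_filter_id = resp->wol_filter_id;
+ *	hwrm_req_drop(bp, req);
+ *	return rc;
+ */
+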
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[2];
+ __le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
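+/*
+ * Illustrative sketch (not part of the generated interface): the list and
+ * retrieve commands page segment data through a host DMA buffer described
+ * by host_dest_addr/host_buf_len; seq_no selects the next chunk and the
+ * response FLAGS_MORE bit says whether another call is needed.  Buffer
+ * setup and the copy-out step are elided; dma_handle and buf_len are
+ * placeholders.
+ *
+ *	u32 seq = 0;
+ *
+ *	do {
+ *		req->host_dest_addr = cpu_to_le64(dma_handle);
+ *		req->host_buf_len = cpu_to_le32(buf_len);
+ *		req->seq_no = cpu_to_le32(seq++);
+ *		rc = hwrm_req_send(bp, req);
+ *		if (rc)
+ *			break;
+ *		consume le16_to_cpu(resp->data_len) bytes from the buffer;
+ *	} while (resp->flags & DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE);
+ */
+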
+#define HWRM_NVM_COMMON_CMD_ERR_UNKNOWN 0x0UL
+#define HWRM_NVM_COMMON_CMD_ERR_ACCESS_DENIED 0x65UL
+#define HWRM_NVM_COMMON_CMD_ERR_FW_BUSY 0x66UL
+#define HWRM_NVM_COMMON_CMD_ERR_FW_ABORT 0x67UL
+#define HWRM_NVM_COMMON_CMD_ERR_FW_UPGRD_IN_PROG 0x68UL
+#define HWRM_NVM_COMMON_CMD_ERR_NO_WORKSPACE_MEM 0x69UL
+#define HWRM_NVM_COMMON_CMD_ERR_RESOURCE_LOCKED 0x6aUL
+#define HWRM_NVM_COMMON_CMD_ERR_FILE_OPEN_FAILED 0x6bUL
+#define HWRM_NVM_COMMON_CMD_ERR_DMA_FAILED 0x6cUL
+#define HWRM_NVM_COMMON_CMD_ERR_LAST HWRM_NVM_COMMON_CMD_ERR_DMA_FAILED
+
+/* hwrm_nvm_raw_write_blk_input (size:320b/40B) */
+struct hwrm_nvm_raw_write_blk_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le32 dest_addr;
+ __le32 len;
+ u8 flags;
+ #define NVM_RAW_WRITE_BLK_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
+struct hwrm_nvm_raw_write_blk_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk_cmd_err (size:64b/8B) */
+struct hwrm_nvm_raw_write_blk_cmd_err {
+ u8 code;
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_INVALID_LEN 0x1UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_INVALID_ADDR 0x2UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_LAST NVM_RAW_WRITE_BLK_CMD_ERR_CODE_WRITE_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_cmd_err (size:64b/8B) */
+struct hwrm_nvm_read_cmd_err {
+ u8 code;
+ #define NVM_READ_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_READ_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_READ_CMD_ERR_CODE_INVALID_LEN 0x2UL
+ #define NVM_READ_CMD_ERR_CODE_READ_FAILED 0x3UL
+ #define NVM_READ_CMD_ERR_CODE_FRU_CRC_CHECK_FAILED 0x4UL
+ #define NVM_READ_CMD_ERR_CODE_LAST NVM_READ_CMD_ERR_CODE_FRU_CRC_CHECK_FAILED
+ u8 unused_0[7];
+};
+
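+/*
+ * Illustrative sketch (not part of the generated interface): reading an
+ * NVM directory entry into a DMA slice of the request mapping, as in the
+ * bnxt_en ethtool path.  hwrm_req_dma_slice() is the driver helper that
+ * returns a host buffer plus its bus address; index, offset, len and
+ * dest are placeholders.
+ *
+ *	struct hwrm_nvm_read_input *req;
+ *	dma_addr_t dma;
+ *	void *buf;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
+ *	if (rc)
+ *		return rc;
+ *	buf = hwrm_req_dma_slice(bp, req, len, &dma);
+ *	if (!buf) {
+ *		hwrm_req_drop(bp, req);
+ *		return -ENOMEM;
+ *	}
+ *	req->host_dest_addr = cpu_to_le64(dma);
+ *	req->dir_idx = cpu_to_le16(index);
+ *	req->offset = cpu_to_le32(offset);
+ *	req->len = cpu_to_le32(len);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		memcpy(dest, buf, len);
+ *	hwrm_req_drop(bp, req);
+ *	return rc;
+ */
+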
+/* hwrm_nvm_raw_dump_input (size:320b/40B) */
+struct hwrm_nvm_raw_dump_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 offset;
+ __le32 len;
+ u8 flags;
+ #define NVM_RAW_DUMP_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_raw_dump_output (size:128b/16B) */
+struct hwrm_nvm_raw_dump_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_dump_cmd_err (size:64b/8B) */
+struct hwrm_nvm_raw_dump_cmd_err {
+ u8 code;
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_INVALID_LEN 0x1UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_INVALID_OFFSET 0x2UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_VALIDATE_FAILED 0x3UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_LAST NVM_RAW_DUMP_CMD_ERR_CODE_VALIDATE_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_dir_entries_cmd_err {
+ u8 code;
+ #define NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_GET_DIR_LIST_FAILED 0x1UL
+ #define NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_LAST NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_GET_DIR_LIST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
+ __le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_WRITE_CMD_ERR_CODE_REQD_ERASE_FAILED 0x4UL
+ #define NVM_WRITE_CMD_ERR_CODE_VERIFY_FAILED 0x5UL
+ #define NVM_WRITE_CMD_ERR_CODE_INVALID_HEADER 0x6UL
+ #define NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED 0x7UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_modify_cmd_err (size:64b/8B) */
+struct hwrm_nvm_modify_cmd_err {
+ u8 code;
+ #define NVM_MODIFY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_MODIFY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_MODIFY_CMD_ERR_CODE_INVALID_OFFSET 0x2UL
+ #define NVM_MODIFY_CMD_ERR_CODE_ITEM_TOO_BIG_ERR 0x3UL
+ #define NVM_MODIFY_CMD_ERR_CODE_BLK_BOUNDARY_ERR 0x4UL
+ #define NVM_MODIFY_CMD_ERR_CODE_SECURITY_VIOLATION 0x5UL
+ #define NVM_MODIFY_CMD_ERR_CODE_WRITE_FAILED 0x6UL
+ #define NVM_MODIFY_CMD_ERR_CODE_ERASE_SECTORS_FAILED 0x7UL
+ #define NVM_MODIFY_CMD_ERR_CODE_COMPUTE_CRC_FAILED 0x8UL
+ #define NVM_MODIFY_CMD_ERR_CODE_LAST NVM_MODIFY_CMD_ERR_CODE_COMPUTE_CRC_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_cmd_err (size:64b/8B) */
+struct hwrm_nvm_find_dir_entry_cmd_err {
+ u8 code;
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_MGMT_FW_DISABLED 0x1UL
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x2UL
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_LAST NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_cmd_err (size:64b/8B) */
+struct hwrm_nvm_erase_dir_entry_cmd_err {
+ u8 code;
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION 0x5UL
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_LAST NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_output (size:832b/104B) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
+ __le16 art_fw_major;
+ __le16 art_fw_minor;
+ __le16 art_fw_build;
+ __le16 art_fw_patch;
+ u8 security_soc_fw_major;
+ u8 security_soc_fw_minor;
+ u8 security_soc_fw_build;
+ u8 security_soc_fw_patch;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_cmd_err (size:64b/8B) */
+struct hwrm_nvm_mod_dir_entry_cmd_err {
+ u8 code;
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION 0x5UL
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_LAST NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_DEFRAG_FAILED 0x5UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x6UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
+
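+/*
+ * Illustrative sketch (not part of the generated interface): committing a
+ * staged package, loosely modelled on the bnxt_en flash path (the real
+ * driver adds retry-on-defrag handling).  On failure the
+ * result/problem_item pair identifies what was rejected and
+ * reset_required says how the update must be applied.
+ *
+ *	struct hwrm_nvm_install_update_output *resp;
+ *	struct hwrm_nvm_install_update_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_NVM_INSTALL_UPDATE);
+ *	if (rc)
+ *		return rc;
+ *	req->install_type = cpu_to_le32(NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL);
+ *	req->flags = cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE |
+ *				 NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG |
+ *				 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc && resp->result != NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS)
+ *		netdev_err(bp->dev, "install result %d item %d\n",
+ *			   resp->result, resp->problem_item);
+ *	hwrm_req_drop(bp, req);
+ *	return rc;
+ */
+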
+/* hwrm_nvm_flush_input (size:128b/16B) */
+struct hwrm_nvm_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_flush_output (size:128b/16B) */
+struct hwrm_nvm_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
+struct hwrm_nvm_flush_cmd_err {
+ u8 code;
+ #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_FLUSH_CMD_ERR_CODE_FAIL 0x1UL
+ #define NVM_FLUSH_CMD_ERR_CODE_LAST NVM_FLUSH_CMD_ERR_CODE_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_VALIDATE_OPT_VALUE 0x2UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 flags;
+ #define NVM_GET_VARIABLE_RESP_FLAGS_VALIDATE_OPT_VALUE 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x4UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x5UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x6UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x7UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x8UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
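+/*
+ * Illustrative sketch (not part of the generated interface): fetching one
+ * NVM configuration option, modelled on the bnxt_en devlink parameter
+ * path, where data_len is expressed in bits (an assumption carried over
+ * here).  option, nbits, pf_id and val are placeholders.
+ *
+ *	struct hwrm_nvm_get_variable_input *req;
+ *	dma_addr_t dma;
+ *	void *buf;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ *	if (rc)
+ *		return rc;
+ *	buf = hwrm_req_dma_slice(bp, req, DIV_ROUND_UP(nbits, 8), &dma);
+ *	if (!buf) {
+ *		hwrm_req_drop(bp, req);
+ *		return -ENOMEM;
+ *	}
+ *	req->dest_data_addr = cpu_to_le64(dma);
+ *	req->data_len = cpu_to_le16(nbits);
+ *	req->option_num = cpu_to_le16(option);
+ *	req->dimensions = cpu_to_le16(1);
+ *	req->index_0 = cpu_to_le16(pf_id);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		memcpy(val, buf, DIV_ROUND_UP(nbits, 8));
+ *	hwrm_req_drop(bp, req);
+ *	return rc;
+ */
+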
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_defrag_input (size:192b/24B) */
+struct hwrm_nvm_defrag_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define NVM_DEFRAG_REQ_FLAGS_DEFRAG 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_nvm_defrag_output (size:128b/16B) */
+struct hwrm_nvm_defrag_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */
+struct hwrm_nvm_defrag_cmd_err {
+ u8 code;
+ #define NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_FAIL 0x1UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL 0x2UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_LAST NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_vpd_field_info_input (size:192b/24B) */
+struct hwrm_nvm_get_vpd_field_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tag_id[2];
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_get_vpd_field_info_output (size:2176b/272B) */
+struct hwrm_nvm_get_vpd_field_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 data[256];
+ __le16 data_len;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_vpd_field_info_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_vpd_field_info_cmd_err {
+ u8 code;
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_NOT_CACHED 0x1UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_VPD_PARSE_FAILED 0x2UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_INVALID_TAG_ID 0x3UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_LAST NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_INVALID_TAG_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_vpd_field_info_input (size:256b/32B) */
+struct hwrm_nvm_set_vpd_field_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ u8 tag_id[2];
+ __le16 data_len;
+ u8 unused_0[4];
+};
+
+/* hwrm_nvm_set_vpd_field_info_output (size:128b/16B) */
+struct hwrm_nvm_set_vpd_field_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_profile_input (size:256b/32B) */
+struct hwrm_nvm_set_profile_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 option_count;
+ u8 flags;
+ #define NVM_SET_PROFILE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_PROFILE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x3eUL
+ #define NVM_SET_PROFILE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 1
+ #define NVM_SET_PROFILE_REQ_FLAGS_VALIDATE_ONLY 0x40UL
+ #define NVM_SET_PROFILE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 profile_type;
+ #define NVM_SET_PROFILE_REQ_PROFILE_TYPE_NONE 0x0UL
+ #define NVM_SET_PROFILE_REQ_PROFILE_TYPE_EROCE 0x1UL
+ #define NVM_SET_PROFILE_REQ_PROFILE_TYPE_LAST NVM_SET_PROFILE_REQ_PROFILE_TYPE_EROCE
+ u8 unused_2[3];
+};
+
+/* hwrm_nvm_set_profile_output (size:128b/16B) */
+struct hwrm_nvm_set_profile_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_profile_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_profile_cmd_err {
+ u8 code;
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_PROVISION_ERROR 0xaUL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_LAST NVM_SET_PROFILE_CMD_ERR_CODE_PROVISION_ERROR
+ u8 err_index;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_set_profile_sb (size:128b/16B) */
+struct hwrm_nvm_set_profile_sb {
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_PROFILE_SB_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_PROFILE_SB_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_PROFILE_SB_OPTION_NUM_LAST NVM_SET_PROFILE_SB_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_PROFILE_SB_FLAGS_FLAGS_UNUSED_0_MASK 0xffUL
+ #define NVM_SET_PROFILE_SB_FLAGS_FLAGS_UNUSED_0_SFT 0
+ u8 unused_0;
+};
+
+/* hwrm_selftest_qlist_input (size:128b/16B) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_irq_output (size:128b/16B) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_CQ_REASSIGN (0xcUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
+
+#endif /* _BNGE_HSI_H_ */
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
index 47c34990cf23..74a6bf278d88 100644
--- a/include/linux/bnxt/hsi.h
+++ b/include/linux/bnxt/hsi.h
@@ -187,6 +187,8 @@ struct cmd_nums {
#define HWRM_RING_QCFG 0x63UL
#define HWRM_RESERVED5 0x64UL
#define HWRM_RESERVED6 0x65UL
+ #define HWRM_PORT_ADSM_QSTATES 0x66UL
+ #define HWRM_PORT_EVENTS_LOG 0x67UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
@@ -235,7 +237,7 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
#define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_QSTATS_EXT_PFC_ADV 0xbaUL
#define HWRM_PORT_TX_FIR_CFG 0xbbUL
#define HWRM_PORT_TX_FIR_QCFG 0xbcUL
#define HWRM_PORT_ECN_QSTATS 0xbdUL
@@ -271,6 +273,7 @@ struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_PORT_CFG 0xdcUL
#define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_DSC_COLLECTION 0xdeUL
#define HWRM_PORT_MAC_QCAPS 0xdfUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
@@ -280,6 +283,7 @@ struct cmd_nums {
#define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
#define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
#define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_MONITOR_DEVICE_HEALTH 0xe8UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -640,8 +644,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 133
-#define HWRM_VERSION_STR "1.10.3.133"
+#define HWRM_VERSION_RSVD 151
+#define HWRM_VERSION_STR "1.10.3.151"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1416,7 +1420,8 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_L2_TX_RING 0xbUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_L2_TX_RING
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1934,7 +1939,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
#define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
#define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_STATS_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_DATA_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_CMPL_TS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_ST_SUPPORTED 0x2000UL
__le16 max_roce_vfs;
__le16 max_crypto_rx_flow_filters;
u8 unused_3[3];
@@ -4441,7 +4448,10 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_224 0x7d3UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_224 0xfa3UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_224 0x1f43UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_224
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4457,7 +4467,11 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
- u8 unused_2[6];
+ __le16 auto_link_speeds2_ext_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_EXT_MASK_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_EXT_MASK_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_EXT_MASK_800GB_PAM4_224 0x4UL
+ u8 unused_2[4];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -4491,7 +4505,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+/* hwrm_port_phy_qcfg_output (size:896b/112B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -4501,14 +4515,17 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ #define PORT_PHY_QCFG_RESP_LINK_NO_SD 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LOCK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_NO_LOCK
u8 active_fec_signal_mode;
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_224 0x3UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_224
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
@@ -4699,7 +4716,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEDR4 0x3eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEFR4 0x3fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEFR4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4859,7 +4878,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_224 0x7d3UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_224 0xfa3UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_224 0x1f43UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_224
__le16 auto_link_speeds2;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
@@ -4876,6 +4898,16 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
u8 active_lanes;
+ u8 rsvd1;
+ __le16 support_speeds2_ext;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_EXT_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_EXT_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_EXT_800GB_PAM4_224 0x4UL
+ __le16 auto_link_speeds2_ext;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_EXT_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_EXT_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_EXT_800GB_PAM4_224 0x4UL
+ u8 rsvd2[3];
u8 valid;
};
@@ -5478,7 +5510,7 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+/* hwrm_port_phy_qcaps_output (size:384b/48B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -5563,6 +5595,10 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_ADV_STATS_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_ADSM_REPORT_SUPPORTED 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PM_EVENT_LOG_SUPPORTED 0x80UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_FDRSTAT_CMD_SUPPORTED 0x100UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -5595,7 +5631,15 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
- u8 unused_1[3];
+ __le16 supported_speeds2_ext_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_FORCE_MODE_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_FORCE_MODE_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_FORCE_MODE_800GB_PAM4_224 0x4UL
+ __le16 supported_speeds2_ext_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_AUTO_MODE_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_AUTO_MODE_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_AUTO_MODE_800GB_PAM4_224 0x4UL
+ u8 unused_1[7];
u8 valid;
};
@@ -6051,6 +6095,58 @@ struct hwrm_port_led_qcaps_output {
u8 valid;
};
+/* hwrm_port_phy_fdrstat_input (size:192b/24B) */
+struct hwrm_port_phy_fdrstat_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 rsvd[2];
+ __le16 ops;
+ #define PORT_PHY_FDRSTAT_REQ_OPS_START 0x0UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_STOP 0x1UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_CLEAR 0x2UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_COUNTER 0x3UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_LAST PORT_PHY_FDRSTAT_REQ_OPS_COUNTER
+};
+
+/* hwrm_port_phy_fdrstat_output (size:3072b/384B) */
+struct hwrm_port_phy_fdrstat_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 start_time;
+ __le64 end_time;
+ __le64 cmic_start_time;
+ __le64 cmic_end_time;
+ __le64 accumulated_uncorrected_codewords;
+ __le64 accumulated_corrected_codewords;
+ __le64 accumulated_total_codewords;
+ __le64 accumulated_symbol_errors;
+ __le64 accumulated_codewords_err_s[17];
+ __le64 uncorrected_codewords;
+ __le64 corrected_codewords;
+ __le64 total_codewords;
+ __le64 symbol_errors;
+ __le64 codewords_err_s[17];
+ __le32 window_size;
+ __le16 unused_0[1];
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_fdrstat_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_fdrstat_cmd_err {
+ u8 code;
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED 0x1UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_LAST PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED
+ u8 unused_0[7];
+};
+
/* hwrm_port_mac_qcaps_input (size:192b/24B) */
struct hwrm_port_mac_qcaps_input {
__le16 req_type;
@@ -6912,6 +7008,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
#define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ #define VNIC_CFG_REQ_FLAGS_DEST_QP 0x100UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -6923,7 +7020,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
- #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ #define VNIC_CFG_REQ_ENABLES_QP_ID 0x400UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6943,7 +7040,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
#define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- __le32 raw_qp_id;
+ __le32 qp_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7409,6 +7506,8 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
#define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
#define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ #define RING_ALLOC_REQ_FLAGS_DPI_ROCE_MANAGED 0x10UL
+ #define RING_ALLOC_REQ_FLAGS_TIMER_RESET 0x20UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -7583,6 +7682,7 @@ struct hwrm_ring_aggint_qcaps_output {
#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TMR_RESET_ON_ALLOC 0x200UL
__le32 nq_params;
#define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
__le16 num_cmpl_dma_aggr_min;
@@ -10325,6 +10425,9 @@ struct hwrm_dbg_coredump_retrieve_input {
__le16 instance;
__le16 unused_1;
u8 seg_flags;
+ #define DBG_COREDUMP_RETRIEVE_REQ_SFLAG_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_RETRIEVE_REQ_SFLAG_CRASHED_DATA 0x2UL
+ #define DBG_COREDUMP_RETRIEVE_REQ_SFLAG_NO_COMPRESS 0x4UL
u8 unused_2;
__le16 unused_3;
__le32 unused_4;
@@ -10926,6 +11029,38 @@ struct hwrm_nvm_set_variable_cmd_err {
u8 unused_0[7];
};
+/* hwrm_nvm_defrag_input (size:192b/24B) */
+struct hwrm_nvm_defrag_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define NVM_DEFRAG_REQ_FLAGS_DEFRAG 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_nvm_defrag_output (size:128b/16B) */
+struct hwrm_nvm_defrag_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */
+struct hwrm_nvm_defrag_cmd_err {
+ u8 code;
+ #define NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_FAIL 0x1UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL 0x2UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_LAST NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL
+ u8 unused_0[7];
+};
+
/* hwrm_selftest_qlist_input (size:128b/16B) */
struct hwrm_selftest_qlist_input {
__le16 req_type;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index d1eb5c7729cb..2f535331f926 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
@@ -470,7 +470,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
- void *value) {
+ void *value, u64 flags) {
return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e5be698256d1..cd9b96434904 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -287,6 +287,7 @@ struct bpf_map_owner {
enum bpf_prog_type type;
bool jited;
bool xdp_has_frags;
+ bool sleepable;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
enum bpf_attach_type expected_attach_type;
@@ -673,6 +674,22 @@ void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags,
struct bpf_dynptr *ptr__uninit);
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+void *bpf_arena_alloc_pages_non_sleepable(void *p__map, void *addr__ign, u32 page_cnt, int node_id,
+ u64 flags);
+void bpf_arena_free_pages_non_sleepable(void *p__map, void *ptr__ign, u32 page_cnt);
+#else
+static inline void *bpf_arena_alloc_pages_non_sleepable(void *p__map, void *addr__ign, u32 page_cnt,
+ int node_id, u64 flags)
+{
+ return NULL;
+}
+
+static inline void bpf_arena_free_pages_non_sleepable(void *p__map, void *ptr__ign, u32 page_cnt)
+{
+}
+#endif
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* bpf_type_flag contains a set of flags that are applicable to the values of
@@ -737,7 +754,7 @@ enum bpf_type_flag {
MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
/* PTR was passed from the kernel in a trusted context, and may be
- * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * passed to kfuncs or BPF helper functions.
* Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
* PTR_UNTRUSTED refers to a kptr that was read directly from a map
* without invoking bpf_kptr_xchg(). What we really need to know is
@@ -1213,6 +1230,9 @@ enum {
#endif
};
+#define BPF_TRAMP_COOKIE_INDEX_SHIFT 8
+#define BPF_TRAMP_IS_RETURN_SHIFT 63
+
struct bpf_tramp_links {
struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
int nr_links;
@@ -1293,6 +1313,7 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_MODIFY_RETURN,
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE, /* more than MAX */
+ BPF_TRAMP_FSESSION,
};
struct bpf_tramp_image {
@@ -1309,14 +1330,17 @@ struct bpf_tramp_image {
};
struct bpf_trampoline {
- /* hlist for trampoline_table */
- struct hlist_node hlist;
+ /* hlist for trampoline_key_table */
+ struct hlist_node hlist_key;
+ /* hlist for trampoline_ip_table */
+ struct hlist_node hlist_ip;
struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
u32 flags;
u64 key;
+ unsigned long ip;
struct {
struct btf_func_model model;
void *addr;
@@ -1418,7 +1442,7 @@ bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset,
void *src, u64 len, u64 flags);
void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
- void *buffer__opt, u64 buffer__szk);
+ void *buffer__nullable, u64 buffer__szk);
static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len)
{
@@ -1742,8 +1766,12 @@ struct bpf_prog_aux {
struct rcu_head rcu;
};
struct bpf_stream stream[2];
+ struct mutex st_ops_assoc_mutex;
+ struct bpf_map __rcu *st_ops_assoc;
};
+#define BPF_NR_CONTEXTS 4 /* normal, softirq, hardirq, NMI */
+
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -1759,6 +1787,7 @@ struct bpf_prog {
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1, /* Do we call get_func_ip() */
+ call_session_cookie:1, /* Do we call bpf_session_cookie() */
tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
sleepable:1; /* BPF program is sleepable */
enum bpf_prog_type type; /* Type of BPF program */
@@ -1770,7 +1799,7 @@ struct bpf_prog {
u8 tag[BPF_TAG_SIZE];
};
struct bpf_prog_stats __percpu *stats;
- int __percpu *active;
+ u8 __percpu *active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux; /* Auxiliary fields */
@@ -1855,6 +1884,11 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_fsession_link {
+ struct bpf_tracing_link link;
+ struct bpf_tramp_link fexit;
+};
+
struct bpf_raw_tp_link {
struct bpf_link link;
struct bpf_raw_event_map *btp;
@@ -2002,6 +2036,40 @@ struct bpf_struct_ops_common_value {
enum bpf_struct_ops_state state;
};
+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+ u32 val;
+
+ preempt_disable();
+ active[rctx]++;
+ val = le32_to_cpu(*(__le32 *)active);
+ preempt_enable();
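+ /*
+  * All BPF_NR_CONTEXTS u8 counters are read back as one little-endian
+  * u32: the value equals BIT(rctx * 8) only if this context's counter
+  * is exactly 1 and every other context's counter is 0, i.e. the
+  * program is not already running in any context on this CPU.
+  */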
+ if (val != BIT(rctx * 8))
+ return false;
+
+ return true;
+#else
+ return this_cpu_inc_return(*(int __percpu *)(prog->active)) == 1;
+#endif
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+
+ preempt_disable();
+ active[rctx]--;
+ preempt_enable();
+#else
+ this_cpu_dec(*(int __percpu *)(prog->active));
+#endif
+}
+
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developer to register a struct_ops type and generate
* type information correctly. Developers should use this macro to register
@@ -2044,6 +2112,9 @@ static inline void bpf_module_put(const void *data, struct module *owner)
module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);
+int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog);
+void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux);
u32 bpf_struct_ops_id(const void *kdata);
#ifdef CONFIG_NET
@@ -2091,6 +2162,17 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
+static inline int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog)
+{
+}
+static inline void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux)
+{
+ return NULL;
+}
static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
}
@@ -2101,6 +2183,37 @@ static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_op
#endif
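+
+/* Count the FENTRY-side links that were attached as BPF_TRACE_FSESSION. */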
+static inline int bpf_fsession_cnt(struct bpf_tramp_links *links)
+{
+ struct bpf_tramp_links fentries = links[BPF_TRAMP_FENTRY];
+ int cnt = 0;
+
+ for (int i = 0; i < links[BPF_TRAMP_FENTRY].nr_links; i++) {
+ if (fentries.links[i]->link.prog->expected_attach_type == BPF_TRACE_FSESSION)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static inline bool bpf_prog_calls_session_cookie(struct bpf_tramp_link *link)
+{
+ return link->link.prog->call_session_cookie;
+}
+
+static inline int bpf_fsession_cookie_cnt(struct bpf_tramp_links *links)
+{
+ struct bpf_tramp_links fentries = links[BPF_TRAMP_FENTRY];
+ int cnt = 0;
+
+ for (int i = 0; i < links[BPF_TRAMP_FENTRY].nr_links; i++) {
+ if (bpf_prog_calls_session_cookie(fentries.links[i]))
+ cnt++;
+ }
+
+ return cnt;
+}
+
int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
const struct bpf_ctx_arg_aux *info, u32 cnt);
@@ -2540,6 +2653,10 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG
+void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+ struct mem_cgroup **new_memcg);
+void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+ struct mem_cgroup *memcg);
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
@@ -2564,6 +2681,17 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
kvcalloc(_n, _size, _flags)
#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
__alloc_percpu_gfp(_size, _align, _flags)
+static inline void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+ struct mem_cgroup **new_memcg)
+{
+ *new_memcg = NULL;
+ *old_memcg = NULL;
+}
+
+static inline void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+ struct mem_cgroup *memcg)
+{
+}
#endif
static inline int
@@ -2764,8 +2892,8 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
-int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 flags);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
@@ -3243,6 +3371,11 @@ static inline void bpf_prog_report_arena_violation(bool write, unsigned long add
}
#endif /* CONFIG_BPF_SYSCALL */
+static inline bool bpf_net_capable(void)
+{
+ return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
+}
+
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
@@ -3832,14 +3965,43 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
}
#endif
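+
+/* Per-CPU map types whose values can be addressed via BPF_F_CPU / BPF_F_ALL_CPUS. */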
+static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
+{
+ switch (map_type) {
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
{
- if (flags & ~allowed_flags)
+ u32 cpu;
+
+ if ((u32)flags & ~allowed_flags)
return -EINVAL;
if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
return -EINVAL;
+ if (!(flags & BPF_F_CPU) && flags >> 32)
+ return -EINVAL;
+
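+ /*
+  * With BPF_F_CPU, the target CPU is carried in the upper 32 bits of
+  * flags; BPF_F_CPU and BPF_F_ALL_CPUS are mutually exclusive.
+  */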
+ if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
+ if (!bpf_map_supports_cpu_flags(map->map_type))
+ return -EINVAL;
+ if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS))
+ return -EINVAL;
+
+ cpu = flags >> 32;
+ if ((flags & BPF_F_CPU) && cpu >= num_possible_cpus())
+ return -ERANGE;
+ }
+
return 0;
}
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 66432248cd81..85efa9772530 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -15,12 +15,13 @@
#include <linux/types.h>
#include <linux/bpf_mem_alloc.h>
#include <uapi/linux/btf.h>
+#include <asm/rqspinlock.h>
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
struct bpf_local_storage_map_bucket {
struct hlist_head list;
- raw_spinlock_t lock;
+ rqspinlock_t lock;
};
 /* The map is not the primary owner of a bpf_local_storage_elem.
@@ -67,6 +68,11 @@ struct bpf_local_storage_data {
u8 data[] __aligned(8);
};
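+
+/* Lifecycle flag bits for a bpf_local_storage_elem. */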
+#define SELEM_MAP_UNLINKED (1 << 0)
+#define SELEM_STORAGE_UNLINKED (1 << 1)
+#define SELEM_UNLINKED (SELEM_MAP_UNLINKED | SELEM_STORAGE_UNLINKED)
+#define SELEM_TOFREE (1 << 2)
+
/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
struct hlist_node map_node; /* Linked to bpf_local_storage_map */
@@ -79,7 +85,9 @@ struct bpf_local_storage_elem {
* after raw_spin_unlock
*/
};
- /* 8 bytes hole */
+ atomic_t state;
+ bool use_kmalloc_nolock;
+ /* 3 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
@@ -88,13 +96,14 @@ struct bpf_local_storage_elem {
struct bpf_local_storage {
struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
- struct bpf_local_storage_map __rcu *smap;
struct hlist_head list; /* List of bpf_local_storage_elem */
void *owner; /* The object that owns the above "list" of
* bpf_local_storage_elem.
*/
struct rcu_head rcu;
- raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+ rqspinlock_t lock; /* Protect adding/removing from the "list" */
+ u64 mem_charge; /* Copy of mem charged to owner. Protected by "lock" */
+ refcount_t owner_refcnt; /* Used to pin owner when map_free is uncharging */
bool use_kmalloc_nolock;
};
@@ -162,11 +171,10 @@ bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
return SDATA(selem);
}
-void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
void bpf_local_storage_map_free(struct bpf_map *map,
- struct bpf_local_storage_cache *cache,
- int __percpu *busy_counter);
+ struct bpf_local_storage_cache *cache);
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
const struct btf *btf,
@@ -176,10 +184,11 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem);
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
+int bpf_selem_unlink(struct bpf_local_storage_elem *selem);
-void bpf_selem_link_map(struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *selem);
+int bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
index 929225f7b095..0b9f4caeeb0a 100644
--- a/include/linux/bpf_mprog.h
+++ b/include/linux/bpf_mprog.h
@@ -340,4 +340,14 @@ static inline bool bpf_mprog_supported(enum bpf_prog_type type)
return false;
}
}
+
+static inline bool bpf_mprog_detach_empty(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return bpf_net_capable();
+ default:
+ return false;
+ }
+}
#endif /* __BPF_MPROG_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 130bcbd66f60..ef8e45a362d9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -147,8 +147,12 @@ struct bpf_reg_state {
* registers. Example:
* r1 = r2; both will have r1->id == r2->id == N
* r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ * r3 = r2; both will have r3->id == r2->id == N
+ * w3 += 10; r3->id == N | BPF_ADD_CONST32 and r3->off == 10
*/
-#define BPF_ADD_CONST (1U << 31)
+#define BPF_ADD_CONST64 (1U << 31)
+#define BPF_ADD_CONST32 (1U << 30)
+#define BPF_ADD_CONST (BPF_ADD_CONST64 | BPF_ADD_CONST32)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
@@ -692,12 +696,16 @@ struct bpf_id_pair {
struct bpf_idmap {
u32 tmp_id_gen;
+ u32 cnt;
struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};
struct bpf_idset {
- u32 count;
- u32 ids[BPF_ID_MAP_SIZE];
+ u32 num_ids;
+ struct {
+ u32 id;
+ u32 cnt;
+ } entries[BPF_ID_MAP_SIZE];
};
/* see verifier.c:compute_scc_callchain() */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index f06976ffb63f..48108471c5b1 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -34,7 +34,7 @@
*
* And the following kfunc:
*
- * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE)
*
* All invocations to the kfunc must pass the unmodified, unwalked task:
*
@@ -66,7 +66,6 @@
* return 0;
* }
*/
-#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
@@ -79,6 +78,7 @@
#define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */
#define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */
#define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */
+#define KF_IMPLICIT_ARGS (1 << 16) /* kfunc has implicit arguments supplied by the verifier */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
@@ -220,6 +220,7 @@ bool btf_is_module(const struct btf *btf);
bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
+u32 btf_named_start_id(const struct btf *btf, bool own);
struct btf *btf_base_btf(const struct btf *btf);
bool btf_type_is_i32(const struct btf_type *t);
bool btf_type_is_i64(const struct btf_type *t);
@@ -575,8 +576,8 @@ const char *btf_name_by_offset(const struct btf *btf, u32 offset);
const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
-u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
- const struct bpf_prog *prog);
+u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog);
+bool btf_kfunc_is_allowed(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog);
u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
const struct bpf_prog *prog);
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 5fb8d0e3f9c1..3287232e3cad 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -58,6 +58,7 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
+void can_set_skb_uid(struct sk_buff *skb);
void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 1abc25a8d144..a70a02967071 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/can.h>
+#include <net/can.h>
#include <net/sock.h>
void can_flush_echo_skb(struct net_device *dev);
@@ -37,37 +38,20 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
-/*
- * The struct can_skb_priv is used to transport additional information along
- * with the stored struct can(fd)_frame that can not be contained in existing
- * struct sk_buff elements.
- * N.B. that this information must not be modified in cloned CAN sk_buffs.
- * To modify the CAN frame content or the struct can_skb_priv content
- * skb_copy() needs to be used instead of skb_clone().
- */
-
-/**
- * struct can_skb_priv - private additional data inside CAN sk_buffs
- * @ifindex: ifindex of the first interface the CAN frame appeared on
- * @skbcnt: atomic counter to have an unique id together with skb pointer
- * @frame_len: length of CAN frame in data link layer
- * @cf: align to the following CAN frame at skb->data
- */
-struct can_skb_priv {
- int ifindex;
- int skbcnt;
- unsigned int frame_len;
- struct can_frame cf[];
-};
-
-static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
+static inline struct can_skb_ext *can_skb_ext_add(struct sk_buff *skb)
{
- return (struct can_skb_priv *)(skb->head);
+ struct can_skb_ext *csx = skb_ext_add(skb, SKB_EXT_CAN);
+
+ /* skb_ext_add() returns uninitialized space */
+ if (csx)
+ csx->can_gw_hops = 0;
+
+ return csx;
}
-static inline void can_skb_reserve(struct sk_buff *skb)
+static inline struct can_skb_ext *can_skb_ext_find(struct sk_buff *skb)
{
- skb_reserve(skb, sizeof(struct can_skb_priv));
+ return skb_ext_find(skb, SKB_EXT_CAN);
}
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 1fb08922552c..37db92b3d6f8 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -203,6 +203,12 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
ns_capable(ns, CAP_SYS_ADMIN);
}
+static inline bool checkpoint_restore_ns_capable_noaudit(struct user_namespace *ns)
+{
+ return ns_capable_noaudit(ns, CAP_CHECKPOINT_RESTORE) ||
+ ns_capable_noaudit(ns, CAP_SYS_ADMIN);
+}
+
/* audit system wants to get cap info from files as well */
int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
const struct dentry *dentry,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index f7cc60de0058..bb92f5c169ca 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -535,10 +535,10 @@ struct cgroup {
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
- u16 subtree_control;
- u16 subtree_ss_mask;
- u16 old_subtree_control;
- u16 old_subtree_ss_mask;
+ u32 subtree_control;
+ u32 subtree_ss_mask;
+ u32 old_subtree_control;
+ u32 old_subtree_ss_mask;
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 8d41b917c77d..dbc4162921e9 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -278,16 +278,21 @@ const volatile void * __must_check_fn(const volatile void *val)
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
typedef _type class_##_name##_t; \
+typedef _type lock_##_name##_t; \
static __always_inline void class_##_name##_destructor(_type *p) \
+ __no_context_analysis \
{ _type _T = *p; _exit; } \
static __always_inline _type class_##_name##_constructor(_init_args) \
+ __no_context_analysis \
{ _type t = _init; return t; }
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef lock_##_name##_t lock_##_name##ext##_t; \
typedef class_##_name##_t class_##_name##ext##_t; \
static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); } \
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ __no_context_analysis \
{ class_##_name##_t t = _init; return t; }
#define CLASS(_name, var) \
@@ -474,35 +479,80 @@ _label: \
*/
#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef _type lock_##_name##_t; \
typedef struct { \
_type *lock; \
__VA_ARGS__; \
} class_##_name##_t; \
\
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
+ __no_context_analysis \
{ \
if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
} \
\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
-#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+#define __DEFINE_LOCK_GUARD_1(_name, _type, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
-#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+#define __DEFINE_LOCK_GUARD_0(_name, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(void) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = (void*)1 }, \
*_T __maybe_unused = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
+#define DECLARE_LOCK_GUARD_0_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(void) _lock;\
+static inline void class_##_name##_destructor(class_##_name##_t *_T) _unlock;
+
+/*
+ * To support Context Analysis, we need to allow the compiler to see the
+ * acquisition and release of the context lock. However, the "cleanup" helpers
+ * wrap the lock in a struct passed through separate helper functions, which
+ * hides the lock alias from the compiler (no inter-procedural analysis).
+ *
+ * To make it work, we introduce an explicit alias to the context lock instance
+ * that is "cleaned" up with a separate cleanup helper. This helper is a dummy
+ * function that does nothing at runtime, but has the "_unlock" attribute to
+ * tell the compiler what happens at the end of the scope.
+ *
+ * To generalize the pattern, the WITH_LOCK_GUARD_1_ATTRS() macro should be used
+ * to redefine the constructor, which then also creates the alias variable with
+ * the right "cleanup" attribute, *after* DECLARE_LOCK_GUARD_1_ATTRS() has been
+ * used.
+ *
+ * Example usage:
+ *
+ * DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+ * #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+ *
+ * Note: To support the for-loop based scoped helpers, the auxiliary variable
+ * must be a pointer to the "class" type because it is defined in the same
+ * statement as the guard variable. However, we initialize it with the lock
+ * pointer (despite the type mismatch, the compiler's alias analysis still works
+ * as expected). The "_unlock" attribute receives a pointer to the auxiliary
+ * variable (a double pointer to the class type), and must be cast and
+ * dereferenced appropriately.
+ */
+#define DECLARE_LOCK_GUARD_1_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T) _lock;\
+static __always_inline void __class_##_name##_cleanup_ctx(class_##_name##_t **_T) \
+ __no_context_analysis _unlock { }
+#define WITH_LOCK_GUARD_1_ATTRS(_name, _T) \
+ class_##_name##_constructor(_T), \
+ *__UNIQUE_ID(unlock) __cleanup(__class_##_name##_cleanup_ctx) = (void *)(unsigned long)(_T)
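+
+/*
+ * For illustration, with the mutex example above a guard declaration such as
+ *
+ * CLASS(mutex, scope)(&m);
+ *
+ * declares, in a single statement, both the real guard variable and an
+ * auxiliary pointer initialized to &m whose __cleanup() handler carries the
+ * __releases() attribute, so the analysis sees &m released at end of scope.
+ */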
+
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
diff --git a/include/linux/clk.h b/include/linux/clk.h
index b607482ca77e..c571e294f0ef 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -479,6 +479,22 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
+ * devm_clk_bulk_get_optional_enable - Get and enable optional bulk clocks (managed)
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Behaves the same as devm_clk_bulk_get_optional() but also prepares and
+ * enables the clocks in one managed operation. The clocks will automatically
+ * be disabled, unprepared and freed when the device is unbound.
+ *
+ * Return: 0 if all clocks specified in the clk_bulk_data table were obtained
+ * and enabled successfully, or if no clk provider was available for some of
+ * them. Otherwise returns a negative errno.
+ */
+int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
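+/*
+ * A minimal usage sketch (the clock names below are illustrative assumptions,
+ * not part of this API):
+ *
+ * static struct clk_bulk_data my_clks[] = {
+ * { .id = "bus" },
+ * { .id = "core" },
+ * };
+ *
+ * ret = devm_clk_bulk_get_optional_enable(dev, ARRAY_SIZE(my_clks), my_clks);
+ * if (ret)
+ * return ret;
+ */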
+/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
@@ -1029,6 +1045,13 @@ static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
return 0;
}
+static inline int __must_check devm_clk_bulk_get_optional_enable(struct device *dev,
+ int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 2e6931735880..d0793eaaadaa 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -49,9 +49,14 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
-extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+ unsigned int align, bool no_warn);
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+ unsigned long count);
+
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
@@ -66,24 +71,4 @@ static inline bool cma_skip_dt_default_reserved_mem(void)
}
#endif
-#ifdef CONFIG_CMA
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
-bool cma_free_folio(struct cma *cma, const struct folio *folio);
-bool cma_validate_zones(struct cma *cma);
-#else
-static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
-{
- return NULL;
-}
-
-static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
-{
- return false;
-}
-static inline bool cma_validate_zones(struct cma *cma)
-{
- return false;
-}
-#endif
-
#endif
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 7edf1a07b535..e1123dd28486 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -153,4 +153,4 @@
* Bindgen uses LLVM even if our C compiler is GCC, so we cannot
* rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
*/
-#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19)
+#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ > 19 || (__clang_major__ == 19 && __clang_minor__ > 0))
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
new file mode 100644
index 000000000000..00c074a2ccb0
--- /dev/null
+++ b/include/linux/compiler-context-analysis.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Macros and attributes for compiler-based static context analysis.
+ */
+
+#ifndef _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+#define _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+
+#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__) && !defined(__GENKSYMS__)
+
+/*
+ * These attributes define new context lock (Clang: capability) types.
+ * Internal only.
+ */
+# define __ctx_lock_type(name) __attribute__((capability(#name)))
+# define __reentrant_ctx_lock __attribute__((reentrant_capability))
+# define __acquires_ctx_lock(...) __attribute__((acquire_capability(__VA_ARGS__)))
+# define __acquires_shared_ctx_lock(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
+# define __try_acquires_ctx_lock(ret, var) __attribute__((try_acquire_capability(ret, var)))
+# define __try_acquires_shared_ctx_lock(ret, var) __attribute__((try_acquire_shared_capability(ret, var)))
+# define __releases_ctx_lock(...) __attribute__((release_capability(__VA_ARGS__)))
+# define __releases_shared_ctx_lock(...) __attribute__((release_shared_capability(__VA_ARGS__)))
+# define __returns_ctx_lock(var) __attribute__((lock_returned(var)))
+
+/*
+ * The below are used to annotate code being checked. Internal only.
+ */
+# define __excludes_ctx_lock(...) __attribute__((locks_excluded(__VA_ARGS__)))
+# define __requires_ctx_lock(...) __attribute__((requires_capability(__VA_ARGS__)))
+# define __requires_shared_ctx_lock(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
+
+/*
+ * The "assert_capability" attribute is a bit confusingly named. It does not
+ * generate a check. Instead, it tells the analysis to *assume* the capability
+ * is held. This is used to augment runtime assertions, which can then help
+ * with patterns beyond the compiler's static reasoning abilities.
+ */
+# define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__)))
+# define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
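+
+/*
+ * For example (a sketch; the assertion helper is illustrative), a runtime
+ * lock assertion can be annotated so that the analysis assumes the context
+ * lock is held after the call returns:
+ *
+ *	void assert_my_lock_held(struct foo *f) __assumes_ctx_lock(&f->lock);
+ */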
+
+/**
+ * __guarded_by - struct member and globals attribute, declares variable
+ * only accessible within active context
+ *
+ * Declares that the struct member or global variable is only accessible within
+ * the context entered by the given context lock. Read operations on the data
+ * require shared access, while write operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long counter __guarded_by(&lock);
+ * };
+ */
+# define __guarded_by(...) __attribute__((guarded_by(__VA_ARGS__)))
+
+/**
+ * __pt_guarded_by - struct member and globals attribute, declares pointed-to
+ * data only accessible within active context
+ *
+ * Declares that the data pointed to by the struct member pointer or global
+ * pointer is only accessible within the context entered by the given context
+ * lock. Read operations on the data require shared access, while write
+ * operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long *counter __pt_guarded_by(&lock);
+ * };
+ */
+# define __pt_guarded_by(...) __attribute__((pt_guarded_by(__VA_ARGS__)))
+
+/**
+ * context_lock_struct() - declare or define a context lock struct
+ * @name: struct name
+ *
+ * Helper to declare or define a struct type that is also a context lock.
+ *
+ * .. code-block:: c
+ *
+ * context_lock_struct(my_handle) {
+ * int foo;
+ * long bar;
+ * };
+ *
+ * struct some_state {
+ * ...
+ * };
+ * // ... declared elsewhere ...
+ * context_lock_struct(some_state);
+ *
+ * Note: The implementation defines several helper functions that can acquire
+ * and release the context lock.
+ */
+# define context_lock_struct(name, ...) \
+ struct __ctx_lock_type(name) __VA_ARGS__ name; \
+ static __always_inline void __acquire_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_ctx_lock(var) { } \
+ static __always_inline void __acquire_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_shared_ctx_lock(var) { } \
+ static __always_inline bool __try_acquire_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline bool __try_acquire_shared_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_shared_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline void __release_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_ctx_lock(var) { } \
+ static __always_inline void __release_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_shared_ctx_lock(var) { } \
+ static __always_inline void __assume_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_ctx_lock(var) { } \
+ static __always_inline void __assume_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_shared_ctx_lock(var) { } \
+ struct name
+
+/**
+ * disable_context_analysis() - disables context analysis
+ *
+ * Disables context analysis. Must be paired with a later
+ * enable_context_analysis().
+ */
+# define disable_context_analysis() \
+ __diag_push(); \
+ __diag_ignore_all("-Wunknown-warning-option", "") \
+ __diag_ignore_all("-Wthread-safety", "") \
+ __diag_ignore_all("-Wthread-safety-pointer", "")
+
+/**
+ * enable_context_analysis() - re-enables context analysis
+ *
+ * Re-enables context analysis. Must be paired with a prior
+ * disable_context_analysis().
+ */
+# define enable_context_analysis() __diag_pop()
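+
+/*
+ * A minimal usage sketch (the elided statements are illustrative):
+ *
+ *	disable_context_analysis();
+ *	... statements the analysis should not check ...
+ *	enable_context_analysis();
+ */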
+
+/**
+ * __no_context_analysis - function attribute, disables context analysis
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Prefer use of `context_unsafe()` where possible.
+ */
+# define __no_context_analysis __attribute__((no_thread_safety_analysis))
+
+#else /* !WARN_CONTEXT_ANALYSIS */
+
+# define __ctx_lock_type(name)
+# define __reentrant_ctx_lock
+# define __acquires_ctx_lock(...)
+# define __acquires_shared_ctx_lock(...)
+# define __try_acquires_ctx_lock(ret, var)
+# define __try_acquires_shared_ctx_lock(ret, var)
+# define __releases_ctx_lock(...)
+# define __releases_shared_ctx_lock(...)
+# define __assumes_ctx_lock(...)
+# define __assumes_shared_ctx_lock(...)
+# define __returns_ctx_lock(var)
+# define __guarded_by(...)
+# define __pt_guarded_by(...)
+# define __excludes_ctx_lock(...)
+# define __requires_ctx_lock(...)
+# define __requires_shared_ctx_lock(...)
+# define __acquire_ctx_lock(var) do { } while (0)
+# define __acquire_shared_ctx_lock(var) do { } while (0)
+# define __try_acquire_ctx_lock(var, ret) (ret)
+# define __try_acquire_shared_ctx_lock(var, ret) (ret)
+# define __release_ctx_lock(var) do { } while (0)
+# define __release_shared_ctx_lock(var) do { } while (0)
+# define __assume_ctx_lock(var) do { (void)(var); } while (0)
+# define __assume_shared_ctx_lock(var) do { (void)(var); } while (0)
+# define context_lock_struct(name, ...) struct __VA_ARGS__ name
+# define disable_context_analysis()
+# define enable_context_analysis()
+# define __no_context_analysis
+
+#endif /* WARN_CONTEXT_ANALYSIS */
+
+/**
+ * context_unsafe() - disable context checking for contained code
+ *
+ * Disables context checking for the contained statements or expression.
+ *
+ * .. code-block:: c
+ *
+ * struct some_data {
+ * spinlock_t lock;
+ * int counter __guarded_by(&lock);
+ * };
+ *
+ * int foo(struct some_data *d)
+ * {
+ * // ...
+ * // other code that is still checked ...
+ * // ...
+ * return context_unsafe(d->counter);
+ * }
+ */
+#define context_unsafe(...) \
+({ \
+ disable_context_analysis(); \
+ __VA_ARGS__; \
+ enable_context_analysis() \
+})
+
+/**
+ * __context_unsafe() - function attribute, disable context checking
+ * @comment: comment explaining why opt-out is safe
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Forces adding an inline comment as argument.
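+ *
+ * A minimal usage sketch (function name and justification are illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	int foo(struct some_data *d) __context_unsafe("d is not yet shared");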
+ */
+#define __context_unsafe(comment) __no_context_analysis
+
+/**
+ * context_unsafe_alias() - helper to insert a context lock "alias barrier"
+ * @p: pointer aliasing a context lock or object containing context locks
+ *
+ * No-op function that acts as a "context lock alias barrier" for cases where
+ * the analysis rightfully detects that we're switching aliases; the switch is
+ * considered safe, but lies beyond the analysis' reasoning abilities.
+ *
+ * This should be inserted before the first use of such an alias.
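+ *
+ * A minimal sketch (helper names are illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	struct node *n = first_node(tree);
+ *
+ *	while (n) {
+ *		context_unsafe_alias(n); // n aliases a different node each iteration
+ *		do_guarded_access(n);
+ *		n = next_node(n);
+ *	}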
+ *
+ * Implementation Note: The compiler ignores aliases that may be reassigned but
+ * their value cannot be determined (e.g. when passing a non-const pointer to an
+ * alias as a function argument).
+ */
+#define context_unsafe_alias(p) _context_unsafe_alias((void **)&(p))
+static inline void _context_unsafe_alias(void **p) { }
+
+/**
+ * token_context_lock() - declare an abstract global context lock instance
+ * @name: token context lock name
+ *
+ * Helper that declares an abstract global context lock instance @name that is
+ * not backed by a real data structure (accidentally referencing it results in
+ * a linker error). The type name is `__ctx_lock_@name`.
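+ *
+ * A minimal sketch (the token name is illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	token_context_lock(my_ctx);
+ *
+ *	void do_work(void) __must_hold(my_ctx);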
+ */
+#define token_context_lock(name, ...) \
+ context_lock_struct(__ctx_lock_##name, ##__VA_ARGS__) {}; \
+ extern const struct __ctx_lock_##name *name
+
+/**
+ * token_context_lock_instance() - declare another instance of a global context lock
+ * @ctx: token context lock previously declared with token_context_lock()
+ * @name: name of additional global context lock instance
+ *
+ * Helper that declares an additional instance @name of the same token context
+ * lock class @ctx. This is helpful where multiple related token contexts are
+ * declared, to allow using the same underlying type (`__ctx_lock_@ctx`) as
+ * function arguments.
+ */
+#define token_context_lock_instance(ctx, name) \
+ extern const struct __ctx_lock_##ctx *name
+
+/*
+ * Common keywords for static context analysis.
+ */
+
+/**
+ * __must_hold() - function attribute, caller must hold exclusive context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) exclusively.
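+ *
+ * A minimal sketch (struct and function names are illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	struct some_data {
+ *		spinlock_t lock;
+ *		int counter __guarded_by(&lock);
+ *	};
+ *
+ *	void inc_counter(struct some_data *d) __must_hold(&d->lock)
+ *	{
+ *		d->counter++;
+ *	}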
+ */
+#define __must_hold(...) __requires_ctx_lock(__VA_ARGS__)
+
+/**
+ * __must_not_hold() - function attribute, caller must not hold context lock
+ *
+ * Function attribute declaring that the caller must not hold the given context
+ * lock instance(s).
+ */
+#define __must_not_hold(...) __excludes_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires() - function attribute, function acquires context lock exclusively
+ *
+ * Function attribute declaring that the function acquires the given context
+ * lock instance(s) exclusively, but does not release them.
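+ *
+ * A minimal sketch (names are illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	void state_lock(struct some_state *s) __acquires(&s->lock)
+ *	{
+ *		spin_lock(&s->lock);
+ *	}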
+ */
+#define __acquires(...) __acquires_ctx_lock(__VA_ARGS__)
+
+/*
+ * Clang's analysis does not care precisely about the value, only that it is
+ * either zero or non-zero. So the __cond_acquires() interface might be
+ * misleading if we say that @ret is the value returned if acquired. Instead,
+ * provide symbolic variants which we translate.
+ */
+#define __cond_acquires_impl_true(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_false(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonzero(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_0(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonnull(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_NULL(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+
+/**
+ * __cond_acquires() - function attribute, function conditionally
+ * acquires a context lock exclusively
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x exclusively, but does not release it. The
+ * function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
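+ *
+ * A minimal sketch (names are illustrative):
+ *
+ * .. code-block:: c
+ *
+ *	bool state_trylock(struct some_state *s) __cond_acquires(true, &s->lock)
+ *	{
+ *		return spin_trylock(&s->lock);
+ *	}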
+ */
+#define __cond_acquires(ret, x) __cond_acquires_impl_##ret(x)
+
+/**
+ * __releases() - function attribute, function releases a context lock exclusively
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) exclusively. The associated context(s) must be active on
+ * entry.
+ */
+#define __releases(...) __releases_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire() - function to acquire context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x exclusively.
+ */
+#define __acquire(x) __acquire_ctx_lock(x)
+
+/**
+ * __release() - function to release context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x.
+ */
+#define __release(x) __release_ctx_lock(x)
+
+/**
+ * __must_hold_shared() - function attribute, caller must hold shared context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) with shared access.
+ */
+#define __must_hold_shared(...) __requires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires_shared() - function attribute, function acquires context lock shared
+ *
+ * Function attribute declaring that the function acquires the given
+ * context lock instance(s) with shared access, but does not release them.
+ */
+#define __acquires_shared(...) __acquires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __cond_acquires_shared() - function attribute, function conditionally
+ * acquires a context lock shared
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x with shared access, but does not release it.
+ * The function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ */
+#define __cond_acquires_shared(ret, x) __cond_acquires_impl_##ret(x, _shared)
+
+/**
+ * __releases_shared() - function attribute, function releases a
+ * context lock shared
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) with shared access. The associated context(s) must be
+ * active on entry.
+ */
+#define __releases_shared(...) __releases_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire_shared() - function to acquire context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x with shared
+ * access.
+ */
+#define __acquire_shared(x) __acquire_shared_ctx_lock(x)
+
+/**
+ * __release_shared() - function to release context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x with shared
+ * access.
+ */
+#define __release_shared(x) __release_shared_ctx_lock(x)
+
+/**
+ * __acquire_ret() - helper to acquire context lock of return value
+ * @call: call expression
+ * @ret_expr: acquire expression that uses __ret
+ */
+#define __acquire_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire(ret_expr); \
+ __ret; \
+ })
+
+/**
+ * __acquire_shared_ret() - helper to acquire context lock shared of return value
+ * @call: call expression
+ * @ret_expr: acquire shared expression that uses __ret
+ */
+#define __acquire_shared_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire_shared(ret_expr); \
+ __ret; \
+ })
+
+/*
+ * Attributes to mark functions returning acquired context locks.
+ *
+ * This is purely cosmetic to help readability, and should be used with the
+ * above macros as follows:
+ *
+ * struct foo { spinlock_t lock; ... };
+ * ...
+ * #define myfunc(...) __acquire_ret(_myfunc(__VA_ARGS__), &__ret->lock)
+ * struct foo *_myfunc(int bar) __acquires_ret;
+ * ...
+ */
+#define __acquires_ret __no_context_analysis
+#define __acquires_shared_ret __no_context_analysis
+
+#endif /* _LINUX_COMPILER_CONTEXT_ANALYSIS_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04487c9bd751..af16624b29fd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define data_race(expr) \
({ \
__kcsan_disable_current(); \
+ disable_context_analysis(); \
auto __v = (expr); \
+ enable_context_analysis(); \
__kcsan_enable_current(); \
__v; \
})
@@ -231,16 +233,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
"must be non-C-string (not NUL-terminated)")
/*
- * Use __typeof_unqual__() when available.
- *
- * XXX: Remove test for __CHECKER__ once
- * sparse learns about __typeof_unqual__().
- */
-#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
-# define USE_TYPEOF_UNQUAL 1
-#endif
-
-/*
* Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
* operator when available, to return an unqualified type of the exp.
*/
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d3318a3c2577..b1b141394d13 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -41,6 +41,8 @@
# define BTF_TYPE_TAG(value) /* nothing */
#endif
+#include <linux/compiler-context-analysis.h>
+
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
@@ -51,14 +53,6 @@
# define __rcu __attribute__((noderef, address_space(__rcu)))
static inline void __chk_user_ptr(const volatile void __user *ptr) { }
static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
-/* context/locking */
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __cond_acquires(x) __attribute__((context(x,0,-1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
/* other */
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
@@ -79,14 +73,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
-/* context/locking */
-# define __must_hold(x)
-# define __acquires(x)
-# define __cond_acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
/* other */
# define __force
# define __nocast
@@ -303,6 +289,22 @@ struct ftrace_likely_data {
# define __no_kasan_or_inline __always_inline
#endif
+#ifdef CONFIG_KCSAN
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ *
+ * Defined here because defining __data_racy as volatile for KCSAN objects only
+ * causes problems in BPF Type Format (BTF) generation since struct members
+ * of core kernel data structs will be volatile in some objects and not in
+ * others. Instead define it globally for KCSAN kernels.
+ */
+# define __data_racy volatile
+#else
+# define __data_racy
+#endif
+
#ifdef __SANITIZE_THREAD__
/*
* Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
@@ -314,16 +316,9 @@ struct ftrace_likely_data {
* disable all instrumentation. See Kconfig.kcsan where this is mandatory.
*/
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
-/*
- * Type qualifier to mark variables where all data-racy accesses should be
- * ignored by KCSAN. Note, the implementation simply marks these variables as
- * volatile, since KCSAN will treat such accesses as "marked".
- */
-# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
-# define __data_racy
#endif
#ifdef __SANITIZE_MEMORY__
@@ -369,7 +364,7 @@ struct ftrace_likely_data {
* Optional: only supported since clang >= 18
*
* gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
- * clang: https://github.com/llvm/llvm-project/pull/76348
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#counted-by-counted-by-or-null-sized-by-sized-by-or-null
*
* __bdos on clang < 19.1.2 can erroneously return 0:
* https://github.com/llvm/llvm-project/pull/110497
@@ -384,6 +379,22 @@ struct ftrace_likely_data {
#endif
/*
+ * Track at runtime the number of objects pointed to by a pointer member, for
+ * use by CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS.
+ *
+ * Optional: only supported since gcc >= 16
+ * Optional: only supported since clang >= 22
+ *
+ * gcc: https://gcc.gnu.org/pipermail/gcc-patches/2025-April/681727.html
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#counted-by-counted-by-or-null-sized-by-sized-by-or-null
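+ *
+ * Example (a sketch; struct and member names are illustrative):
+ *
+ *	struct foo {
+ *		int nr_items;
+ *		struct item *items __counted_by_ptr(nr_items);
+ *	};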
+ */
+#ifdef CONFIG_CC_HAS_COUNTED_BY_PTR
+#define __counted_by_ptr(member) __attribute__((__counted_by__(member)))
+#else
+#define __counted_by_ptr(member)
+#endif
+
+/*
* Optional: only supported since gcc >= 15
* Optional: not supported by Clang
*
@@ -536,6 +547,37 @@ struct ftrace_likely_data {
#endif
/*
+ * Optional: only supported since gcc >= 15, clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#index-_005f_005fbuiltin_005fcounted_005fby_005fref
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#builtin-counted-by-ref
+ */
+#if __has_builtin(__builtin_counted_by_ref)
+/**
+ * __flex_counter() - Get pointer to counter member for the given
+ * flexible array, if it was annotated with __counted_by()
+ * @FAM: Pointer to flexible array member of an addressable struct instance
+ *
+ * For example, with:
+ *
+ * struct foo {
+ * int counter;
+ * short array[] __counted_by(counter);
+ * } *p;
+ *
+ * __flex_counter(p->array) will resolve to &p->counter.
+ *
+ * Note that Clang may not allow this to be assigned to a separate
+ * variable; it must be used directly.
+ *
+ * If p->array is unannotated, this returns (void *)NULL.
+ */
+#define __flex_counter(FAM) __builtin_counted_by_ref(FAM)
+#else
+#define __flex_counter(FAM) ((void *)NULL)
+#endif
+
+/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
@@ -562,6 +604,14 @@ struct ftrace_likely_data {
#define asm_inline asm
#endif
+#ifndef __ASSEMBLY__
+/*
+ * Use __typeof_unqual__() when available.
+ */
+#if CC_HAS_TYPEOF_UNQUAL || defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -569,6 +619,7 @@ struct ftrace_likely_data {
* __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
* non-scalar types unchanged.
*/
+#ifndef USE_TYPEOF_UNQUAL
/*
* Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
* is not type-compatible with 'signed char', and we define a separate case.
@@ -586,6 +637,29 @@ struct ftrace_likely_data {
__scalar_type_to_expr_cases(long), \
__scalar_type_to_expr_cases(long long), \
default: (x)))
+#else
+#define __unqual_scalar_typeof(x) __typeof_unqual__(x)
+#endif
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * __signed_scalar_typeof(x) - Declare a signed scalar type, leaving
+ * non-scalar types unchanged.
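+ *
+ * For example, __signed_scalar_typeof() of an 'unsigned long' expression
+ * yields 'long', while pointer and aggregate types are left unchanged.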
+ */
+
+#define __scalar_type_to_signed_cases(type) \
+ unsigned type: (signed type)0, \
+ signed type: (signed type)0
+
+#define __signed_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (signed char)0, \
+ __scalar_type_to_signed_cases(char), \
+ __scalar_type_to_signed_cases(short), \
+ __scalar_type_to_signed_cases(int), \
+ __scalar_type_to_signed_cases(long), \
+ __scalar_type_to_signed_cases(long long), \
+ default: (x)))
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
diff --git a/include/linux/console.h b/include/linux/console.h
index fc9f5c5c1b04..1346f0b4cd8b 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -79,12 +79,6 @@ enum vc_intensity;
* characters. (optional)
* @con_invert_region: invert a region of length @count on @vc starting at @p.
* (optional)
- * @con_debug_enter: prepare the console for the debugger. This includes, but
- * is not limited to, unblanking the console, loading an
- * appropriate palette, and allowing debugger generated output.
- * (optional)
- * @con_debug_leave: restore the console to its pre-debug state as closely as
- * possible. (optional)
*/
struct consw {
struct module *owner;
@@ -123,8 +117,6 @@ struct consw {
enum vc_intensity intensity,
bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
- void (*con_debug_enter)(struct vc_data *vc);
- void (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
@@ -298,12 +290,20 @@ struct nbcon_context {
* @outbuf: Pointer to the text buffer for output
* @len: Length to write
* @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ * @cpu: CPU on which the message was generated
+ * @pid: PID of the task that generated the message
+ * @comm: Name of the task that generated the message
*/
struct nbcon_write_context {
struct nbcon_context __private ctxt;
char *outbuf;
unsigned int len;
bool unsafe_takeover;
+#ifdef CONFIG_PRINTK_EXECUTION_CTX
+ int cpu;
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+#endif
};
/**
@@ -492,8 +492,8 @@ static inline bool console_srcu_read_lock_is_held(void)
extern int console_srcu_read_lock(void);
extern void console_srcu_read_unlock(int cookie);
-extern void console_list_lock(void) __acquires(console_mutex);
-extern void console_list_unlock(void) __releases(console_mutex);
+extern void console_list_lock(void);
+extern void console_list_unlock(void);
extern struct hlist_head console_list;
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 5b1236d8c65b..440b35e459e5 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -595,7 +595,8 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc);
+ const struct cper_sec_proc_arm *proc,
+ u32 length);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);
int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 487b3bf2e1ea..8239cd95a005 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -229,4 +229,8 @@ static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
#define smt_mitigations SMT_MITIGATIONS_OFF
#endif
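+/* Arch hooks to get/set/lock a task's indirect branch landing pad status */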
+int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status);
+int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status);
+int arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 0465d1e6f72a..cc894fc38971 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -203,6 +203,7 @@ struct cpufreq_freqs {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
+struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
@@ -210,6 +211,10 @@ static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
return NULL;
}
+static inline struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu)
+{
+ return NULL;
+}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
index f7aa20f62b87..286b3ab92e15 100644
--- a/include/linux/cpuhplock.h
+++ b/include/linux/cpuhplock.h
@@ -13,6 +13,7 @@
struct device;
extern int lockdep_is_cpus_held(void);
+extern int lockdep_is_cpus_write_held(void);
#ifdef CONFIG_HOTPLUG_CPU
void cpus_write_lock(void);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a98d3330385c..cbd402b4f974 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,6 +18,8 @@
#include <linux/mmu_context.h>
#include <linux/jump_label.h>
+extern bool lockdep_is_cpuset_held(void);
+
#ifdef CONFIG_CPUSETS
/*
@@ -74,10 +76,10 @@ extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
+extern void lockdep_assert_cpuset_lock_held(void);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
-extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -195,6 +197,7 @@ static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
+static inline void lockdep_assert_cpuset_lock_held(void) { }
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
struct cpumask *mask)
@@ -213,11 +216,6 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
-static inline bool cpuset_cpu_is_isolated(int cpu)
-{
- return false;
-}
-
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 343a140a6ba2..ed1609d78cd7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -164,7 +164,6 @@ static inline const struct cred *kernel_cred(void)
return rcu_dereference_raw(init_task.cred);
}
extern int set_security_override(struct cred *, u32);
-extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 3813373a9200..a4fea23da857 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -15,7 +15,7 @@
#include <linux/random.h>
/* Minimal region size. Every damon_region is aligned by this. */
-#define DAMON_MIN_REGION PAGE_SIZE
+#define DAMON_MIN_REGION_SZ PAGE_SIZE
/* Max priority score for DAMON-based operation schemes */
#define DAMOS_MAX_SCORE (99)
@@ -155,6 +155,8 @@ enum damos_action {
* @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node.
* @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup.
* @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup.
+ * @DAMOS_QUOTA_ACTIVE_MEM_BP: Active to total LRU memory ratio.
+ * @DAMOS_QUOTA_INACTIVE_MEM_BP: Inactive to total LRU memory ratio.
* @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
*
* Metrics equal to larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
@@ -166,6 +168,8 @@ enum damos_quota_goal_metric {
DAMOS_QUOTA_NODE_MEM_FREE_BP,
DAMOS_QUOTA_NODE_MEMCG_USED_BP,
DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ DAMOS_QUOTA_ACTIVE_MEM_BP,
+ DAMOS_QUOTA_INACTIVE_MEM_BP,
NR_DAMOS_QUOTA_GOAL_METRICS,
};
@@ -203,7 +207,7 @@ struct damos_quota_goal {
u64 last_psi_total;
struct {
int nid;
- unsigned short memcg_id;
+ u64 memcg_id;
};
};
struct list_head list;
@@ -330,6 +334,8 @@ struct damos_watermarks {
* @sz_ops_filter_passed:
* Total bytes that passed ops layer-handled DAMOS filters.
* @qt_exceeds: Total number of times the quota of the scheme has exceeded.
+ * @nr_snapshots:
+ * Total number of DAMON snapshots that the scheme has tried.
*
* "Tried an action to a region" in this context means the DAMOS core logic
* determined the region as eligible to apply the action. The access pattern
@@ -355,6 +361,7 @@ struct damos_stat {
unsigned long sz_applied;
unsigned long sz_ops_filter_passed;
unsigned long qt_exceeds;
+ unsigned long nr_snapshots;
};
/**
@@ -416,7 +423,7 @@ struct damos_filter {
bool matching;
bool allow;
union {
- unsigned short memcg_id;
+ u64 memcg_id;
struct damon_addr_range addr_range;
int target_idx;
struct damon_size_range sz_range;
@@ -496,6 +503,7 @@ struct damos_migrate_dests {
* @ops_filters: ops layer handling &struct damos_filter objects list.
* @last_applied: Last @action applied ops-managing entity.
* @stat: Statistics of this scheme.
+ * @max_nr_snapshots: Upper limit of nr_snapshots stat.
* @list: List head for siblings.
*
* For each @apply_interval_us, DAMON finds regions which fit in the
@@ -529,9 +537,10 @@ struct damos_migrate_dests {
* unsets @last_applied when each regions walking for applying the scheme is
* finished.
*
- * After applying the &action to each region, &stat_count and &stat_sz is
- * updated to reflect the number of regions and total size of regions that the
- * &action is applied.
+ * After applying the &action to each region, &stat is updated.
+ *
+ * If &max_nr_snapshots is set to a non-zero value and &stat.nr_snapshots
+ * becomes equal to or greater than it, the scheme is deactivated.
*/
struct damos {
struct damos_access_pattern pattern;
@@ -566,6 +575,7 @@ struct damos {
struct list_head ops_filters;
void *last_applied;
struct damos_stat stat;
+ unsigned long max_nr_snapshots;
struct list_head list;
};
@@ -597,7 +607,6 @@ enum damon_ops_id {
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
* @cleanup_target: Clean up each target before deallocation.
- * @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
* users should register the low level operations for their target address
@@ -630,7 +639,6 @@ enum damon_ops_id {
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup_target is called before the target will be deallocated.
- * @cleanup is called from @kdamond just before its termination.
*/
struct damon_operations {
enum damon_ops_id id;
@@ -646,7 +654,6 @@ struct damon_operations {
struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
void (*cleanup_target)(struct damon_target *t);
- void (*cleanup)(struct damon_ctx *context);
};
/*
@@ -656,7 +663,7 @@ struct damon_operations {
* @data: Data that will be passed to @fn.
* @repeat: Repeat invocations.
* @return_code: Return code from @fn invocation.
- * @dealloc_on_cancel: De-allocate when canceled.
+ * @dealloc_on_cancel: If @repeat is true, de-allocate when canceled.
*
* Control damon_call(), which requests specific kdamond to invoke a given
* function. Refer to damon_call() for more details.
@@ -749,27 +756,24 @@ struct damon_attrs {
* of the monitoring.
*
* @attrs: Monitoring attributes for accuracy/overhead control.
- * @kdamond: Kernel thread who does the monitoring.
- * @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
- * For each monitoring context, one kernel thread for the monitoring is
- * created. The pointer to the thread is stored in @kdamond.
+ * For each monitoring context, one kernel thread for the monitoring, namely
+ * kdamond, is created. The pid of kdamond can be retrieved using
+ * damon_kdamond_pid().
*
- * Once started, the monitoring thread runs until explicitly required to be
- * terminated or every monitoring target is invalid. The validity of the
- * targets is checked via the &damon_operations.target_valid of @ops. The
- * termination can also be explicitly requested by calling damon_stop().
- * The thread sets @kdamond to NULL when it terminates. Therefore, users can
- * know whether the monitoring is ongoing or terminated by reading @kdamond.
- * Reads and writes to @kdamond from outside of the monitoring thread must
- * be protected by @kdamond_lock.
+ * Once started, kdamond runs until explicitly required to be terminated or
+ * every monitoring target is invalid. The validity of the targets is checked
+ * via the &damon_operations.target_valid of @ops. The termination can also be
+ * explicitly requested by calling damon_stop(). To know if a kdamond is
+ * running, damon_is_running() can be used.
*
- * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
- * Accesses to other fields must be protected by themselves.
+ * While the kdamond is running, all accesses to &struct damon_ctx from a
+ * thread other than the kdamond should be made using safe DAMON APIs,
+ * including damon_call() and damos_walk().
*
* @ops: Set of monitoring operations for given use cases.
* @addr_unit: Scale factor for core to ops address conversion.
- * @min_sz_region: Minimum region size.
+ * @min_region_sz: Minimum region size.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
@@ -806,13 +810,15 @@ struct damon_ctx {
struct damos_walk_control *walk_control;
struct mutex walk_control_lock;
-/* public: */
+ /* Working thread of the given DAMON context */
struct task_struct *kdamond;
+ /* Protects @kdamond field access */
struct mutex kdamond_lock;
+/* public: */
struct damon_operations ops;
unsigned long addr_unit;
- unsigned long min_sz_region;
+ unsigned long min_region_sz;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -901,7 +907,7 @@ static inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges, unsigned long min_sz_region);
+ unsigned int nr_ranges, unsigned long min_region_sz);
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damon_attrs *attrs);
@@ -962,13 +968,14 @@ bool damon_initialized(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
bool damon_is_running(struct damon_ctx *ctx);
+int damon_kdamond_pid(struct damon_ctx *ctx);
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end,
- unsigned long min_sz_region);
+ unsigned long min_region_sz);
#endif /* CONFIG_DAMON */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7cecda29447e..4177c4738282 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -239,18 +239,16 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
* @cancel: callback to call
* @cancel_data: extra data for the callback to call
*/
-struct debugfs_cancellation {
+context_lock_struct(debugfs_cancellation) {
struct list_head list;
void (*cancel)(struct dentry *, void *);
void *cancel_data;
};
-void __acquires(cancellation)
-debugfs_enter_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
-void __releases(cancellation)
-debugfs_leave_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
+void debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __acquires(cancellation);
+void debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __releases(cancellation);
#else
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 800dcc360db2..ecb06f16d22c 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -69,6 +69,14 @@ struct task_delay_info {
u32 compact_count; /* total count of memory compact */
u32 wpcopy_count; /* total count of write-protect copy */
u32 irq_count; /* total count of IRQ/SOFTIRQ */
+
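+ /* Timestamps at which the respective maximum delays were recorded */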
+ struct timespec64 blkio_delay_max_ts;
+ struct timespec64 swapin_delay_max_ts;
+ struct timespec64 freepages_delay_max_ts;
+ struct timespec64 thrashing_delay_max_ts;
+ struct timespec64 compact_delay_max_ts;
+ struct timespec64 wpcopy_delay_max_ts;
+ struct timespec64 irq_delay_max_ts;
};
#endif
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 99b1002b3e31..99c3c83ea520 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -215,9 +215,9 @@ bus_find_next_device(const struct bus_type *bus,struct device *cur)
return bus_find_device(bus, cur, NULL, device_match_any);
}
-#ifdef CONFIG_ACPI
struct acpi_device;
+#ifdef CONFIG_ACPI
/**
* bus_find_device_by_acpi_dev : device iterator for locating a particular device
* matching the ACPI COMPANION device.
@@ -231,7 +231,7 @@ bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device
}
#else
static inline struct device *
-bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
{
return NULL;
}
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
index 9c1e3d643d69..14ab9159bdda 100644
--- a/include/linux/device/devres.h
+++ b/include/linux/device/devres.h
@@ -26,10 +26,6 @@ __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
-void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
void devres_free(void *res);
void devres_add(struct device *dev, void *res);
void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0864773a57e8..822085bc2d20 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -21,7 +21,7 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
return 0;
- if (likely(!inode->i_rdev))
+ if (!inode->i_rdev)
return 0;
if (S_ISBLK(inode->i_mode))
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
index a3c0ce2d3a42..09bde3f748e4 100644
--- a/include/linux/dma-buf-mapping.h
+++ b/include/linux/dma-buf-mapping.h
@@ -9,7 +9,7 @@
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir);
void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 0bc492090237..133b9e637b55 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -429,18 +429,6 @@ struct dma_buf {
__poll_t active;
} cb_in, cb_out;
-#ifdef CONFIG_DMABUF_SYSFS_STATS
- /**
- * @sysfs_entry:
- *
- * For exposing information about this buffer in sysfs. See also
- * `DMA-BUF statistics`_ for the uapi this enables.
- */
- struct dma_buf_sysfs_entry {
- struct kobject kobj;
- struct dma_buf *dmabuf;
- } *sysfs_entry;
-#endif
};
/**
@@ -532,16 +520,6 @@ struct dma_buf_export_info {
};
/**
- * struct dma_buf_phys_vec - describe continuous chunk of memory
- * @paddr: physical address of that chunk
- * @len: Length of this chunk
- */
-struct dma_buf_phys_vec {
- phys_addr_t paddr;
- size_t len;
-};
-
-/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
*
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 64639e104110..d4c92fd35092 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -364,11 +364,12 @@ static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif
-int dma_fence_signal(struct dma_fence *fence);
-int dma_fence_signal_locked(struct dma_fence *fence);
-int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
-int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
- ktime_t timestamp);
+void dma_fence_signal(struct dma_fence *fence);
+bool dma_fence_check_and_signal(struct dma_fence *fence);
+bool dma_fence_check_and_signal_locked(struct dma_fence *fence);
+void dma_fence_signal_locked(struct dma_fence *fence);
+void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+void dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -401,6 +402,26 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+/*
+ * dma_fence_test_signaled_flag - Only check whether a fence is signaled yet.
+ * @fence: the fence to check
+ *
+ * This function just checks whether @fence is signaled, without interacting
+ * with the fence in any way. The user must, therefore, ensure through other
+ * means that fences get signaled eventually.
+ *
+ * This function uses test_bit(), which is thread-safe. Naturally, this function
+ * should be used opportunistically; a fence could get signaled at any moment
+ * after the check is done.
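+ *
+ * A minimal usage sketch (the early-out pattern is illustrative):
+ *
+ *	if (dma_fence_test_signaled_flag(fence))
+ *		return 0; // already signaled, skip waiting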
+ *
+ * Return: true if signaled, false otherwise.
+ */
+static inline bool
+dma_fence_test_signaled_flag(struct dma_fence *fence)
+{
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
@@ -418,7 +439,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (dma_fence_test_signaled_flag(fence))
return true;
if (fence->ops->signaled && fence->ops->signaled(fence)) {
@@ -448,7 +469,7 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (dma_fence_test_signaled_flag(fence))
return true;
if (fence->ops->signaled && fence->ops->signaled(fence)) {
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 27d15f60950a..648328a64b27 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -46,4 +46,6 @@ const char *dma_heap_get_name(struct dma_heap *heap);
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+extern bool mem_accounting;
+
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 4809204c674c..8eff2f53fd86 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -395,11 +395,15 @@ bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
+bool arch_dma_alloc_direct(struct device *dev);
+bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle);
#else
#define arch_dma_map_phys_direct(d, a) (false)
#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#define arch_dma_alloc_direct(d) (false)
+#define arch_dma_free_direct(d, a) (false)
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 562f520b23c2..2ce295b46b8c 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -11,15 +11,23 @@
#include <linux/device.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <linux/rtnetlink.h>
struct dpll_device;
struct dpll_pin;
struct dpll_pin_esync;
+struct fwnode_handle;
+struct ref_tracker;
struct dpll_device_ops {
int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ int (*mode_set)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode mode, struct netlink_ext_ack *extack);
+ int (*supported_modes_get)(const struct dpll_device *dpll,
+ void *dpll_priv, unsigned long *modes,
+ struct netlink_ext_ack *extack);
int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
enum dpll_lock_status_error *status_error,
@@ -166,6 +174,36 @@ struct dpll_pin_properties {
u32 phase_gran;
};
+#ifdef CONFIG_DPLL_REFCNT_TRACKER
+typedef struct ref_tracker *dpll_tracker;
+#else
+typedef struct {} dpll_tracker;
+#endif
+
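+/* Events reported to notifiers registered via register_dpll_notifier() */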
+#define DPLL_DEVICE_CREATED 1
+#define DPLL_DEVICE_DELETED 2
+#define DPLL_DEVICE_CHANGED 3
+#define DPLL_PIN_CREATED 4
+#define DPLL_PIN_DELETED 5
+#define DPLL_PIN_CHANGED 6
+
+struct dpll_device_notifier_info {
+ struct dpll_device *dpll;
+ u32 id;
+ u32 idx;
+ u64 clock_id;
+ enum dpll_type type;
+};
+
+struct dpll_pin_notifier_info {
+ struct dpll_pin *pin;
+ u32 id;
+ u32 idx;
+ u64 clock_id;
+ const struct fwnode_handle *fwnode;
+ const struct dpll_pin_properties *prop;
+};
+
#if IS_ENABLED(CONFIG_DPLL)
void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
void dpll_netdev_pin_clear(struct net_device *dev);
@@ -173,6 +211,9 @@ void dpll_netdev_pin_clear(struct net_device *dev);
size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
int dpll_netdev_add_pin_handle(struct sk_buff *msg,
const struct net_device *dev);
+
+struct dpll_pin *fwnode_dpll_pin_find(struct fwnode_handle *fwnode,
+ dpll_tracker *tracker);
#else
static inline void
dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
@@ -188,12 +229,19 @@ dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
{
return 0;
}
+
+static inline struct dpll_pin *
+fwnode_dpll_pin_find(struct fwnode_handle *fwnode, dpll_tracker *tracker)
+{
+ return NULL;
+}
#endif
struct dpll_device *
-dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ dpll_tracker *tracker);
-void dpll_device_put(struct dpll_device *dpll);
+void dpll_device_put(struct dpll_device *dpll, dpll_tracker *tracker);
int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
const struct dpll_device_ops *ops, void *priv);
@@ -201,9 +249,11 @@ int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
void dpll_device_unregister(struct dpll_device *dpll,
const struct dpll_device_ops *ops, void *priv);
+#define DPLL_PIN_IDX_UNSPEC U32_MAX
+
struct dpll_pin *
dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
- const struct dpll_pin_properties *prop);
+ const struct dpll_pin_properties *prop, dpll_tracker *tracker);
int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
@@ -211,7 +261,9 @@ int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
-void dpll_pin_put(struct dpll_pin *pin);
+void dpll_pin_put(struct dpll_pin *pin, dpll_tracker *tracker);
+
+void dpll_pin_fwnode_set(struct dpll_pin *pin, struct fwnode_handle *fwnode);
int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
@@ -226,4 +278,8 @@ int dpll_device_change_ntf(struct dpll_device *dpll);
int dpll_pin_change_ntf(struct dpll_pin *pin);
+int register_dpll_notifier(struct notifier_block *nb);
+
+int unregister_dpll_notifier(struct notifier_block *nb);
+
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2a43094e23f7..664898d09ff5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -406,11 +406,12 @@ void efi_native_runtime_setup(void);
#define EFI_CC_FINAL_EVENTS_TABLE_GUID EFI_GUID(0xdd4a4648, 0x2de7, 0x4665, 0x96, 0x4d, 0x21, 0xd9, 0xef, 0x5f, 0xb4, 0x46)
/*
- * This GUID is used to pass to the kernel proper the struct screen_info
- * structure that was populated by the stub based on the GOP protocol instance
- * associated with ConOut
+ * This GUID is used to pass to the kernel proper the primary display
+ * information that has been populated by the stub based on the GOP
+ * instance associated with ConOut.
*/
-#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_PRIMARY_DISPLAY_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 87efb38b7081..f83ca0abf2cd 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
+#include <linux/audit.h>
#include <linux/irq-entry-common.h>
#include <linux/livepatch.h>
#include <linux/ptrace.h>
@@ -36,8 +37,8 @@
SYSCALL_WORK_SYSCALL_EMU | \
SYSCALL_WORK_SYSCALL_AUDIT | \
SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_RSEQ_SLICE | \
ARCH_SYSCALL_WORK_ENTER)
-
#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
SYSCALL_WORK_SYSCALL_TRACE | \
SYSCALL_WORK_SYSCALL_AUDIT | \
@@ -45,7 +46,84 @@
SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
-long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
+/**
+ * arch_ptrace_report_syscall_entry - Architecture specific ptrace_report_syscall_entry() wrapper
+ *
+ * Invoked from syscall_trace_enter() to wrap ptrace_report_syscall_entry().
+ *
+ * This allows architecture specific ptrace_report_syscall_entry()
+ * implementations. If not defined by the architecture, this falls back to
+ * ptrace_report_syscall_entry().
+ */
+static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs);
+
+#ifndef arch_ptrace_report_syscall_entry
+static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs)
+{
+ return ptrace_report_syscall_entry(regs);
+}
+#endif
+
+bool syscall_user_dispatch(struct pt_regs *regs);
+long trace_syscall_enter(struct pt_regs *regs, long syscall);
+void trace_syscall_exit(struct pt_regs *regs, long ret);
+
+static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
+{
+ if (unlikely(audit_context())) {
+ unsigned long args[6];
+
+ syscall_get_arguments(current, regs, args);
+ audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
+ }
+}
+
+static __always_inline long syscall_trace_enter(struct pt_regs *regs, unsigned long work)
+{
+ long syscall, ret = 0;
+
+ /*
+ * Handle Syscall User Dispatch. This must come first, since
+ * the ABI here can be something that doesn't make sense for
+ * other syscall_work features.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (syscall_user_dispatch(regs))
+ return -1L;
+ }
+
+ /*
+ * User space got a time slice extension granted and relinquishes
+ * the CPU. The work stops the slice timer to avoid an extra round
+ * through hrtimer_interrupt().
+ */
+ if (work & SYSCALL_WORK_SYSCALL_RSEQ_SLICE)
+ rseq_syscall_enter_work(syscall_get_nr(current, regs));
+
+ /* Handle ptrace */
+ if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
+ ret = arch_ptrace_report_syscall_entry(regs);
+ if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
+ return -1L;
+ }
+
+ /* Do seccomp after ptrace, to catch any tracer changes. */
+ if (work & SYSCALL_WORK_SECCOMP) {
+ ret = __secure_computing();
+ if (ret == -1L)
+ return ret;
+ }
+
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
+ syscall = trace_syscall_enter(regs, syscall);
+
+ syscall_enter_audit(regs, syscall);
+
+ return ret ? : syscall;
+}
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
@@ -75,7 +153,7 @@ static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *re
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
if (work & SYSCALL_WORK_ENTER)
- syscall = syscall_trace_enter(regs, syscall, work);
+ syscall = syscall_trace_enter(regs, work);
return syscall;
}
@@ -112,6 +190,37 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
return ret;
}
+/*
+ * If SYSCALL_EMU is set, then the only reason to report is when
+ * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
+ * instruction has already been reported in syscall_enter_from_user_mode().
+ */
+static __always_inline bool report_single_step(unsigned long work)
+{
+ if (work & SYSCALL_WORK_SYSCALL_EMU)
+ return false;
+
+ return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
+}
+
+/**
+ * arch_ptrace_report_syscall_exit - Architecture specific ptrace_report_syscall_exit() wrapper
+ *
+ * This allows architecture specific ptrace_report_syscall_exit()
+ * implementations. If not defined by the architecture, this falls back
+ * to ptrace_report_syscall_exit().
+ */
+static __always_inline void arch_ptrace_report_syscall_exit(struct pt_regs *regs,
+ int step);
+
+#ifndef arch_ptrace_report_syscall_exit
+static __always_inline void arch_ptrace_report_syscall_exit(struct pt_regs *regs,
+ int step)
+{
+ ptrace_report_syscall_exit(regs, step);
+}
+#endif
+
/**
* syscall_exit_work - Handle work before returning to user mode
* @regs: Pointer to current pt_regs
@@ -119,20 +228,40 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
*
* Do one-time syscall specific work.
*/
-void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+static __always_inline void syscall_exit_work(struct pt_regs *regs, unsigned long work)
+{
+ bool step;
+
+ /*
+ * If the syscall was rolled back due to syscall user dispatching,
+ * then the tracers below are not invoked for the same reason as
+ * the entry side was not invoked in syscall_trace_enter(): The ABI
+ * of these syscalls is unknown.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (unlikely(current->syscall_dispatch.on_dispatch)) {
+ current->syscall_dispatch.on_dispatch = false;
+ return;
+ }
+ }
+
+ audit_syscall_exit(regs);
+
+ if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
+ trace_syscall_exit(regs, syscall_get_return_value(current, regs));
+
+ step = report_single_step(work);
+ if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
+ arch_ptrace_report_syscall_exit(regs, step);
+}
/**
- * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * syscall_exit_to_user_mode_work - Handle one time work before returning to user mode
 * @regs:	Pointer to current's pt_regs
*
- * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
- * exit_to_user_mode() to perform the final transition to user mode.
+ * Step 1 of syscall_exit_to_user_mode() with the same calling convention.
*
- * Calling convention is the same as for syscall_exit_to_user_mode() and it
- * returns with all work handled and interrupts disabled. The caller must
- * invoke exit_to_user_mode() before actually switching to user mode to
- * make the final state transitions. Interrupts must stay disabled between
- * return from this function and the invocation of exit_to_user_mode().
+ * The caller must invoke steps 2-3 of syscall_exit_to_user_mode() afterwards.
*/
static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
@@ -155,15 +284,13 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
*/
if (unlikely(work & SYSCALL_WORK_EXIT))
syscall_exit_work(regs, work);
- local_irq_disable_exit_to_user();
- syscall_exit_to_user_mode_prepare(regs);
}
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs:	Pointer to current's pt_regs
*
- * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * Invoked with interrupts enabled and fully valid @regs. Returns with all
* work handled, interrupts disabled such that the caller can immediately
* switch to user mode. Called from architecture specific syscall and ret
* from fork code.
@@ -176,6 +303,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
* - ptrace (single stepping)
*
* 2) Preparatory work
+ * - Disable interrupts
* - Exit to user mode loop (common TIF handling). Invokes
* arch_exit_to_user_mode_work() for architecture specific TIF work
* - Architecture specific one time work arch_exit_to_user_mode_prepare()
@@ -184,14 +312,17 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
* 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
* functionality in exit_to_user_mode().
*
- * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
- * exit_to_user_mode(). This function is preferred unless there is a
- * compelling architectural reason to use the separate functions.
+ * This is a combination of syscall_exit_to_user_mode_work() (1), disabling
+ * interrupts followed by syscall_exit_to_user_mode_prepare() (2) and
+ * exit_to_user_mode() (3). This function is preferred unless there is a
+ * compelling architectural reason to invoke the functions separately.
*/
static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
{
instrumentation_begin();
syscall_exit_to_user_mode_work(regs);
+ local_irq_disable_exit_to_user();
+ syscall_exit_to_user_mode_prepare(regs);
instrumentation_end();
exit_to_user_mode();
}
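For architectures with a compelling reason to split the steps, the equivalent open-coded sequence is a sketch like the following (assuming the same calling context; arch-specific work would be interleaved between the steps):

static __always_inline void arch_syscall_exit(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_work(regs);		/* step 1 */
	/* arch-specific work could be interleaved here */
	local_irq_disable_exit_to_user();		/* step 2 */
	syscall_exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();				/* step 3 */
}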
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 5c9162193d26..798abec67a1b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -216,13 +216,43 @@ static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
struct link_mode_info {
- int speed;
- u8 lanes;
- u8 duplex;
+ int speed;
+ u8 lanes;
+ u8 min_pairs;
+ u8 pairs;
+ u8 duplex;
+ u16 mediums;
};
extern const struct link_mode_info link_mode_params[];
+enum ethtool_link_medium {
+ ETHTOOL_LINK_MEDIUM_BASET = 0,
+ ETHTOOL_LINK_MEDIUM_BASEK,
+ ETHTOOL_LINK_MEDIUM_BASES,
+ ETHTOOL_LINK_MEDIUM_BASEC,
+ ETHTOOL_LINK_MEDIUM_BASEL,
+ ETHTOOL_LINK_MEDIUM_BASED,
+ ETHTOOL_LINK_MEDIUM_BASEE,
+ ETHTOOL_LINK_MEDIUM_BASEF,
+ ETHTOOL_LINK_MEDIUM_BASEV,
+ ETHTOOL_LINK_MEDIUM_BASEMLD,
+ ETHTOOL_LINK_MEDIUM_NONE,
+
+ __ETHTOOL_LINK_MEDIUM_LAST,
+};
+
+#define ETHTOOL_MEDIUM_FIBER_BITS (BIT(ETHTOOL_LINK_MEDIUM_BASES) | \
+ BIT(ETHTOOL_LINK_MEDIUM_BASEL) | \
+ BIT(ETHTOOL_LINK_MEDIUM_BASEF))
+
+enum ethtool_link_medium ethtool_str_to_medium(const char *str);
+
+static inline int ethtool_linkmode_n_pairs(unsigned int mode)
+{
+ return link_mode_params[mode].pairs;
+}
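A sketch of how a caller might test a link mode's medium against the fiber mask, assuming the new mediums member holds a bitmask of BIT(enum ethtool_link_medium) values (the helper name is illustrative):

static inline bool link_mode_is_fiber(unsigned int mode)
{
	return link_mode_params[mode].mediums & ETHTOOL_MEDIUM_FIBER_BITS;
}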
+
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index f0cf2714ec52..262e24d83313 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -201,9 +201,9 @@ struct handle_to_path_ctx {
* @commit_metadata: commit metadata changes to stable storage
*
* See Documentation/filesystems/nfs/exporting.rst for details on how to use
- * this interface correctly.
+ * this interface correctly and the definition of the flags.
*
- * encode_fh:
+ * @encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
* file referred to by the &struct dentry @de. If @flag has CONNECTABLE bit
@@ -215,7 +215,7 @@ struct handle_to_path_ctx {
* greater than @max_len*4 bytes). On error @max_len contains the minimum
* size(in 4 byte unit) needed to encode the file handle.
*
- * fh_to_dentry:
+ * @fh_to_dentry:
* @fh_to_dentry is given a &struct super_block (@sb) and a file handle
* fragment (@fh, @fh_len). It should return a &struct dentry which refers
* to the same file that the file handle fragment refers to. If it cannot,
@@ -227,31 +227,44 @@ struct handle_to_path_ctx {
* created with d_alloc_root. The caller can then find any other extant
* dentries by following the d_alias links.
*
- * fh_to_parent:
+ * @fh_to_parent:
* Same as @fh_to_dentry, except that it returns a pointer to the parent
* dentry if it was encoded into the filehandle fragment by @encode_fh.
*
- * get_name:
+ * @get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX + 1 sized
 * buffer). get_name() should return %0 on success, or a negative error
 * code on failure. @get_name will be called without @parent->i_rwsem held.
*
- * get_parent:
+ * @get_parent:
* @get_parent should find the parent directory for the given @child which
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
- * permission:
+ * @permission:
* Allow filesystems to specify a custom permission function.
*
- * open:
+ * @open:
* Allow filesystems to specify a custom open function.
*
- * commit_metadata:
+ * @commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
+ * @get_uuid:
+ * Get a filesystem unique signature exposed to clients.
+ *
+ * @map_blocks:
+ * Map and, if necessary, allocate blocks for a layout.
+ *
+ * @commit_blocks:
+ * Commit blocks in a layout once the client is done with them.
+ *
+ * @flags:
+ *	Allows the filesystem to tell nfsd that it may need to be handled
+ *	differently.
+ *
* Locking rules:
* get_parent is called with child->d_inode->i_rwsem down
* get_name is not (which is possibly inconsistent)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 05cc251035da..65fb70382675 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -304,10 +304,6 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
-
- /* called at KDB enter and leave time to prepare the console */
- int (*fb_debug_enter)(struct fb_info *info);
- int (*fb_debug_leave)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index 2f5e5588ee07..d2c9740e26a8 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -242,7 +242,14 @@ bool locks_owner_has_blockers(struct file_lock_context *flctx,
static inline struct file_lock_context *
locks_inode_context(const struct inode *inode)
{
- return smp_load_acquire(&inode->i_flctx);
+ /*
+ * Paired with smp_store_release in locks_get_lock_context().
+ *
+ * Ensures ->i_flctx will be visible if we spotted the flag.
+ */
+ if (likely(!(smp_load_acquire(&inode->i_opflags) & IOP_FLCTX)))
+ return NULL;
+ return READ_ONCE(inode->i_flctx);
}
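For reference, the publisher this pairs with would order its stores like the following sketch (assuming the IOP_FLCTX scheme introduced here; locking elided):

/* In locks_get_lock_context(), conceptually: */
WRITE_ONCE(inode->i_flctx, ctx);	/* make the context visible first */
/* Pairs with smp_load_acquire() in locks_inode_context(). */
smp_store_release(&inode->i_opflags, inode->i_opflags | IOP_FLCTX);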
#else /* !CONFIG_FILE_LOCKING */
@@ -469,7 +476,7 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
@@ -488,7 +495,7 @@ static inline int break_deleg(struct inode *inode, unsigned int flags)
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
@@ -533,8 +540,11 @@ static inline int break_deleg_wait(struct delegated_inode *di)
static inline int break_layout(struct inode *inode, bool wait)
{
+ struct file_lock_context *flctx;
+
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ flctx = locks_inode_context(inode);
+ if (flctx && !list_empty_careful(&flctx->flc_lease)) {
unsigned int flags = LEASE_BREAK_LAYOUT;
if (!wait)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fd54fed8f95f..44d7ae95ddbc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1167,6 +1167,7 @@ bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
bool bpf_jit_supports_private_stack(void);
bool bpf_jit_supports_timed_may_goto(void);
+bool bpf_jit_supports_fsession(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
u64 arch_bpf_timed_may_goto(void);
@@ -1375,24 +1376,13 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-int __bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char *sym);
+int bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline int
-bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym)
-{
- int ret = __bpf_address_lookup(addr, size, off, sym);
-
- if (ret && modname)
- *modname = NULL;
- return ret;
-}
-
void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
@@ -1431,8 +1421,8 @@ static inline bool bpf_jit_kallsyms_enabled(void)
}
static inline int
-__bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char *sym)
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym)
{
return 0;
}
@@ -1453,13 +1443,6 @@ static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
return NULL;
}
-static inline int
-bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym)
-{
- return 0;
-}
-
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 6143b7d28eac..986d712e4d94 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -526,14 +526,13 @@ struct fw_iso_packet {
struct fw_iso_buffer {
enum dma_data_direction direction;
struct page **pages;
+ dma_addr_t *dma_addrs;
int page_count;
- int page_count_mapped;
};
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
@@ -547,21 +546,26 @@ union fw_iso_callback {
fw_iso_mc_callback_t mc;
};
+enum fw_iso_context_flag {
+ FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS = BIT(0),
+};
+
struct fw_iso_context {
struct fw_card *card;
struct work_struct work;
int type;
int channel;
int speed;
- bool drop_overflow_headers;
+ int flags;
size_t header_size;
+ size_t header_storage_size;
union fw_iso_callback callback;
void *callback_data;
};
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
+struct fw_iso_context *__fw_iso_context_create(struct fw_card *card, int type, int channel,
+ int speed, size_t header_size, size_t header_storage_size,
+ union fw_iso_callback callback, void *callback_data);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -570,6 +574,26 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+static inline struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
+ int channel, int speed, size_t header_size, fw_iso_callback_t callback,
+ void *callback_data)
+{
+ union fw_iso_callback cb = { .sc = callback };
+
+ return __fw_iso_context_create(card, type, channel, speed, header_size, PAGE_SIZE, cb,
+ callback_data);
+}
+
+static inline struct fw_iso_context *fw_iso_context_create_with_header_storage_size(
+ struct fw_card *card, int type, int channel, int speed, size_t header_size,
+ size_t header_storage_size, fw_iso_callback_t callback, void *callback_data)
+{
+ union fw_iso_callback cb = { .sc = callback };
+
+ return __fw_iso_context_create(card, type, channel, speed, header_size, header_storage_size,
+ cb, callback_data);
+}
+
/**
* fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
* @ctx: the isochronous context
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
index 1f97764fdfd7..51e99f47e90e 100644
--- a/include/linux/firmware/cirrus/cs_dsp_test_utils.h
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -126,7 +126,7 @@ struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
unsigned int fw_version);
void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
unsigned int alg_id, unsigned int alg_ver,
- int type, unsigned int offset,
+ int type, u16 offset, u32 offset32,
const void *payload_data, size_t payload_len_bytes);
void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
const char *info);
@@ -136,6 +136,10 @@ void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
unsigned int alg_id, unsigned int alg_ver,
int mem_region, unsigned int reg_addr_offset,
const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_patch_off32(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
index 74e5a4f6c13a..eae24dde9e41 100644
--- a/include/linux/firmware/cirrus/wmfw.h
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -172,7 +172,7 @@ struct wmfw_coeff_item {
__le16 type;
__le32 id;
__le32 ver;
- __le32 sr;
+ __le32 offset32;
__le32 len;
u8 data[];
} __packed;
@@ -200,4 +200,9 @@ struct wmfw_coeff_item {
#define WMFW_HALO_XM_PACKED 0x11
#define WMFW_HALO_YM_PACKED 0x12
+#define WMFW_ADSP2_XM_LONG 0xf405
+#define WMFW_ADSP2_YM_LONG 0xf406
+#define WMFW_HALO_XM_PACKED_LONG 0xf411
+#define WMFW_HALO_YM_PACKED_LONG 0xf412
+
#endif
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
index a33b45027356..ba5d93bd6158 100644
--- a/include/linux/firmware/imx/sm.h
+++ b/include/linux/firmware/imx/sm.h
@@ -26,6 +26,8 @@
#define SCMI_IMX94_CTRL_SAI3_MCLK 5U /*!< WAKE SAI3 MCLK */
#define SCMI_IMX94_CTRL_SAI4_MCLK 6U /*!< WAKE SAI4 MCLK */
+#define SCMI_IMX952_CTRL_BYPASS_AUDMIX	8U	/*!< WAKE AUDMIX */
+
#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
int scmi_imx_misc_ctrl_set(u32 id, u32 val);
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index a55ca771286b..5747bd191bf1 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -66,19 +66,33 @@ int qcom_scm_set_warm_boot_addr(void *entry);
void qcom_scm_cpu_power_down(u32 flags);
int qcom_scm_set_remote_state(u32 state, u32 id);
-struct qcom_scm_pas_metadata {
+struct qcom_scm_pas_context {
+ struct device *dev;
+ u32 pas_id;
+ phys_addr_t mem_phys;
+ size_t mem_size;
void *ptr;
dma_addr_t phys;
ssize_t size;
+ bool use_tzmem;
};
-int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
- struct qcom_scm_pas_metadata *ctx);
-void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
-int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
-int qcom_scm_pas_auth_and_reset(u32 peripheral);
-int qcom_scm_pas_shutdown(u32 peripheral);
-bool qcom_scm_pas_supported(u32 peripheral);
+struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev,
+ u32 pas_id,
+ phys_addr_t mem_phys,
+ size_t mem_size);
+int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size,
+ struct qcom_scm_pas_context *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx);
+int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 pas_id);
+int qcom_scm_pas_shutdown(u32 pas_id);
+bool qcom_scm_pas_supported(u32 pas_id);
+struct resource_table *qcom_scm_pas_get_rsc_table(struct qcom_scm_pas_context *ctx,
+ void *input_rt, size_t input_rt_size,
+ size_t *output_rt_size);
+
+int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx);
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
diff --git a/include/linux/firmware/xlnx-zynqmp-crypto.h b/include/linux/firmware/xlnx-zynqmp-crypto.h
new file mode 100644
index 000000000000..56595ab37c43
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-crypto.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for XilSECURE APIs.
+ *
+ * Copyright (C) 2014-2022 Xilinx, Inc.
+ * Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
+#define __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
+
+/**
+ * struct xlnx_feature - Feature data
+ * @family: Family code of platform
+ * @subfamily: Subfamily code of platform
+ * @feature_id: Feature id of module
+ * @data: Collection of all supported platform data
+ */
+struct xlnx_feature {
+ u32 family;
+ u32 feature_id;
+ void *data;
+};
+
+/* xilSecure API commands module id + api id */
+#define XSECURE_API_AES_INIT 0x509
+#define XSECURE_API_AES_OP_INIT 0x50a
+#define XSECURE_API_AES_UPDATE_AAD 0x50b
+#define XSECURE_API_AES_ENCRYPT_UPDATE 0x50c
+#define XSECURE_API_AES_ENCRYPT_FINAL 0x50d
+#define XSECURE_API_AES_DECRYPT_UPDATE 0x50e
+#define XSECURE_API_AES_DECRYPT_FINAL 0x50f
+#define XSECURE_API_AES_KEY_ZERO 0x510
+#define XSECURE_API_AES_WRITE_KEY 0x511
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
+void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map);
+int versal_pm_aes_key_write(const u32 keylen,
+ const u32 keysrc, const u64 keyaddr);
+int versal_pm_aes_key_zero(const u32 keysrc);
+int versal_pm_aes_op_init(const u64 hw_req);
+int versal_pm_aes_update_aad(const u64 aad_addr, const u32 aad_len);
+int versal_pm_aes_enc_update(const u64 in_params, const u64 in_addr);
+int versal_pm_aes_dec_update(const u64 in_params, const u64 in_addr);
+int versal_pm_aes_dec_final(const u64 gcm_addr);
+int versal_pm_aes_enc_final(const u64 gcm_addr);
+int versal_pm_aes_init(void);
+
+#else
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int versal_pm_aes_key_write(const u32 keylen,
+ const u32 keysrc, const u64 keyaddr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_key_zero(const u32 keysrc)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_op_init(const u64 hw_req)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_update_aad(const u64 aad_addr,
+ const u32 aad_len)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_enc_update(const u64 in_params,
+ const u64 in_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_dec_update(const u64 in_params,
+ const u64 in_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_enc_final(const u64 gcm_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_dec_final(const u64 gcm_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_init(void)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__ */
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 15fdbd089bbf..d70dcd462b44 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>
+#include <linux/firmware/xlnx-zynqmp-crypto.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -589,9 +590,7 @@ int zynqmp_pm_release_node(const u32 node);
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
-int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_efuse_access(const u64 address, u32 *out);
-int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
int zynqmp_pm_fpga_get_config_status(u32 *value);
@@ -772,22 +771,11 @@ static inline int zynqmp_pm_set_requirement(const u32 node,
return -ENODEV;
}
-static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
{
return -ENODEV;
}
-static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
- const u32 flags)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index b3b53f8c1b28..171982e53c9a 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
-#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/const.h>
#include <linux/limits.h>
@@ -10,10 +9,9 @@
#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)
-#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
-#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
-#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
- FIELD_PREP(GENMASK(7, 1), func))
+#define FORTIFY_REASON_DIR(r) ((r) & 1)
+#define FORTIFY_REASON_FUNC(r) ((r) >> 1)
+#define FORTIFY_REASON(func, write) ((func) << 1 | (write))
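The packing keeps the read/write direction in bit 0 and the function id in the remaining bits; a worked round-trip with illustrative values:

/* func id 5, write direction: (5 << 1) | 1 == 11 */
static_assert(FORTIFY_REASON(5, 1) == 11);
static_assert(FORTIFY_REASON_FUNC(11) == 5);
static_assert(FORTIFY_REASON_DIR(11) == 1);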
/* Overridden by KUnit tests. */
#ifndef fortify_panic
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
index 2b85fe9e7f9a..b1e575665fc5 100644
--- a/include/linux/framer/framer.h
+++ b/include/linux/framer/framer.h
@@ -96,7 +96,7 @@ struct framer {
#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
int framer_pm_runtime_get(struct framer *framer);
int framer_pm_runtime_get_sync(struct framer *framer);
-int framer_pm_runtime_put(struct framer *framer);
+void framer_pm_runtime_put(struct framer *framer);
int framer_pm_runtime_put_sync(struct framer *framer);
int framer_init(struct framer *framer);
int framer_exit(struct framer *framer);
@@ -124,9 +124,8 @@ static inline int framer_pm_runtime_get_sync(struct framer *framer)
return -ENOSYS;
}
-static inline int framer_pm_runtime_put(struct framer *framer)
+static inline void framer_pm_runtime_put(struct framer *framer)
{
- return -ENOSYS;
}
static inline int framer_pm_runtime_put_sync(struct framer *framer)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a01621fa636a..2e4d1e8b0e71 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -631,6 +631,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_MGTIME 0x0020
#define IOP_CACHED_LINK 0x0040
#define IOP_FASTPERM_MAY_EXEC 0x0080
+#define IOP_FLCTX 0x0100
/*
* Inode state bits. Protected by inode->i_lock
@@ -1717,6 +1718,13 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
struct timespec64 simple_inode_init_ts(struct inode *inode);
+static inline int inode_time_dirty_flag(struct inode *inode)
+{
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
+ return I_DIRTY_TIME;
+ return I_DIRTY_SYNC;
+}
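A sketch of the intended call site (the surrounding timestamp update is illustrative): a filesystem marks the inode with the lazytime-aware flag instead of hard-coding I_DIRTY_SYNC:

inode_set_mtime_to_ts(inode, current_time(inode));
__mark_inode_dirty(inode, inode_time_dirty_flag(inode));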
+
/*
* Snapshotting support.
*/
@@ -1985,6 +1993,11 @@ int wrap_directory_iterator(struct file *, struct dir_context *,
static int shared_##x(struct file *file , struct dir_context *ctx) \
{ return wrap_directory_iterator(file, ctx, x); }
+enum fs_update_time {
+ FS_UPD_ATIME,
+ FS_UPD_CMTIME,
+};
+
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
@@ -2012,7 +2025,9 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, int);
+ int (*update_time)(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
+ void (*sync_lazytime)(struct inode *inode);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
@@ -2239,16 +2254,8 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-enum file_time_flags {
- S_ATIME = 1,
- S_MTIME = 2,
- S_CTIME = 4,
- S_VERSION = 8,
-};
-
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
-int inode_update_time(struct inode *inode, int flags);
static inline void file_accessed(struct file *file)
{
@@ -2276,8 +2283,6 @@ struct file_system_type {
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
- struct dentry *(*mount) (struct file_system_type *, int,
- const char *, void *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
@@ -2401,8 +2406,10 @@ static inline void super_set_sysfs_name_generic(struct super_block *sb, const ch
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
void iput_not_last(struct inode *);
-int inode_update_timestamps(struct inode *inode, int flags);
-int generic_update_time(struct inode *, int);
+int inode_update_time(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
+int generic_update_time(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2411,14 +2418,19 @@ extern struct kobject *fs_kobj;
/* fs/open.c */
struct audit_names;
-struct filename {
+
+struct __filename_head {
const char *name; /* pointer to actual string */
- const __user char *uptr; /* original userland pointer */
- atomic_t refcnt;
+ int refcnt;
struct audit_names *aname;
- const char iname[];
+};
+#define EMBEDDED_NAME_MAX (192 - sizeof(struct __filename_head))
+struct filename {
+ struct __filename_head;
+ const char iname[EMBEDDED_NAME_MAX];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+static_assert(sizeof(struct filename) % 64 == 0);
static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
@@ -2459,7 +2471,7 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_open_nonotify(const struct path *path, int flags,
const struct cred *cred);
-struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+struct file *dentry_create(struct path *path, int flags, umode_t mode,
const struct cred *cred);
const struct path *backing_file_user_path(const struct file *f);
@@ -2513,11 +2525,23 @@ static inline struct filename *getname_maybe_null(const char __user *name, int f
extern void putname(struct filename *name);
DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
-static inline struct filename *refname(struct filename *name)
-{
- atomic_inc(&name->refcnt);
- return name;
-}
+struct delayed_filename {
+ struct filename *__incomplete_filename; // don't touch
+};
+#define INIT_DELAYED_FILENAME(ptr) \
+ ((void)(*(ptr) = (struct delayed_filename){}))
+int delayed_getname(struct delayed_filename *, const char __user *);
+int delayed_getname_uflags(struct delayed_filename *v, const char __user *, int);
+void dismiss_delayed_filename(struct delayed_filename *);
+int putname_to_delayed(struct delayed_filename *, struct filename *);
+struct filename *complete_getname(struct delayed_filename *);
+
+DEFINE_CLASS(filename, struct filename *, putname(_T), getname(p), const char __user *p)
+EXTEND_CLASS(filename, _kernel, getname_kernel(p), const char *p)
+EXTEND_CLASS(filename, _flags, getname_flags(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _uflags, getname_uflags(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _maybe_null, getname_maybe_null(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _complete_delayed, complete_getname(p), struct delayed_filename *p)
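With these class definitions, callers get scope-managed filename lifetimes; a minimal sketch (the syscall and helper are hypothetical, and this assumes putname() tolerates ERR_PTR() values as the class exit above implies):

SYSCALL_DEFINE1(example, const char __user *, pathname)
{
	CLASS(filename, name)(pathname);	/* putname() runs at scope exit */

	if (IS_ERR(name))
		return PTR_ERR(name);
	return do_something_with(name);		/* hypothetical helper */
}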
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
@@ -2536,10 +2560,8 @@ static inline int finish_open_simple(struct file *file, int error)
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
-extern struct kmem_cache *names_cachep;
-
-#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname() kmalloc(PATH_MAX, GFP_KERNEL)
+#define __putname(name) kfree(name)
void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
@@ -2659,6 +2681,11 @@ static inline int path_permission(const struct path *path, int mask)
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode);
+int may_delete_dentry(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *victim, bool isdir);
+int may_create_dentry(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *child);
+
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
@@ -3219,7 +3246,6 @@ extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode);
-extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
index 6bd3009e09b3..fa7638b81246 100644
--- a/include/linux/fs/super_types.h
+++ b/include/linux/fs/super_types.h
@@ -35,6 +35,7 @@ struct user_namespace;
struct workqueue_struct;
struct writeback_control;
struct xattr_handler;
+struct fserror_event;
extern struct super_block *blockdev_superblock;
@@ -96,7 +97,6 @@ struct super_operations {
const void *owner);
int (*unfreeze_fs)(struct super_block *sb);
int (*statfs)(struct dentry *dentry, struct kstatfs *kstatfs);
- int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin)(struct super_block *sb);
int (*show_options)(struct seq_file *seq, struct dentry *dentry);
@@ -124,6 +124,9 @@ struct super_operations {
*/
int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
void (*shutdown)(struct super_block *sb);
+
+ /* Report a filesystem error */
+ void (*report_error)(const struct fserror_event *event);
};
struct super_block {
@@ -268,6 +271,9 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
long s_min_writeback_pages;
+
+ /* number of fserrors that are being sent to fsnotify/filesystems */
+ refcount_t s_pending_errors;
} __randomize_layout;
/*
diff --git a/include/linux/fserror.h b/include/linux/fserror.h
new file mode 100644
index 000000000000..5e1ad78c346e
--- /dev/null
+++ b/include/linux/fserror.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef _LINUX_FSERROR_H__
+#define _LINUX_FSERROR_H__
+
+void fserror_mount(struct super_block *sb);
+void fserror_unmount(struct super_block *sb);
+
+enum fserror_type {
+ /* pagecache I/O failed */
+ FSERR_BUFFERED_READ,
+ FSERR_BUFFERED_WRITE,
+
+ /* direct I/O failed */
+ FSERR_DIRECTIO_READ,
+ FSERR_DIRECTIO_WRITE,
+
+ /* out of band media error reported */
+ FSERR_DATA_LOST,
+
+ /* filesystem metadata */
+ FSERR_METADATA,
+};
+
+struct fserror_event {
+ struct work_struct work;
+ struct super_block *sb;
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ enum fserror_type type;
+
+ /* negative error number */
+ int error;
+};
+
+void fserror_report(struct super_block *sb, struct inode *inode,
+ enum fserror_type type, loff_t pos, u64 len, int error,
+ gfp_t gfp);
+
+static inline void fserror_report_io(struct inode *inode,
+ enum fserror_type type, loff_t pos,
+ u64 len, int error, gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, type, pos, len, error, gfp);
+}
+
+static inline void fserror_report_data_lost(struct inode *inode, loff_t pos,
+ u64 len, gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, FSERR_DATA_LOST, pos, len, -EIO,
+ gfp);
+}
+
+static inline void fserror_report_file_metadata(struct inode *inode, int error,
+ gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, FSERR_METADATA, 0, 0, error, gfp);
+}
+
+static inline void fserror_report_metadata(struct super_block *sb, int error,
+ gfp_t gfp)
+{
+ fserror_report(sb, NULL, FSERR_METADATA, 0, 0, error, gfp);
+}
+
+static inline void fserror_report_shutdown(struct super_block *sb, gfp_t gfp)
+{
+ fserror_report(sb, NULL, FSERR_METADATA, 0, 0, -ESHUTDOWN, gfp);
+}
+
+#endif /* _LINUX_FSERROR_H__ */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0d954ea7b179..95985400d3d8 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -553,7 +553,7 @@ struct fsnotify_mark_connector {
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
- struct hlist_head list;
+ struct hlist_head list; /* List of marks */
};
/*
@@ -562,6 +562,9 @@ struct fsnotify_mark_connector {
*/
struct fsnotify_sb_info {
struct fsnotify_mark_connector __rcu *sb_marks;
+ /* List of connectors for inode marks */
+ struct list_head inode_conn_list;
+ spinlock_t list_lock; /* Lock protecting inode_conn_list */
/*
* Number of inode/mount/sb objects that are being watched in this sb.
* Note that inodes objects are currently double-accounted.
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 5bc7280425a7..fed91023bea9 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -31,13 +31,6 @@ struct fsverity_info;
/* Verity operations for filesystems */
struct fsverity_operations {
/**
- * The offset of the pointer to struct fsverity_info in the
- * filesystem-specific part of the inode, relative to the beginning of
- * the common part of the inode (the 'struct inode').
- */
- ptrdiff_t inode_info_offs;
-
- /**
* Begin enabling verity on the given file.
*
* @filp: a readonly file descriptor for the file
@@ -97,10 +90,6 @@ struct fsverity_operations {
*
* @inode: the inode
* @index: 0-based index of the page within the Merkle tree
- * @num_ra_pages: The number of Merkle tree pages that should be
- * prefetched starting at @index if the page at @index
- * isn't already cached. Implementations may ignore this
- * argument; it's only a performance optimization.
*
* This can be called at any time on an open verity file. It may be
* called by multiple processes concurrently, even with the same page.
@@ -110,13 +99,28 @@ struct fsverity_operations {
* Return: the page on success, ERR_PTR() on failure
*/
struct page *(*read_merkle_tree_page)(struct inode *inode,
- pgoff_t index,
- unsigned long num_ra_pages);
+ pgoff_t index);
+
+ /**
+ * Perform readahead of a Merkle tree for the given inode.
+ *
+ * @inode: the inode
+ * @index: 0-based index of the first page within the Merkle tree
+ * @nr_pages: number of pages to be read ahead.
+ *
+ * This can be called at any time on an open verity file. It may be
+ * called by multiple processes concurrently, even with the same range.
+ *
+	 * Optional method; implementing it allows ->read_merkle_tree_page()
+	 * to find cached data instead of issuing dependent I/O.
+ */
+ void (*readahead_merkle_tree)(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
/**
- * Write a Merkle tree block to the given inode.
+ * Write a Merkle tree block to the given file.
*
- * @inode: the inode for which the Merkle tree is being built
+ * @file: the file for which the Merkle tree is being built
* @buf: the Merkle tree block to write
* @pos: the position of the block in the Merkle tree (in bytes)
* @size: the Merkle tree block size (in bytes)
@@ -126,43 +130,48 @@ struct fsverity_operations {
*
* Return: 0 on success, -errno on failure
*/
- int (*write_merkle_tree_block)(struct inode *inode, const void *buf,
+ int (*write_merkle_tree_block)(struct file *file, const void *buf,
u64 pos, unsigned int size);
};
#ifdef CONFIG_FS_VERITY
-
-/*
- * Returns the address of the verity info pointer within the filesystem-specific
- * part of the inode. (To save memory on filesystems that don't support
- * fsverity, a field in 'struct inode' itself is no longer used.)
+/**
+ * fsverity_active() - do reads from the inode need to go through fs-verity?
+ * @inode: inode to check
+ *
+ * This checks whether the inode's verity info has been set, and reads need
+ * to verify the file data.
+ *
+ * Return: true if reads need to go through fs-verity, otherwise false
*/
-static inline struct fsverity_info **
-fsverity_info_addr(const struct inode *inode)
+static inline bool fsverity_active(const struct inode *inode)
{
- VFS_WARN_ON_ONCE(inode->i_sb->s_vop->inode_info_offs == 0);
- return (void *)inode + inode->i_sb->s_vop->inode_info_offs;
+ if (IS_VERITY(inode)) {
+ /*
+ * This pairs with the try_cmpxchg in set_mask_bits()
+ * used to set the S_VERITY bit in i_flags.
+ */
+ smp_mb();
+ return true;
+ }
+
+ return false;
}
+struct fsverity_info *__fsverity_get_info(const struct inode *inode);
+/**
+ * fsverity_get_info() - get fsverity information for an inode
+ * @inode: inode to operate on.
+ *
+ * This gets the fsverity_info for @inode if it exists. Safe to call without
+ * knowing that a fsverity_info exists for @inode, including on filesystems that
+ * do not support fsverity.
+ */
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
- /*
- * Since this function can be called on inodes belonging to filesystems
- * that don't support fsverity at all, and fsverity_info_addr() doesn't
- * work on such filesystems, we have to start with an IS_VERITY() check.
- * Checking IS_VERITY() here is also useful to minimize the overhead of
- * fsverity_active() on non-verity files.
- */
- if (!IS_VERITY(inode))
+ if (!fsverity_active(inode))
return NULL;
-
- /*
- * Pairs with the cmpxchg_release() in fsverity_set_info(). I.e.,
- * another task may publish the inode's verity info concurrently,
- * executing a RELEASE barrier. Use smp_load_acquire() here to safely
- * ACQUIRE the memory the other task published.
- */
- return smp_load_acquire(fsverity_info_addr(inode));
+ return __fsverity_get_info(inode);
}
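A sketch of the read-path usage implied by the new signatures (the surrounding completion code is illustrative):

struct inode *inode = folio->mapping->host;
struct fsverity_info *vi = fsverity_get_info(inode);

/* NULL means not a verity file; otherwise data must verify. */
if (vi && !fsverity_verify_folio(vi, folio))
	return -EIO;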
/* enable.c */
@@ -179,27 +188,6 @@ int fsverity_get_digest(struct inode *inode,
/* open.c */
int __fsverity_file_open(struct inode *inode, struct file *filp);
-int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
-void __fsverity_cleanup_inode(struct inode *inode);
-
-/**
- * fsverity_cleanup_inode() - free the inode's verity info, if present
- * @inode: an inode being evicted
- *
- * Filesystems must call this on inode eviction to free the inode's verity info.
- */
-static inline void fsverity_cleanup_inode(struct inode *inode)
-{
- /*
- * Only IS_VERITY() inodes can have verity info, so start by checking
- * for IS_VERITY() (which is faster than retrieving the pointer to the
- * verity info). This minimizes overhead for non-verity inodes.
- */
- if (IS_VERITY(inode))
- __fsverity_cleanup_inode(inode);
- else
- VFS_WARN_ON_ONCE(*fsverity_info_addr(inode) != NULL);
-}
/* read_metadata.c */
@@ -207,12 +195,18 @@ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
-void fsverity_verify_bio(struct bio *bio);
+bool fsverity_verify_blocks(struct fsverity_info *vi, struct folio *folio,
+ size_t len, size_t offset);
+void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
#else /* !CONFIG_FS_VERITY */
+static inline bool fsverity_active(const struct inode *inode)
+{
+ return false;
+}
+
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
return NULL;
@@ -251,16 +245,6 @@ static inline int __fsverity_file_open(struct inode *inode, struct file *filp)
return -EOPNOTSUPP;
}
-static inline int __fsverity_prepare_setattr(struct dentry *dentry,
- struct iattr *attr)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fsverity_cleanup_inode(struct inode *inode)
-{
-}
-
/* read_metadata.c */
static inline int fsverity_ioctl_read_metadata(struct file *filp,
@@ -271,14 +255,16 @@ static inline int fsverity_ioctl_read_metadata(struct file *filp,
/* verify.c */
-static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+static inline bool fsverity_verify_blocks(struct fsverity_info *vi,
+ struct folio *folio, size_t len,
size_t offset)
{
WARN_ON_ONCE(1);
return false;
}
-static inline void fsverity_verify_bio(struct bio *bio)
+static inline void fsverity_verify_bio(struct fsverity_info *vi,
+ struct bio *bio)
{
WARN_ON_ONCE(1);
}
@@ -290,32 +276,16 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
#endif /* !CONFIG_FS_VERITY */
-static inline bool fsverity_verify_folio(struct folio *folio)
-{
- return fsverity_verify_blocks(folio, folio_size(folio), 0);
-}
-
-static inline bool fsverity_verify_page(struct page *page)
+static inline bool fsverity_verify_folio(struct fsverity_info *vi,
+ struct folio *folio)
{
- return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+ return fsverity_verify_blocks(vi, folio, folio_size(folio), 0);
}
-/**
- * fsverity_active() - do reads from the inode need to go through fs-verity?
- * @inode: inode to check
- *
- * This checks whether the inode's verity info has been set.
- *
- * Filesystems call this from ->readahead() to check whether the pages need to
- * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
- * a race condition where the file is being read concurrently with
- * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before the verity info.)
- *
- * Return: true if reads need to go through fs-verity, otherwise false
- */
-static inline bool fsverity_active(const struct inode *inode)
+static inline bool fsverity_verify_page(struct fsverity_info *vi,
+ struct page *page)
{
- return fsverity_get_info(inode) != NULL;
+ return fsverity_verify_blocks(vi, page_folio(page), PAGE_SIZE, 0);
}
/**
@@ -338,22 +308,12 @@ static inline int fsverity_file_open(struct inode *inode, struct file *filp)
return 0;
}
-/**
- * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
- * @dentry: dentry through which the inode is being changed
- * @attr: attributes to change
- *
- * Verity files are immutable, so deny truncates. This isn't covered by the
- * open-time check because sys_truncate() takes a path, not a file descriptor.
- *
- * Return: 0 on success, -errno on failure
- */
-static inline int fsverity_prepare_setattr(struct dentry *dentry,
- struct iattr *attr)
-{
- if (IS_VERITY(d_inode(dentry)))
- return __fsverity_prepare_setattr(dentry, attr);
- return 0;
-}
+void fsverity_cleanup_inode(struct inode *inode);
+void fsverity_readahead(struct fsverity_info *vi, pgoff_t index,
+ unsigned long nr_pages);
+
+struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index);
+void generic_readahead_merkle_tree(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
#endif /* _LINUX_FSVERITY_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a3a8989e3268..1a4d36fc9085 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,16 +82,19 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+struct ftrace_func_entry;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym);
+ unsigned long *off, char **modname,
+ const unsigned char **modbuildid, char *sym);
#else
static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym)
+ unsigned long *off, char **modname,
+ const unsigned char **modbuildid, char *sym)
{
return 0;
}
@@ -359,7 +362,6 @@ enum {
FTRACE_OPS_FL_DIRECT = BIT(17),
FTRACE_OPS_FL_SUBOP = BIT(18),
FTRACE_OPS_FL_GRAPH = BIT(19),
- FTRACE_OPS_FL_JMP = BIT(20),
};
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -403,9 +405,17 @@ enum ftrace_ops_cmd {
* Negative on failure. The return value is dependent on the
* callback.
*/
-typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, unsigned long ip, enum ftrace_ops_cmd cmd);
#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_HASH_DEFAULT_BITS 10
+
+struct ftrace_hash *alloc_ftrace_hash(int size_bits);
+void free_ftrace_hash(struct ftrace_hash *hash);
+struct ftrace_func_entry *add_ftrace_hash_entry_direct(struct ftrace_hash *hash,
+ unsigned long ip, unsigned long direct);
+
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
struct ftrace_hash __rcu *notrace_hash;
@@ -535,6 +545,10 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash);
+int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash);
+int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock);
+
void ftrace_stub_direct_tramp(void);
#else
@@ -561,6 +575,21 @@ static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned l
return -ENODEV;
}
+static inline int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ return -ENODEV;
+}
+
+static inline int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ return -ENODEV;
+}
+
+static inline int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock)
+{
+ return -ENODEV;
+}
+
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
index 15627ceea9bc..386fa48c4a95 100644
--- a/include/linux/ftrace_regs.h
+++ b/include/linux/ftrace_regs.h
@@ -33,6 +33,31 @@ struct ftrace_regs;
#define ftrace_regs_get_frame_pointer(fregs) \
frame_pointer(&arch_ftrace_regs(fregs)->regs)
+static __always_inline void
+ftrace_partial_regs_update(struct ftrace_regs *fregs, struct pt_regs *regs) { }
+
+#else
+
+/*
+ * ftrace_partial_regs_update - update the original ftrace_regs from regs
+ * @fregs: The ftrace_regs to update from @regs
+ * @regs: The partial regs from ftrace_partial_regs() that was updated
+ *
+ * Some architectures have the partial regs living in the ftrace_regs
+ * structure, whereas other architectures need to make a different copy
+ * of the @regs. If a partial @regs is retrieved by ftrace_partial_regs() and
+ * if the code using @regs updates a field (like the instruction pointer or
+ * stack pointer) it may need to propagate that change to the original @fregs
+ * it retrieved the partial @regs from. Use this function to guarantee that
+ * update happens.
+ */
+static __always_inline void
+ftrace_partial_regs_update(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ ftrace_regs_set_instruction_pointer(fregs, instruction_pointer(regs));
+ ftrace_regs_set_return_value(fregs, regs_return_value(regs));
+}
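A usage sketch, assuming the existing ftrace_partial_regs() helper supplies the partial regs (new_ip is illustrative):

struct pt_regs local_regs;
struct pt_regs *regs = ftrace_partial_regs(fregs, &local_regs);

instruction_pointer_set(regs, new_ip);
ftrace_partial_regs_update(fregs, regs);	/* write the change back */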
+
#endif /* HAVE_ARCH_FTRACE_REGS */
/* This can be overridden by the architectures */
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
deleted file mode 100644
index c304dcdb4eac..000000000000
--- a/include/linux/getcpu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GETCPU_H
-#define _LINUX_GETCPU_H 1
-
-/* Cache for getcpu() to speed it up. Results might be a short time
- out of date, but will be faster.
-
- User programs should not refer to the contents of this structure.
- I repeat they should not refer to it. If they do they will break
- in future kernels.
-
- It is only a private cache for vgetcpu(). It will change in future kernels.
- The user program must store this information per thread (__thread)
- If you want 100% accurate information pass NULL instead. */
-struct getcpu_cache {
- unsigned long blob[128 / sizeof(long)];
-};
-
-#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b155929af5b1..6ecf6dda93e0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -407,9 +407,15 @@ extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
+/* A helper for checking if gfp includes all the specified flags */
+static inline bool gfp_has_flags(gfp_t gfp, gfp_t flags)
+{
+ return (gfp & flags) == flags;
+}
+
static inline bool gfp_has_io_fs(gfp_t gfp)
{
- return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+ return gfp_has_flags(gfp, __GFP_IO | __GFP_FS);
}
/*
@@ -430,39 +436,29 @@ typedef unsigned int __bitwise acr_flags_t;
#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
/* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- acr_flags_t alloc_flags, gfp_t gfp_mask);
-#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
-
-extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask);
-#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
-
-#endif
+int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_frozen_range(...) \
+ alloc_hooks(alloc_contig_frozen_range_noprof(__VA_ARGS__))
+
+int alloc_contig_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_range(...) \
+ alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
+ gfp_t gfp_mask, int nid, nodemask_t *nodemask);
+#define alloc_contig_frozen_pages(...) \
+ alloc_hooks(alloc_contig_frozen_pages_noprof(__VA_ARGS__))
+
+struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...) \
+ alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
+
+void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages);
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
-
-#ifdef CONFIG_CONTIG_ALLOC
-static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
- int nid, nodemask_t *node)
-{
- struct page *page;
-
- if (WARN_ON(!order || !(gfp & __GFP_COMP)))
- return NULL;
-
- page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
-
- return page ? page_folio(page) : NULL;
-}
-#else
-static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
- int nid, nodemask_t *node)
-{
- return NULL;
-}
#endif
-/* This should be paired with folio_put() rather than free_contig_range(). */
-#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 3de43b12209e..814bb2892f99 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -309,8 +309,10 @@ enum {
*
* %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
* watermark is applied to allow access to "atomic reserves".
- * The current implementation doesn't support NMI and few other strict
- * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ * The current implementation doesn't support NMI, nor contexts that disable
+ * preemption under PREEMPT_RT. This includes raw_spin_lock() and plain
+ * preempt_disable() - see "Memory allocation" in
+ * Documentation/core-api/real-time/differences.rst for more info.
*
* %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
@@ -321,6 +323,7 @@ enum {
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback. It is very
* likely to fail to allocate memory, even for very small allocations.
+ * The same restrictions on calling contexts apply as for %GFP_ATOMIC.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index cafeb7a40ad1..0d8408582918 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -607,6 +607,42 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
flags, label);
}
+/**
+ * devm_fwnode_gpiod_get_optional - obtain an optional GPIO from firmware node
+ * @dev: GPIO consumer
+ * @fwnode: handle of the firmware node
+ * @con_id: function within the GPIO consumer
+ * @flags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * This function can be used for drivers that get their configuration
+ * from opaque firmware.
+ *
+ * GPIO descriptors returned from this function are automatically disposed on
+ * driver detach.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the optional function @con_id of device
+ * dev, NULL if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
+ */
+static inline
+struct gpio_desc *devm_fwnode_gpiod_get_optional(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+
+ desc = devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+ return NULL;
+
+ return desc;
+}
+
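A hypothetical consumer (the device, fwnode, and "reset" function name are illustrative) showing the intended optional-GPIO pattern: a hard error is propagated, while a missing GPIO is simply skipped:

	struct gpio_desc *reset;

	reset = devm_fwnode_gpiod_get_optional(dev, fwnode, "reset",
					       GPIOD_OUT_LOW, "foo-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);		/* real acquisition error */
	if (reset)
		gpiod_set_value_cansleep(reset, 1);	/* GPIO was provided */
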
struct acpi_gpio_params {
unsigned int crs_entry_index;
unsigned short line_index;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index abc20f9810fd..af03db851a1d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -197,15 +197,111 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
}
#endif
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
+#ifndef clear_user_page
+/**
+ * clear_user_page() - clear a page to be mapped to user space
+ * @addr: the address of the page
+ * @vaddr: the address of the user mapping
+ * @page: the page
+ *
+ * We condition the definition of clear_user_page() on the architecture
+ * not having a custom clear_user_highpage(). That's because if there
+ * is some special flushing needed for clear_user_highpage() then it
+ * is likely that clear_user_page() also needs some magic. And since
+ * our only caller is the generic clear_user_highpage(), not defining
+ * it is not much of a loss.
+ */
+static inline void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+ clear_page(addr);
+}
+#endif
+
+/**
+ * clear_user_pages() - clear a page range to be mapped to user space
+ * @addr: start address
+ * @vaddr: start address of the user mapping
+ * @page: start page
+ * @npages: number of pages
+ *
+ * Assumes that the region (@addr, +@npages) has been validated
+ * already, so this does no exception handling.
+ *
+ * If the architecture provides a clear_user_page(), use that;
+ * otherwise, we can safely use clear_pages().
+ */
+static inline void clear_user_pages(void *addr, unsigned long vaddr,
+ struct page *page, unsigned int npages)
+{
+#ifdef clear_user_page
+ do {
+ clear_user_page(addr, vaddr, page);
+ addr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+#else
+ /*
+ * Prefer clear_pages() to allow for architectural optimizations
+ * when operating on contiguous page ranges.
+ */
+ clear_pages(addr, npages);
+#endif
+}
+
+/**
+ * clear_user_highpage() - clear a page to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ *
+ * With !CONFIG_HIGHMEM this (and the copy_user_highpage() below) will
+ * be plain clear_user_page() (and copy_user_page()).
+ */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
kunmap_local(addr);
}
+#endif /* clear_user_highpage */
+
+/**
+ * clear_user_highpages() - clear a page range to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ * @npages: number of pages
+ *
+ * Assumes that all the pages in the region (@page, +@npages) are valid,
+ * so this does no exception handling.
+ */
+static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
+ unsigned int npages)
+{
+#if defined(clear_user_highpage) || defined(CONFIG_HIGHMEM)
+ /*
+ * An architecture-defined clear_user_highpage() implies special
+ * handling is needed.
+ *
+ * So we use that, or the generic variant if CONFIG_HIGHMEM is
+ * enabled.
+ */
+ do {
+ clear_user_highpage(page, vaddr);
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+#else
+
+ /*
+ * Prefer clear_user_pages() to allow for architectural optimizations
+ * when operating on contiguous page ranges.
+ */
+ clear_user_pages(page_address(page), vaddr, page, npages);
#endif
+}
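
As an illustrative sketch (not from this patch), a caller zeroing a physically contiguous, user-mapped folio could now use the batched helper instead of looping over clear_user_highpage() itself:

	/* Hypothetical: zero all pages backing @folio mapped at @vaddr. */
	static void zero_user_folio(struct folio *folio, unsigned long vaddr)
	{
		clear_user_highpages(folio_page(folio, 0), vaddr,
				     folio_nr_pages(folio));
	}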
#ifndef vma_alloc_zeroed_movable_folio
/**
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
deleted file mode 100644
index 07414c241e65..000000000000
--- a/include/linux/hippidevice.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Definitions for the HIPPI handlers.
- *
- * Version: @(#)hippidevice.h 1.0.0 05/26/97
- *
- * Author: Jes Sorensen, <Jes.Sorensen@cern.ch>
- *
- * hippidevice.h is based on previous fddidevice.h work by
- * Ross Biro
- * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- * Alan Cox, <gw4pts@gw4pts.ampr.org>
- * Lawrence V. Stefani, <stefani@lkg.dec.com>
- */
-#ifndef _LINUX_HIPPIDEVICE_H
-#define _LINUX_HIPPIDEVICE_H
-
-#include <linux/if_hippi.h>
-
-#ifdef __KERNEL__
-
-struct neigh_parms;
-struct net_device;
-struct sk_buff;
-
-struct hippi_cb {
- __u32 ifield;
-};
-
-__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-int hippi_mac_addr(struct net_device *dev, void *p);
-int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-struct net_device *alloc_hippi_dev(int sizeof_priv);
-#endif
-
-#endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index ca1ec437a3ca..51a6dc2b97e9 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -447,12 +447,16 @@ struct hisi_qp_ops {
int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};
+struct instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
+};
+
struct hisi_qp {
u32 qp_id;
u16 sq_depth;
u16 cq_depth;
u8 alg_type;
- u8 req_type;
struct qm_dma qdma;
void *sqe;
@@ -462,7 +466,6 @@ struct hisi_qp {
struct hisi_qp_status qp_status;
struct hisi_qp_ops *hw_ops;
- void *qp_ctx;
void (*req_cb)(struct hisi_qp *qp, void *data);
void (*event_cb)(struct hisi_qp *qp);
@@ -471,6 +474,11 @@ struct hisi_qp {
bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
+
+ u32 ref_count;
+ spinlock_t qp_lock;
+ struct instance_backlog backlog;
+ const void **msg;
};
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
@@ -563,6 +571,7 @@ void hisi_qm_reset_done(struct pci_dev *pdev);
int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
bool op);
+int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
@@ -575,7 +584,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
void hisi_acc_free_sgl_pool(struct device *dev,
struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
- u8 alg_type, int node, struct hisi_qp **qps);
+ u8 *alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 9fa9c30a34e6..5e7a63143a4a 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -380,7 +380,7 @@ struct host1x_driver {
struct list_head list;
int (*probe)(struct host1x_device *device);
- int (*remove)(struct host1x_device *device);
+ void (*remove)(struct host1x_device *device);
void (*shutdown)(struct host1x_device *device);
};
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 0de12f14d6a4..74adbd4e7003 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -112,12 +112,6 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
-static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
-{
- timer->node.expires = tv64;
- timer->_softexpires = tv64;
-}
-
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = ktime_add_safe(timer->node.expires, time);
@@ -140,15 +134,6 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
return timer->_softexpires;
}
-static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
-{
- return timer->node.expires;
-}
-static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
-{
- return timer->_softexpires;
-}
-
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->node.expires);
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index aa49ffa130e5..02b010df6570 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -6,26 +6,6 @@
#include <linux/timerqueue.h>
#include <linux/seqlock.h>
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (HIGH_RES_NSEC)
-# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
-
-#else
-
-# define MONOTONIC_RES_NSEC LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-
-#endif
-
#ifdef CONFIG_64BIT
# define __hrtimer_clock_base_align ____cacheline_aligned
#else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e51b8ef0cebd..94a03591990c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -171,11 +171,11 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
+extern int movable_gigantic_pages __read_mostly;
extern int sysctl_hugetlb_shm_group __read_mostly;
extern struct list_head huge_boot_pages[MAX_NUMNODES];
void hugetlb_bootmem_alloc(void);
-bool hugetlb_bootmem_allocated(void);
extern nodemask_t hugetlb_bootmem_nodes;
void hugetlb_bootmem_set_nodes(void);
@@ -280,6 +280,8 @@ void fixup_hugetlb_reservations(struct vm_area_struct *vma);
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+unsigned int arch_hugetlb_cma_order(void);
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
@@ -929,7 +931,7 @@ static inline bool hugepage_movable_supported(struct hstate *h)
if (!hugepage_migration_supported(h))
return false;
- if (hstate_is_gigantic(h))
+ if (hstate_is_gigantic(h) && !movable_gigantic_pages)
return false;
return true;
}
@@ -1303,11 +1305,6 @@ static inline bool hugetlbfs_pagecache_present(
static inline void hugetlb_bootmem_alloc(void)
{
}
-
-static inline bool hugetlb_bootmem_allocated(void)
-{
- return false;
-}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -1321,9 +1318,9 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
}
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
-extern void __init hugetlb_cma_reserve(int order);
+extern void __init hugetlb_cma_reserve(void);
#else
-static inline __init void hugetlb_cma_reserve(int order)
+static inline __init void hugetlb_cma_reserve(void)
{
}
#endif
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index b424555753b1..b77bc55a4cf3 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -15,6 +15,7 @@
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -48,6 +49,7 @@ struct hwrng {
/* internal. */
struct list_head list;
struct kref ref;
+ struct work_struct cleanup_work;
struct completion cleanup_done;
struct completion dying;
};
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 9fcb6410a584..971d53349b6f 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -25,7 +25,7 @@
* @I3C_ERROR_M2: M2 error
*
* These are the standard error codes as defined by the I3C specification.
- * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * When -EIO is returned by the i3c_device_do_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
 * &struct i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
@@ -79,9 +79,6 @@ struct i3c_xfer {
enum i3c_error_code err;
};
-/* keep back compatible */
-#define i3c_priv_xfer i3c_xfer
-
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
@@ -308,15 +305,23 @@ static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
i3c_i2c_driver_unregister, \
__i2cdrv)
+#if IS_ENABLED(CONFIG_I3C)
int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
int nxfers, enum i3c_xfer_mode mode);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
+#else
+static inline int
+i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
+{
+ return -EOPNOTSUPP;
+}
-static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_xfer *xfers,
- int nxfers)
+static inline u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev)
{
- return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+ return 0;
}
+#endif
int i3c_device_do_setdasa(struct i3c_device *dev);
@@ -358,6 +363,5 @@ int i3c_device_request_ibi(struct i3c_device *dev,
void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
-u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
#endif /* I3C_DEV_H */
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 58d01ed4cce7..592b646f6134 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -462,6 +462,8 @@ struct i3c_bus {
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
* @set_speed: adjust I3C open drain mode timing.
+ * @set_dev_nack_retry: configure device NACK retry count for the master
+ * controller.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -491,6 +493,8 @@ struct i3c_master_controller_ops {
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
+ int (*set_dev_nack_retry)(struct i3c_master_controller *master,
+ unsigned long dev_nack_retry_cnt);
};
/**
@@ -505,6 +509,8 @@ struct i3c_master_controller_ops {
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
* @hotjoin: true if the master support hotjoin
+ * @rpm_allowed: true if Runtime PM allowed
+ * @rpm_ibi_allowed: true if IBI and Hot-Join allowed while runtime suspended
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
@@ -514,6 +520,7 @@ struct i3c_master_controller_ops {
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
* be done from a sleep-able context
 * @dev_nack_retry_count: retry count when a slave device NACKs
*
* A &struct i3c_master_controller has to be registered to the I3C subsystem
* through i3c_master_register(). None of &struct i3c_master_controller fields
@@ -528,12 +535,15 @@ struct i3c_master_controller {
unsigned int secondary : 1;
unsigned int init_done : 1;
unsigned int hotjoin: 1;
+ unsigned int rpm_allowed: 1;
+ unsigned int rpm_ibi_allowed: 1;
struct {
struct list_head i3c;
struct list_head i2c;
} boardinfo;
struct i3c_bus bus;
struct workqueue_struct *wq;
+ unsigned int dev_nack_retry_count;
};
/**
@@ -595,6 +605,7 @@ int i3c_master_get_free_addr(struct i3c_master_controller *master,
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr);
int i3c_master_do_daa(struct i3c_master_controller *master);
+int i3c_master_do_daa_ext(struct i3c_master_controller *master, bool rstdaa);
struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
size_t len, bool force_bounce,
enum dma_data_direction dir);
diff --git a/include/linux/ieee80211-eht.h b/include/linux/ieee80211-eht.h
index f9782e46c5e5..f8e9f5d36d2a 100644
--- a/include/linux/ieee80211-eht.h
+++ b/include/linux/ieee80211-eht.h
@@ -558,6 +558,17 @@ struct ieee80211_mle_tdls_common_info {
#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+#define IEEE80211_EML_CTRL_EMLSR_MODE BIT(0)
+#define IEEE80211_EML_CTRL_EMLMR_MODE BIT(1)
+#define IEEE80211_EML_CTRL_EMLSR_PARAM_UPDATE BIT(2)
+#define IEEE80211_EML_CTRL_INDEV_COEX_ACT BIT(3)
+
+#define IEEE80211_EML_EMLSR_PAD_DELAY 0x07
+#define IEEE80211_EML_EMLSR_TRANS_DELAY 0x38
+
+#define IEEE80211_EML_EMLMR_RX_MCS_MAP 0xf0
+#define IEEE80211_EML_EMLMR_TX_MCS_MAP 0x0f
+
/* no fixed fields in PRIO_ACCESS */
/**
@@ -1179,4 +1190,4 @@ static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
_data + ieee80211_mle_common_size(_data),\
_len - ieee80211_mle_common_size(_data))
-#endif /* LINUX_IEEE80211_H */
+#endif /* LINUX_IEEE80211_EHT_H */
diff --git a/include/linux/ieee80211-s1g.h b/include/linux/ieee80211-s1g.h
index 5b9ed2dcc00e..22dde4cbc1b0 100644
--- a/include/linux/ieee80211-s1g.h
+++ b/include/linux/ieee80211-s1g.h
@@ -572,4 +572,4 @@ static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
}
}
-#endif /* LINUX_IEEE80211_H */
+#endif /* LINUX_IEEE80211_S1G_H */
diff --git a/include/linux/ieee80211-uhr.h b/include/linux/ieee80211-uhr.h
new file mode 100644
index 000000000000..132acced7d79
--- /dev/null
+++ b/include/linux/ieee80211-uhr.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 UHR definitions
+ *
+ * Copyright (c) 2025-2026 Intel Corporation
+ */
+#ifndef LINUX_IEEE80211_UHR_H
+#define LINUX_IEEE80211_UHR_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_UHR_OPER_PARAMS_DPS_ENA 0x0001
+#define IEEE80211_UHR_OPER_PARAMS_NPCA_ENA 0x0002
+#define IEEE80211_UHR_OPER_PARAMS_DBE_ENA 0x0004
+#define IEEE80211_UHR_OPER_PARAMS_PEDCA_ENA 0x0008
+
+struct ieee80211_uhr_operation {
+ __le16 params;
+ u8 basic_mcs_nss_set[4];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_UHR_NPCA_PARAMS_PRIMARY_CHAN_OFFS 0x0000000F
+#define IEEE80211_UHR_NPCA_PARAMS_MIN_DUR_THRESH 0x000000F0
+#define IEEE80211_UHR_NPCA_PARAMS_SWITCH_DELAY 0x00003F00
+#define IEEE80211_UHR_NPCA_PARAMS_SWITCH_BACK_DELAY 0x000FC000
+#define IEEE80211_UHR_NPCA_PARAMS_INIT_QSRC 0x00300000
+#define IEEE80211_UHR_NPCA_PARAMS_MOPLEN 0x00400000
+#define IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES 0x00800000
+
+struct ieee80211_uhr_npca_info {
+ __le32 params;
+ __le16 dis_subch_bmap[];
+} __packed;
+
+static inline bool ieee80211_uhr_oper_size_ok(const u8 *data, u8 len,
+ bool beacon)
+{
+ const struct ieee80211_uhr_operation *oper = (const void *)data;
+ u8 needed = sizeof(*oper);
+
+ if (len < needed)
+ return false;
+
+ /* nothing else present in beacons */
+ if (beacon)
+ return true;
+
+ /* FIXME: DPS, DBE, P-EDCA (consider order, also relative to NPCA) */
+
+ if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_NPCA_ENA)) {
+ const struct ieee80211_uhr_npca_info *npca =
+ (const void *)oper->variable;
+
+ needed += sizeof(*npca);
+
+ if (len < needed)
+ return false;
+
+ if (npca->params & cpu_to_le32(IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES))
+ needed += sizeof(npca->dis_subch_bmap[0]);
+ }
+
+ return len >= needed;
+}
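+
+A hedged parsing sketch (the @data/@len variables are hypothetical, pointing just past the extension ID byte of a WLAN_EID_EXT_UHR_OPER element): the size check must pass before the variable part is dereferenced:

	const struct ieee80211_uhr_operation *oper;
	const __le16 *bmap;

	if (!ieee80211_uhr_oper_size_ok(data, len, false))
		return -EINVAL;

	oper = (const void *)data;
	/* Safe now: the check above covered the optional NPCA info. */
	bmap = ieee80211_uhr_npca_dis_subch_bitmap(oper);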
+
+/*
+ * Note: this must not be called on an element coming from a beacon;
+ * ensure ieee80211_uhr_oper_size_ok(..., false) passes first
+ */
+static inline const struct ieee80211_uhr_npca_info *
+ieee80211_uhr_npca_info(const struct ieee80211_uhr_operation *oper)
+{
+ if (!(oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_NPCA_ENA)))
+ return NULL;
+
+ /* FIXME: DPS */
+
+ return (const void *)oper->variable;
+}
+
+static inline const __le16 *
+ieee80211_uhr_npca_dis_subch_bitmap(const struct ieee80211_uhr_operation *oper)
+{
+ const struct ieee80211_uhr_npca_info *npca;
+
+ npca = ieee80211_uhr_npca_info(oper);
+ if (!npca)
+ return NULL;
+ if (!(npca->params & cpu_to_le32(IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES)))
+ return NULL;
+ return npca->dis_subch_bmap;
+}
+
+#define IEEE80211_UHR_MAC_CAP0_DPS_SUPP 0x01
+#define IEEE80211_UHR_MAC_CAP0_DPS_ASSIST_SUPP 0x02
+#define IEEE80211_UHR_MAC_CAP0_DPS_AP_STATIC_HCM_SUPP 0x04
+#define IEEE80211_UHR_MAC_CAP0_NPCA_SUPP 0x10
+#define IEEE80211_UHR_MAC_CAP0_ENH_BSR_SUPP 0x20
+#define IEEE80211_UHR_MAC_CAP0_ADD_MAP_TID_SUPP 0x40
+#define IEEE80211_UHR_MAC_CAP0_EOTSP_SUPP 0x80
+
+#define IEEE80211_UHR_MAC_CAP1_DSO_SUPP 0x01
+#define IEEE80211_UHR_MAC_CAP1_PEDCA_SUPP 0x02
+#define IEEE80211_UHR_MAC_CAP1_DBE_SUPP 0x04
+#define IEEE80211_UHR_MAC_CAP1_UL_LLI_SUPP 0x08
+#define IEEE80211_UHR_MAC_CAP1_P2P_LLI_SUPP 0x10
+#define IEEE80211_UHR_MAC_CAP1_PUO_SUPP 0x20
+#define IEEE80211_UHR_MAC_CAP1_AP_PUO_SUPP 0x40
+#define IEEE80211_UHR_MAC_CAP1_DUO_SUPP 0x80
+
+#define IEEE80211_UHR_MAC_CAP2_OMC_UL_MU_DIS_RX_SUPP 0x01
+#define IEEE80211_UHR_MAC_CAP2_AOM_SUPP 0x02
+#define IEEE80211_UHR_MAC_CAP2_IFCS_LOC_SUPP 0x04
+#define IEEE80211_UHR_MAC_CAP2_UHR_TRS_SUPP 0x08
+#define IEEE80211_UHR_MAC_CAP2_TXSPG_SUPP 0x10
+#define IEEE80211_UHR_MAC_CAP2_TXOP_RET_IN_TXSPG 0x20
+#define IEEE80211_UHR_MAC_CAP2_UHR_OM_PU_TO_LOW 0xC0
+
+#define IEEE80211_UHR_MAC_CAP3_UHR_OM_PU_TO_HIGH 0x03
+#define IEEE80211_UHR_MAC_CAP3_PARAM_UPD_ADV_NOTIF_INTV 0x1C
+#define IEEE80211_UHR_MAC_CAP3_UPD_IND_TIM_INTV_LOW 0xE0
+
+#define IEEE80211_UHR_MAC_CAP4_UPD_IND_TIM_INTV_HIGH 0x03
+#define IEEE80211_UHR_MAC_CAP4_BOUNDED_ESS 0x04
+#define IEEE80211_UHR_MAC_CAP4_BTM_ASSURANCE 0x08
+#define IEEE80211_UHR_MAC_CAP4_CO_BF_SUPP 0x10
+
+#define IEEE80211_UHR_MAC_CAP_DBE_MAX_BW 0x07
+#define IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES 0x08
+#define IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES 0x10
+
+struct ieee80211_uhr_cap_mac {
+ u8 mac_cap[5];
+} __packed;
+
+struct ieee80211_uhr_cap {
+ struct ieee80211_uhr_cap_mac mac;
+ /* DBE, PHY capabilities */
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_SND_NDP_LE80 0x01
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_DL_MU_LE80 0x02
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_SND_NDP_160 0x04
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_DL_MU_160 0x08
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_SND_NDP_320 0x10
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_DL_MU_320 0x20
+#define IEEE80211_UHR_PHY_CAP_ELR_RX 0x40
+#define IEEE80211_UHR_PHY_CAP_ELR_TX 0x80
+
+struct ieee80211_uhr_cap_phy {
+ u8 cap;
+} __packed;
+
+static inline bool ieee80211_uhr_capa_size_ok(const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_uhr_cap *cap = (const void *)data;
+ size_t needed = sizeof(*cap) + sizeof(struct ieee80211_uhr_cap_phy);
+
+ if (len < needed)
+ return false;
+
+ /*
+ * A non-AP STA does not include the DBE Capability Parameters field
+ * in the UHR MAC Capabilities Information field.
+ */
+ if (from_ap && cap->mac.mac_cap[1] & IEEE80211_UHR_MAC_CAP1_DBE_SUPP) {
+ u8 dbe;
+
+ needed += 1;
+ if (len < needed)
+ return false;
+
+ dbe = cap->variable[0];
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES)
+ needed += 3;
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES)
+ needed += 3;
+ }
+
+ return len >= needed;
+}
+
+static inline const struct ieee80211_uhr_cap_phy *
+ieee80211_uhr_phy_cap(const struct ieee80211_uhr_cap *cap, bool from_ap)
+{
+ u8 offs = 0;
+
+ if (from_ap && cap->mac.mac_cap[1] & IEEE80211_UHR_MAC_CAP1_DBE_SUPP) {
+ u8 dbe = cap->variable[0];
+
+ offs += 1;
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES)
+ offs += 3;
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES)
+ offs += 3;
+ }
+
+ return (const void *)&cap->variable[offs];
+}
+
+#define IEEE80211_SMD_INFO_CAPA_DL_DATA_FWD 0x01
+#define IEEE80211_SMD_INFO_CAPA_MAX_NUM_PREP 0x0E
+#define IEEE80211_SMD_INFO_CAPA_TYPE 0x10
+#define IEEE80211_SMD_INFO_CAPA_PTK_PER_AP_MLD 0x20
+
+struct ieee80211_smd_info {
+ u8 id[ETH_ALEN];
+ u8 capa;
+ __le16 timeout;
+} __packed;
+
+#endif /* LINUX_IEEE80211_UHR_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 96439de55f07..0aa2fb8f88de 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2025 Intel Corporation
+ * Copyright (c) 2018 - 2026 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -1186,6 +1186,12 @@ struct ieee80211_mgmt {
u8 action_code;
u8 variable[];
} __packed epcs;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 control;
+ u8 variable[];
+ } __packed eml_omn;
} u;
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -1200,8 +1206,9 @@ struct ieee80211_mgmt {
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
+#define BSS_MEMBERSHIP_SELECTOR_UHR_PHY 120
-#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY
+#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_UHR_PHY
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1351,6 +1358,7 @@ struct ieee80211_tdls_data {
#define WLAN_AUTH_FILS_SK 4
#define WLAN_AUTH_FILS_SK_PFS 5
#define WLAN_AUTH_FILS_PK 6
+#define WLAN_AUTH_EPPKE 9
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1801,6 +1809,15 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
+ WLAN_EID_EXT_UHR_OPER = 151,
+ WLAN_EID_EXT_UHR_CAPA = 152,
+ WLAN_EID_EXT_MACP = 153,
+ WLAN_EID_EXT_SMD = 154,
+ WLAN_EID_EXT_BSS_SMD_TRANS_PARAMS = 155,
+ WLAN_EID_EXT_CHAN_USAGE = 156,
+ WLAN_EID_EXT_UHR_MODE_CHG = 157,
+ WLAN_EID_EXT_UHR_PARAM_UPD = 158,
+ WLAN_EID_EXT_TXPI = 159,
};
/* Action category code */
@@ -2744,6 +2761,22 @@ static inline bool for_each_element_completed(const struct element *element,
#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4)
#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
+/* EBPCC = Enhanced BSS Parameter Change Count */
+#define IEEE80211_ENH_CRIT_UPD_EBPCC 0x0F
+#define IEEE80211_ENH_CRIT_UPD_TYPE 0x70
+#define IEEE80211_ENH_CRIT_UPD_TYPE_NO_UHR 0
+#define IEEE80211_ENH_CRIT_UPD_TYPE_UHR 1
+#define IEEE80211_ENH_CRIT_UPD_ALL 0x80
+
+/**
+ * struct ieee80211_enh_crit_upd - enhanced critical update (UHR)
+ * @v: value of the enhanced critical update data,
+ * see %IEEE80211_ENH_CRIT_UPD_* to parse the bits
+ */
+struct ieee80211_enh_crit_upd {
+ u8 v;
+} __packed;
+
/*
* reduced neighbor report, based on Draft P802.11ax_D6.1,
* section 9.4.2.170 and accepted contributions.
@@ -2762,6 +2795,7 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10
#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+#define IEEE80211_RNR_TBTT_PARAMS_SAME_SMD 0x80
#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127
#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
@@ -2814,12 +2848,14 @@ struct ieee80211_tbtt_info_ge_11 {
u8 bss_params;
s8 psd_20;
struct ieee80211_rnr_mld_params mld_params;
+ struct ieee80211_enh_crit_upd enh_crit_upd;
} __packed;
#include "ieee80211-ht.h"
#include "ieee80211-vht.h"
#include "ieee80211-he.h"
#include "ieee80211-eht.h"
+#include "ieee80211-uhr.h"
#include "ieee80211-mesh.h"
#include "ieee80211-s1g.h"
#include "ieee80211-p2p.h"
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index f7f34eb15e06..e6272f9c5e42 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -594,8 +594,17 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
}
}
+struct vlan_type_depth {
+ __be16 type;
+ u16 depth;
+};
+
+struct vlan_type_depth __vlan_get_protocol_offset(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset);
+
/**
- * __vlan_get_protocol_offset() - get protocol EtherType.
+ * vlan_get_protocol_offset_inline() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
* @mac_offset: MAC offset
@@ -604,40 +613,24 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
- __be16 type,
- int mac_offset,
- int *depth)
+static inline
+__be16 vlan_get_protocol_offset_inline(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset,
+ int *depth)
{
- unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
-
- /* if type is 802.1Q/AD then the header should already be
- * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
- * ETH_HLEN otherwise
- */
if (eth_type_vlan(type)) {
- if (vlan_depth) {
- if (WARN_ON(vlan_depth < VLAN_HLEN))
- return 0;
- vlan_depth -= VLAN_HLEN;
- } else {
- vlan_depth = ETH_HLEN;
- }
- do {
- struct vlan_hdr vhdr, *vh;
+ struct vlan_type_depth res;
- vh = skb_header_pointer(skb, mac_offset + vlan_depth,
- sizeof(vhdr), &vhdr);
- if (unlikely(!vh || !--parse_depth))
- return 0;
+ res = __vlan_get_protocol_offset(skb, type, mac_offset);
- type = vh->h_vlan_encapsulated_proto;
- vlan_depth += VLAN_HLEN;
- } while (eth_type_vlan(type));
+ if (depth && res.type)
+ *depth = res.depth;
+ return res.type;
}
if (depth)
- *depth = vlan_depth;
+ *depth = skb->mac_len;
return type;
}
@@ -645,7 +638,7 @@ static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
- return __vlan_get_protocol_offset(skb, type, 0, depth);
+ return vlan_get_protocol_offset_inline(skb, type, 0, depth);
}
/**
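
For illustration (hypothetical call site): the out-of-line helper returns both results in one struct, while the renamed inline wrapper keeps the old EtherType-plus-out-parameter convention:

	struct vlan_type_depth td;
	int depth;
	__be16 proto;

	/* Old-style calling convention, now via the inline wrapper. */
	proto = vlan_get_protocol_offset_inline(skb, skb->protocol, 0, &depth);

	/* New out-of-line helper: both values in one return. */
	td = __vlan_get_protocol_offset(skb, skb->protocol, 0);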
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 8e29cb4e6a01..abf8923f8fc5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -69,6 +69,7 @@ static inline int ima_measure_critical_data(const char *event_label,
#ifdef CONFIG_HAVE_IMA_KEXEC
int __init ima_free_kexec_buffer(void);
int __init ima_get_kexec_buffer(void **addr, size_t *size);
+int ima_validate_range(phys_addr_t phys, size_t size);
#endif
#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 5730ba6b1cfa..dccbeb25f701 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -38,11 +38,11 @@ struct in_device {
struct ip_mc_list *mc_tomb;
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
- unsigned long mr_maxdelay;
unsigned long mr_qi; /* Query Interval */
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
+ u32 mr_maxdelay;
u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
index 92045d18cbfc..28776ee28d8e 100644
--- a/include/linux/init_syscalls.h
+++ b/include/linux/init_syscalls.h
@@ -17,3 +17,4 @@ int __init init_mkdir(const char *pathname, umode_t mode);
int __init init_rmdir(const char *pathname);
int __init init_utimes(char *filename, struct timespec64 *ts);
int __init init_dup(struct file *file);
+int __init init_pivot_root(const char *new_root, const char *put_old);
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index f1a1f4c92ded..7e5d26c8136f 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -3,8 +3,6 @@
#ifndef __LINUX_INITRD_H
#define __LINUX_INITRD_H
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
/* starting block # of image */
extern int rd_image_start;
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 711a1f0d1a73..a1b4cf81adc2 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
@@ -55,6 +56,19 @@ static __always_inline void instrument_read_write(const volatile void *v, size_t
kcsan_check_read_write(v, size);
}
+static __always_inline void instrument_atomic_check_alignment(const volatile void *v, size_t size)
+{
+#ifndef __DISABLE_EXPORTS
+ if (IS_ENABLED(CONFIG_DEBUG_ATOMIC)) {
+ unsigned int mask = size - 1;
+
+ if (IS_ENABLED(CONFIG_DEBUG_ATOMIC_LARGEST_ALIGN))
+ mask &= sizeof(struct { long x; } __aligned_largest) - 1;
+ WARN_ON_ONCE((unsigned long)v & mask);
+ }
+#endif
+}
+
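An illustrative trigger, assuming CONFIG_DEBUG_ATOMIC=y: a deliberately misaligned atomic_t now warns on its first instrumented access, since the address fails the size-derived mask test above:

	static u8 buf[8] __aligned(8);
	atomic_t *v = (atomic_t *)&buf[1];	/* misaligned on purpose */

	atomic_set(v, 0);	/* instrument_atomic_write() -> WARN_ON_ONCE */
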
/**
* instrument_atomic_read - instrument atomic read access
* @v: address of access
@@ -67,6 +81,7 @@ static __always_inline void instrument_atomic_read(const volatile void *v, size_
{
kasan_check_read(v, size);
kcsan_check_atomic_read(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
@@ -81,6 +96,7 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
{
kasan_check_write(v, size);
kcsan_check_atomic_write(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
@@ -95,6 +111,7 @@ static __always_inline void instrument_atomic_read_write(const volatile void *v,
{
kasan_check_write(v, size);
kcsan_check_atomic_read_write(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 266f2b39213a..6cd26ffb0505 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -181,9 +181,8 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
extern int __must_check
-__request_percpu_irq(unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *devname,
- const cpumask_t *affinity, void __percpu *percpu_dev_id);
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler, const char *devname,
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -193,17 +192,8 @@ static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
- return __request_percpu_irq(irq, handler, 0,
- devname, NULL, percpu_dev_id);
-}
-
-static inline int __must_check
-request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
- const char *devname, const cpumask_t *affinity,
- void __percpu *percpu_dev_id)
-{
- return __request_percpu_irq(irq, handler, 0,
- devname, affinity, percpu_dev_id);
+ return request_percpu_irq_affinity(irq, handler, devname,
+ NULL, percpu_dev_id);
}
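
A hedged usage sketch (driver names hypothetical): callers needing an affinity mask now use the un-underscored name directly, while request_percpu_irq() remains the NULL-affinity shorthand:

	static DEFINE_PER_CPU(struct foo_data, foo_pcpu);

	err = request_percpu_irq_affinity(irq, foo_handler, "foo",
					  cpu_possible_mask, &foo_pcpu);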
extern int __must_check
@@ -228,7 +218,7 @@ static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
- return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+ return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags | IRQF_COND_ONESHOT,
devname, dev_id);
}
@@ -871,12 +861,6 @@ static inline void init_irq_proc(void)
}
#endif
-#ifdef CONFIG_IRQ_TIMINGS
-void irq_timings_enable(void);
-void irq_timings_disable(void);
-u64 irq_timings_next_event(u64 now);
-#endif
-
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 85fe4e6b275c..d1aa4edfc2a5 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -12,6 +12,7 @@ void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
bool io_is_uring_fops(struct file *file);
+int __io_uring_fork(struct task_struct *tsk);
static inline void io_uring_files_cancel(void)
{
@@ -25,9 +26,16 @@ static inline void io_uring_task_cancel(void)
}
static inline void io_uring_free(struct task_struct *tsk)
{
- if (tsk->io_uring)
+ if (tsk->io_uring || tsk->io_uring_restrict)
__io_uring_free(tsk);
}
+static inline int io_uring_fork(struct task_struct *tsk)
+{
+ if (tsk->io_uring_restrict)
+ return __io_uring_fork(tsk);
+
+ return 0;
+}
#else
static inline void io_uring_task_cancel(void)
{
@@ -46,6 +54,10 @@ static inline bool io_is_uring_fops(struct file *file)
{
return false;
}
+static inline int io_uring_fork(struct task_struct *tsk)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index a3e8ddc9b380..3e4a82a6f817 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -219,12 +219,26 @@ struct io_rings {
struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
+struct io_bpf_filter;
+struct io_bpf_filters {
+ refcount_t refs; /* ref for ->bpf_filters */
+ spinlock_t lock; /* protects ->bpf_filters modifications */
+ struct io_bpf_filter __rcu **filters;
+ struct rcu_head rcu_head;
+};
+
struct io_restriction {
DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ struct io_bpf_filters *bpf_filters;
+ /* ->bpf_filters needs COW on modification */
+ bool bpf_filters_cow;
u8 sqe_flags_allowed;
u8 sqe_flags_required;
- bool registered;
+ /* IORING_OP_* restrictions exist */
+ bool op_registered;
+ /* IORING_REGISTER_* restrictions exist */
+ bool reg_registered;
};
struct io_submit_link {
@@ -259,7 +273,8 @@ struct io_ring_ctx {
struct {
unsigned int flags;
unsigned int drain_next: 1;
- unsigned int restricted: 1;
+ unsigned int op_restricted: 1;
+ unsigned int reg_restricted: 1;
unsigned int off_timeout_used: 1;
unsigned int drain_active: 1;
unsigned int has_evfd: 1;
@@ -274,6 +289,8 @@ struct io_ring_ctx {
struct task_struct *submitter_task;
struct io_rings *rings;
+ /* cache of ->restrictions.bpf_filters->filters */
+ struct io_bpf_filter __rcu **bpf_filters;
struct percpu_ref refs;
clockid_t clockid;
@@ -316,7 +333,7 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
bool poll_multi_queue;
- struct io_wq_work_list iopoll_list;
+ struct list_head iopoll_list;
struct io_file_table file_table;
struct io_rsrc_data buf_table;
@@ -444,6 +461,9 @@ struct io_ring_ctx {
struct list_head defer_list;
unsigned nr_drained;
+ /* protected by ->completion_lock */
+ unsigned nr_req_allocated;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
struct list_head napi_list; /* track busy poll napi_id */
spinlock_t napi_lock; /* napi_list lock */
@@ -456,10 +476,6 @@ struct io_ring_ctx {
DECLARE_HASHTABLE(napi_ht, 4);
#endif
- /* protected by ->completion_lock */
- unsigned evfd_last_cq_tail;
- unsigned nr_req_allocated;
-
/*
* Protection for resize vs mmap races - both the mmap and resize
* side will need to grab this lock, to prevent either side from
@@ -714,15 +730,21 @@ struct io_kiocb {
atomic_t refs;
bool cancel_seq_set;
- struct io_task_work io_task_work;
+
+ union {
+ struct io_task_work io_task_work;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ };
+
union {
/*
* for polled requests, i.e. IORING_OP_POLL_ADD and async armed
* poll
*/
struct hlist_node hash_node;
- /* For IOPOLL setup queues, with hybrid polling */
- u64 iopoll_start;
+ /* IOPOLL completion handling */
+ struct list_head iopoll_node;
/* for private io_kiocb freeing */
struct rcu_head rcu_head;
};
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6bb941707d12..99b7209dabd7 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -345,9 +345,9 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);
void iomap_read_folio(const struct iomap_ops *ops,
- struct iomap_read_folio_ctx *ctx);
+ struct iomap_read_folio_ctx *ctx, void *private);
void iomap_readahead(const struct iomap_ops *ops,
- struct iomap_read_folio_ctx *ctx);
+ struct iomap_read_folio_ctx *ctx, void *private);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
@@ -566,6 +566,15 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+/*
+ * Bounce buffer instead of using zero copy access.
+ *
+ * This is needed if the device needs stable data to checksum or generate
+ * parity. The file system must hook into the I/O submission and offload
+ * completions to user context for reads when this is set.
+ */
+#define IOMAP_DIO_BOUNCE (1 << 4)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags, void *private, size_t done_before);
@@ -599,7 +608,7 @@ static inline void iomap_bio_read_folio(struct folio *folio,
.cur_folio = folio,
};
- iomap_read_folio(ops, &ctx);
+ iomap_read_folio(ops, &ctx, NULL);
}
static inline void iomap_bio_readahead(struct readahead_control *rac,
@@ -610,7 +619,7 @@ static inline void iomap_bio_readahead(struct readahead_control *rac,
.rac = rac,
};
- iomap_readahead(ops, &ctx);
+ iomap_readahead(ops, &ctx, NULL);
}
#endif /* CONFIG_BLOCK */
diff --git a/include/linux/iommu-debug-pagealloc.h b/include/linux/iommu-debug-pagealloc.h
new file mode 100644
index 000000000000..46c3c1f70150
--- /dev/null
+++ b/include/linux/iommu-debug-pagealloc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 - Google Inc
+ * Author: Mostafa Saleh <smostafa@google.com>
+ * IOMMU API debug page alloc sanitizer
+ */
+
+#ifndef __LINUX_IOMMU_DEBUG_PAGEALLOC_H
+#define __LINUX_IOMMU_DEBUG_PAGEALLOC_H
+
+#ifdef CONFIG_IOMMU_DEBUG_PAGEALLOC
+DECLARE_STATIC_KEY_FALSE(iommu_debug_initialized);
+
+extern struct page_ext_operations page_iommu_debug_ops;
+
+void __iommu_debug_check_unmapped(const struct page *page, int numpages);
+
+static inline void iommu_debug_check_unmapped(const struct page *page, int numpages)
+{
+ if (static_branch_unlikely(&iommu_debug_initialized))
+ __iommu_debug_check_unmapped(page, numpages);
+}
+
+#else
+static inline void iommu_debug_check_unmapped(const struct page *page,
+ int numpages)
+{
+}
+
+#endif /* CONFIG_IOMMU_DEBUG_PAGEALLOC */
+
+#endif /* __LINUX_IOMMU_DEBUG_PAGEALLOC_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8c66284a91a8..54b8b48c762e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -910,6 +910,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+struct iommu_domain *iommu_driver_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
@@ -1187,6 +1188,10 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
+
+/* PCI device reset functions */
+int pci_dev_reset_iommu_prepare(struct pci_dev *pdev);
+void pci_dev_reset_iommu_done(struct pci_dev *pdev);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1510,6 +1515,15 @@ static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
}
static inline void iommu_free_global_pasid(ioasid_t pasid) {}
+
+static inline int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline void pci_dev_reset_iommu_done(struct pci_dev *pdev)
+{
+}
#endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IRQ_MSI_IOMMU
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 9afa30f9346f..5533a5debf3f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -10,6 +10,7 @@
#define _LINUX_IOPORT_H
#ifndef __ASSEMBLY__
+#include <linux/args.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/minmax.h>
@@ -165,8 +166,12 @@ enum {
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
-#define DEFINE_RES(_start, _size, _flags) \
+#define __DEFINE_RES0() \
+ DEFINE_RES_NAMED(0, 0, NULL, IORESOURCE_UNSET)
+#define __DEFINE_RES3(_start, _size, _flags) \
DEFINE_RES_NAMED(_start, _size, NULL, _flags)
+#define DEFINE_RES(...) \
+ CONCATENATE(__DEFINE_RES, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
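
Illustrative expansions of the now-variadic macro (relying on COUNT_ARGS()/CONCATENATE() from linux/args.h to pick the 0- or 3-argument form; the addresses below are made up):

	/* Zero arguments: an unset placeholder resource. */
	static struct resource res_unset = DEFINE_RES();

	/* Three arguments: behaves exactly as before. */
	static struct resource res_mmio =
		DEFINE_RES(0xfed40000, 0x5000, IORESOURCE_MEM);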
@@ -232,6 +237,7 @@ struct resource_constraint {
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
+extern struct resource soft_reserve_resource;
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
@@ -338,7 +344,7 @@ static inline bool resource_union(const struct resource *r1, const struct resour
* Check if this resource is added to a resource tree or detached. Caller is
* responsible for not racing assignment.
*/
-static inline bool resource_assigned(struct resource *res)
+static inline bool resource_assigned(const struct resource *res)
{
return res->parent;
}
@@ -418,6 +424,10 @@ walk_system_ram_res_rev(u64 start, u64 end, void *arg,
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
+extern int walk_soft_reserve_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
+region_intersects_soft_reserve(resource_size_t start, size_t size);
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7294e4e89b79..443053a76dcf 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -126,6 +126,28 @@ static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
skb_network_header_len(skb);
}
+static inline unsigned int
+ipv6_payload_len(const struct sk_buff *skb, const struct ipv6hdr *ip6)
+{
+ u32 len = ntohs(ip6->payload_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len :
+ skb->len - skb_network_offset(skb) - sizeof(struct ipv6hdr);
+}
+
+static inline unsigned int skb_ipv6_payload_len(const struct sk_buff *skb)
+{
+ return ipv6_payload_len(skb, ipv6_hdr(skb));
+}
+
+#define IPV6_MAXPLEN 65535
+
+static inline void ipv6_set_payload_len(struct ipv6hdr *ip6, unsigned int len)
+{
+ ip6->payload_len = len <= IPV6_MAXPLEN ? htons(len) : 0;
+}
+
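A short sketch (hypothetical call site) of how the pair fits together for TCP GSO, where a payload_len of 0 stands in for lengths above IPV6_MAXPLEN:

	struct ipv6hdr *ip6 = ipv6_hdr(skb);
	unsigned int plen = skb_ipv6_payload_len(skb);	/* real length, even if oversized */

	/* Write it back after editing the packet; becomes 0 if > IPV6_MAXPLEN. */
	ipv6_set_payload_len(ip6, plen);
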
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
@@ -155,7 +177,6 @@ struct inet6_skb_parm {
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
-#define IP6SKB_FAKEJUMBO 512
#define IP6SKB_MULTIPATH 1024
#define IP6SKB_MCROUTE 2048
};
@@ -205,17 +226,14 @@ struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;
-struct inet6_cork {
- struct ipv6_txoptions *opt;
- u8 hop_limit;
- u8 tclass;
- u8 dontfrag:1;
-};
-
/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
/* Used in tx path (inet6_csk_route_socket(), ip6_xmit()) */
struct in6_addr saddr;
+ union {
+ struct in6_addr daddr;
+ struct in6_addr final;
+ };
__be32 flow_label;
u32 dst_cookie;
struct ipv6_txoptions __rcu *opt;
@@ -267,7 +285,6 @@ struct ipv6_pinfo {
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
- struct inet6_cork cork;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4a9f1d7b08c3..951acbdb9f84 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -459,6 +459,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* checks against the supplied affinity mask are not
* required. This is used for CPU hotplug where the
* target CPU is not yet set in the cpu_online_mask.
+ * @irq_pre_redirect: Optional function to be invoked before redirecting
+ * an interrupt via irq_work. Only used when CONFIG_SMP is enabled.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
@@ -503,6 +505,7 @@ struct irq_chip {
void (*irq_eoi)(struct irq_data *data);
int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+ void (*irq_pre_redirect)(struct irq_data *data);
int (*irq_retrigger)(struct irq_data *data);
int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
int (*irq_set_wake)(struct irq_data *data, unsigned int on);
@@ -595,9 +598,6 @@ enum {
#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
-struct irqaction;
-extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
@@ -658,7 +658,7 @@ extern void handle_fasteoi_nmi(struct irq_desc *desc);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
-extern int irq_chip_pm_put(struct irq_data *data);
+extern void irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
@@ -687,6 +687,13 @@ extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
+#ifdef CONFIG_SMP
+void irq_chip_pre_redirect_parent(struct irq_data *data);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+int irq_chip_redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force);
#endif
/* Disable or mask interrupts during a kernel kexec */
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
index 68ddcdb1cec5..3da1ad80fc9d 100644
--- a/include/linux/irqchip/arm-gic-v5.h
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -265,6 +265,12 @@
#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+#define GICV5_GSI_IC_TYPE GENMASK(31, 29)
+#define GICV5_GSI_IWB_TYPE 0x7
+
+#define GICV5_GSI_IWB_FRAME_ID GENMASK(28, 16)
+#define GICV5_GSI_IWB_WIRE GENMASK(15, 0)
+
/*
* Global Data structures and functions
*/
@@ -344,6 +350,7 @@ void __init gicv5_init_lpi_domain(void);
void __init gicv5_free_lpi_domain(void);
int gicv5_irs_of_probe(struct device_node *parent);
+int gicv5_irs_acpi_probe(void);
void gicv5_irs_remove(void);
int gicv5_irs_enable(void);
void gicv5_irs_its_probe(void);
@@ -391,4 +398,5 @@ int gicv5_alloc_lpi(void);
void gicv5_free_lpi(u32 lpi);
void __init gicv5_its_of_probe(struct device_node *parent);
+void __init gicv5_its_acpi_probe(void);
#endif
diff --git a/include/linux/irqchip/irq-renesas-rzt2h.h b/include/linux/irqchip/irq-renesas-rzt2h.h
new file mode 100644
index 000000000000..853fd5ee0b22
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzt2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/T2H Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZT2H
+#define __LINUX_IRQ_RENESAS_RZT2H
+
+#include <linux/platform_device.h>
+
+#define RZT2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
+
+#ifdef CONFIG_RENESAS_RZT2H_ICU
+void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no);
+#else
+static inline void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZT2H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 17902861de76..dae9a9b93665 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -2,9 +2,10 @@
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
-#include <linux/rcupdate.h>
+#include <linux/irq_work.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
/*
* Core internal functions to deal with irq descriptors
@@ -30,6 +31,17 @@ struct irqstat {
};
/**
+ * struct irq_redirect - interrupt redirection metadata
+ * @work: Harg irq_work item for handler execution on a different CPU
+ * @target_cpu: CPU to run irq handler on in case the current CPU is not part
+ * of the irq affinity mask
+ */
+struct irq_redirect {
+ struct irq_work work;
+ unsigned int target_cpu;
+};
+
+/**
* struct irq_desc - interrupt descriptor
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
@@ -46,6 +58,7 @@ struct irqstat {
* @threads_handled: stats field for deferred spurious detection of threaded handlers
* @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
+ * @redirect: Facility for redirecting interrupts via irq_work
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
@@ -83,6 +96,7 @@ struct irq_desc {
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
+ struct irq_redirect redirect;
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -185,6 +199,7 @@ int generic_handle_irq_safe(unsigned int irq);
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
+bool generic_handle_demux_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 62f81bbeb490..73c25d40846c 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -257,7 +257,8 @@ static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
- const char *name, phys_addr_t *pa);
+ const char *name, phys_addr_t *pa,
+ struct fwnode_handle *parent);
enum {
IRQCHIP_FWNODE_REAL,
@@ -267,18 +268,39 @@ enum {
static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_parented_fwnode(const char *name,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL, parent);
}
static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
- NULL);
+ NULL, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_id_parented_fwnode(const char *name, int id,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
+ NULL, parent);
}
static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa, NULL);
+}
+
+static inline struct fwnode_handle *irq_domain_alloc_parented_fwnode(phys_addr_t *pa,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa, parent);
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
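For the parented fwnode variants, a sketch under the assumption that the parent records the firmware-node hierarchy of software-created domains; the "example-msi" name and the caller-supplied parent are illustrative:

#include <linux/irqdomain.h>

static struct fwnode_handle *
example_alloc_child_fwnode(struct fwnode_handle *parent)
{
	/* Freed later by the caller with irq_domain_free_fwnode(). */
	return irq_domain_alloc_named_parented_fwnode("example-msi", parent);
}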
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f5eaf76198f3..a53a00d36228 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1815,7 +1815,4 @@ static inline int jbd2_handle_buffer_credits(handle_t *handle)
#endif /* __KERNEL__ */
-#define EFSBADCRC EBADMSG /* Bad CRC detected */
-#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
-
#endif /* _LINUX_JBD2_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5b46924fdff5..e5570a16cbb1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -21,7 +21,6 @@
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/bitops.h>
-#include <linux/hex.h>
#include <linux/kstrtox.h>
#include <linux/log2.h>
#include <linux/math.h>
@@ -32,7 +31,7 @@
#include <linux/build_bug.h>
#include <linux/sprintf.h>
#include <linux/static_call_types.h>
-#include <linux/instruction_pointer.h>
+#include <linux/trace_printk.h>
#include <linux/util_macros.h>
#include <linux/wordpart.h>
@@ -40,8 +39,6 @@
#include <uapi/linux/kernel.h>
-#define STACK_MAGIC 0xdeadbeef
-
struct completion;
struct user;
@@ -192,215 +189,9 @@ enum system_states {
};
extern enum system_states system_state;
-/*
- * General tracing related utility functions - trace_printk(),
- * tracing_on/tracing_off and tracing_start()/tracing_stop
- *
- * Use tracing_on/tracing_off when you want to quickly turn on or off
- * tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/tracing/tracing_on
- * file, which gives a means for the kernel and userspace to interact.
- * Place a tracing_off() in the kernel where you want tracing to end.
- * From user space, examine the trace, and then echo 1 > tracing_on
- * to continue tracing.
- *
- * tracing_stop/tracing_start has slightly more overhead. It is used
- * by things like suspend to ram where disabling the recording of the
- * trace is not enough, but tracing must actually stop because things
- * like calling smp_processor_id() may crash the system.
- *
- * Most likely, you want to use tracing_on/tracing_off.
- */
-
-enum ftrace_dump_mode {
- DUMP_NONE,
- DUMP_ALL,
- DUMP_ORIG,
- DUMP_PARAM,
-};
-
-#ifdef CONFIG_TRACING
-void tracing_on(void);
-void tracing_off(void);
-int tracing_is_on(void);
-void tracing_snapshot(void);
-void tracing_snapshot_alloc(void);
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-
-static inline __printf(1, 2)
-void ____trace_printk_check_format(const char *fmt, ...)
-{
-}
-#define __trace_printk_check_format(fmt, args...) \
-do { \
- if (0) \
- ____trace_printk_check_format(fmt, ##args); \
-} while (0)
-
-/**
- * trace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __trace_printk is an internal function for trace_printk() and
- * the @ip is passed in via the trace_printk() macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving trace_printks scattered around in
- * your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used.)
- *
- * A little optimization trick is done here. If there's only one
- * argument, there's no need to scan the string for printf formats.
- * The trace_puts() will suffice. But how can we take advantage of
- * using trace_puts() when trace_printk() has only one argument?
- * By stringifying the args and checking the size we can tell
- * whether or not there are args. __stringify((__VA_ARGS__)) will
- * turn into "()\0" with a size of 3 when there are no args, anything
- * else will be bigger. All we need to do is define a string to this,
- * and then take its size and compare to 3. If it's bigger, use
- * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
- * let gcc optimize the rest.
- */
-
-#define trace_printk(fmt, ...) \
-do { \
- char _______STR[] = __stringify((__VA_ARGS__)); \
- if (sizeof(_______STR) > 3) \
- do_trace_printk(fmt, ##__VA_ARGS__); \
- else \
- trace_puts(fmt); \
-} while (0)
-
-#define do_trace_printk(fmt, args...) \
-do { \
- static const char *trace_printk_fmt __used \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(fmt) ? fmt : NULL; \
- \
- __trace_printk_check_format(fmt, ##args); \
- \
- if (__builtin_constant_p(fmt)) \
- __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
- else \
- __trace_printk(_THIS_IP_, fmt, ##args); \
-} while (0)
-
-extern __printf(2, 3)
-int __trace_bprintk(unsigned long ip, const char *fmt, ...);
-
-extern __printf(2, 3)
-int __trace_printk(unsigned long ip, const char *fmt, ...);
-
-/**
- * trace_puts - write a string into the ftrace buffer
- * @str: the string to record
- *
- * Note: __trace_bputs is an internal function for trace_puts and
- * the @ip is passed in via the trace_puts macro.
- *
- * This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" effects,
- * where the processing of the print format is still too much.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving trace_puts scattered around in
- * your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used.)
- *
- * Returns: 0 if nothing was written, positive # if string was.
- * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
- */
-
-#define trace_puts(str) ({ \
- static const char *trace_printk_fmt __used \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(str) ? str : NULL; \
- \
- if (__builtin_constant_p(str)) \
- __trace_bputs(_THIS_IP_, trace_printk_fmt); \
- else \
- __trace_puts(_THIS_IP_, str, strlen(str)); \
-})
-extern int __trace_bputs(unsigned long ip, const char *str);
-extern int __trace_puts(unsigned long ip, const char *str, int size);
-
-extern void trace_dump_stack(int skip);
-
-/*
- * The double __builtin_constant_p is because gcc will give us an error
- * if we try to allocate the static variable to fmt if it is not a
- * constant. Even with the outer if statement.
- */
-#define ftrace_vprintk(fmt, vargs) \
-do { \
- if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt __used \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(fmt) ? fmt : NULL; \
- \
- __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
- } else \
- __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
-} while (0)
-
-extern __printf(2, 0) int
-__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
-
-extern __printf(2, 0) int
-__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-
-extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
-#else
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void trace_dump_stack(int skip) { }
-
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
-static inline int tracing_is_on(void) { return 0; }
-static inline void tracing_snapshot(void) { }
-static inline void tracing_snapshot_alloc(void) { }
-
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...)
-{
- return 0;
-}
-static __printf(1, 0) inline int
-ftrace_vprintk(const char *fmt, va_list ap)
-{
- return 0;
-}
-static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
-#endif /* CONFIG_TRACING */
-
/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE
# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
-/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
-#define VERIFY_OCTAL_PERMISSIONS(perms) \
- (BUILD_BUG_ON_ZERO((perms) < 0) + \
- BUILD_BUG_ON_ZERO((perms) > 0777) + \
- /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
- BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
- BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
- /* USER_WRITABLE >= GROUP_WRITABLE */ \
- BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
- /* OTHER_WRITABLE? Generally considered a bad idea. */ \
- BUILD_BUG_ON_ZERO((perms) & 2) + \
- (perms))
#endif
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 5f7b9de97e8d..ac4129d1d741 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -11,49 +11,26 @@ struct kho_scratch {
phys_addr_t size;
};
+struct kho_vmalloc;
+
struct folio;
struct page;
-#define DECLARE_KHOSER_PTR(name, type) \
- union { \
- phys_addr_t phys; \
- type ptr; \
- } name
-#define KHOSER_STORE_PTR(dest, val) \
- ({ \
- typeof(val) v = val; \
- typecheck(typeof((dest).ptr), v); \
- (dest).phys = virt_to_phys(v); \
- })
-#define KHOSER_LOAD_PTR(src) \
- ({ \
- typeof(src) s = src; \
- (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
- })
-
-struct kho_vmalloc_chunk;
-struct kho_vmalloc {
- DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
- unsigned int total_pages;
- unsigned short flags;
- unsigned short order;
-};
-
#ifdef CONFIG_KEXEC_HANDOVER
bool kho_is_enabled(void);
bool is_kho_boot(void);
int kho_preserve_folio(struct folio *folio);
void kho_unpreserve_folio(struct folio *folio);
-int kho_preserve_pages(struct page *page, unsigned int nr_pages);
-void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_pages(struct page *page, unsigned long nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned long nr_pages);
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
void *kho_alloc_preserve(size_t size);
void kho_unpreserve_free(void *mem);
void kho_restore_free(void *mem);
struct folio *kho_restore_folio(phys_addr_t phys);
-struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages);
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
int kho_add_subtree(const char *name, void *fdt);
void kho_remove_subtree(void *fdt);
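A minimal sketch of the preserve/restore pairing with the widened unsigned long page counts; how "phys" travels to the next kernel (normally via a KHO sub-FDT) is elided, and the example_* names are illustrative:

#include <linux/kexec_handover.h>
#include <linux/mm.h>

static int example_preserve(struct page *page, unsigned long nr_pages,
			    phys_addr_t *phys)
{
	int err = kho_preserve_pages(page, nr_pages);

	if (err)
		return err;
	/* Record where the pages live; handed over e.g. via a sub-FDT. */
	*phys = page_to_phys(page);
	return 0;
}

/* In the next kernel, with the same phys and nr_pages: */
static struct page *example_restore(phys_addr_t phys, unsigned long nr_pages)
{
	return kho_restore_pages(phys, nr_pages);
}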
diff --git a/include/linux/kho/abi/kexec_handover.h b/include/linux/kho/abi/kexec_handover.h
new file mode 100644
index 000000000000..2201a0d2c159
--- /dev/null
+++ b/include/linux/kho/abi/kexec_handover.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
+ * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
+ * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
+ * Copyright (C) 2025 Google LLC, Jason Miu <jasonmiu@google.com>
+ */
+
+#ifndef _LINUX_KHO_ABI_KEXEC_HANDOVER_H
+#define _LINUX_KHO_ABI_KEXEC_HANDOVER_H
+
+#include <linux/types.h>
+
+/**
+ * DOC: Kexec Handover ABI
+ *
+ * Kexec Handover uses the ABI defined below for passing preserved data from
+ * one kernel to the next.
+ * The ABI uses Flattened Device Tree (FDT) format. The first kernel creates an
+ * FDT which is then passed to the next kernel during a kexec handover.
+ *
+ * This interface is a contract. Any modification to the FDT structure, node
+ * properties, compatible string, or the layout of the data structures
+ * referenced here constitutes a breaking change. Such changes require
+ * incrementing the version number in KHO_FDT_COMPATIBLE to prevent a new kernel
+ * from misinterpreting data from an older kernel. Changes are allowed provided
+ * the compatibility version is incremented. However, backward/forward
+ * compatibility is only guaranteed for kernels supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The FDT serves as a central registry for physical
+ * addresses of preserved data structures and sub-FDTs. The first kernel
+ * populates this FDT with references to memory regions and other FDTs that
+ * need to persist across the kexec transition. The subsequent kernel then
+ * parses this FDT to locate and restore the preserved data.::
+ *
+ * / {
+ * compatible = "kho-v1";
+ *
+ * preserved-memory-map = <0x...>;
+ *
+ * <subnode-name-1> {
+ * fdt = <0x...>;
+ * };
+ *
+ * <subnode-name-2> {
+ * fdt = <0x...>;
+ * };
+ * ... ...
+ * <subnode-name-N> {
+ * fdt = <0x...>;
+ * };
+ * };
+ *
+ * Root KHO Node (/):
+ * - compatible: "kho-v1"
+ *
+ * Identifies the overall KHO ABI version.
+ *
+ * - preserved-memory-map: u64
+ *
+ * Physical memory address pointing to the root of the
+ * preserved memory map data structure.
+ *
+ * Subnodes (<subnode-name-N>):
+ * Subnodes can also be added to the root node to
+ * describe other preserved data blobs. The <subnode-name-N>
+ * is provided by the subsystem that uses KHO for preserving its
+ * data.
+ *
+ * - fdt: u64
+ *
+ * Physical address pointing to a subnode FDT blob that is also
+ * being preserved.
+ */
+
+/* The compatible string for the KHO FDT root node. */
+#define KHO_FDT_COMPATIBLE "kho-v1"
+
+/* The FDT property for the preserved memory map. */
+#define KHO_FDT_MEMORY_MAP_PROP_NAME "preserved-memory-map"
+
+/* The FDT property for sub-FDTs. */
+#define KHO_FDT_SUB_TREE_PROP_NAME "fdt"
+
+/**
+ * DOC: Kexec Handover ABI for vmalloc Preservation
+ *
+ * The Kexec Handover ABI for preserving vmalloc'ed memory is defined by
+ * a set of structures and helper macros. The layout of these structures is a
+ * stable contract between kernels and is versioned by the KHO_FDT_COMPATIBLE
+ * string.
+ *
+ * The preservation is managed through a main descriptor &struct kho_vmalloc,
+ * which points to a linked list of &struct kho_vmalloc_chunk structures. These
+ * chunks contain the physical addresses of the preserved pages, allowing the
+ * next kernel to reconstruct the vmalloc area with the same content and layout.
+ * Helper macros are also defined for storing and loading pointers within
+ * these structures.
+ */
+
+/* Helper macro to define a union for a serializable pointer. */
+#define DECLARE_KHOSER_PTR(name, type) \
+ union { \
+ u64 phys; \
+ type ptr; \
+ } name
+
+/* Stores the physical address of a serializable pointer. */
+#define KHOSER_STORE_PTR(dest, val) \
+ ({ \
+ typeof(val) v = val; \
+ typecheck(typeof((dest).ptr), v); \
+ (dest).phys = virt_to_phys(v); \
+ })
+
+/* Loads the stored physical address back to a pointer. */
+#define KHOSER_LOAD_PTR(src) \
+ ({ \
+ typeof(src) s = src; \
+ (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
+ })
+
+/*
+ * This header is embedded at the beginning of each `kho_vmalloc_chunk`
+ * and contains a pointer to the next chunk in the linked list,
+ * stored as a physical address for handover.
+ */
+struct kho_vmalloc_hdr {
+ DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
+};
+
+#define KHO_VMALLOC_SIZE \
+ ((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
+ sizeof(u64))
+
+/*
+ * Each chunk is a single page and is part of a linked list that describes
+ * a preserved vmalloc area. It contains the header with the link to the next
+ * chunk and a zero terminated array of physical addresses of the pages that
+ * make up the preserved vmalloc area.
+ */
+struct kho_vmalloc_chunk {
+ struct kho_vmalloc_hdr hdr;
+ u64 phys[KHO_VMALLOC_SIZE];
+};
+
+static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
+
+/*
+ * Describes a preserved vmalloc memory area, including the
+ * total number of pages, allocation flags, page order, and a pointer to the
+ * first chunk of physical page addresses.
+ */
+struct kho_vmalloc {
+ DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+ unsigned int total_pages;
+ unsigned short flags;
+ unsigned short order;
+};
+
+#endif /* _LINUX_KHO_ABI_KEXEC_HANDOVER_H */
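A sketch of how a consumer in the new kernel could walk the chunk list with the helpers above; real users go through kho_restore_vmalloc(), and the pr_debug() is only illustrative:

static void example_walk_chunks(const struct kho_vmalloc *kv)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kv->first);

	while (chunk) {
		unsigned int i;

		/* The phys[] array is zero-terminated within a chunk. */
		for (i = 0; i < KHO_VMALLOC_SIZE && chunk->phys[i]; i++)
			pr_debug("preserved page at %#llx\n", chunk->phys[i]);
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
	}
}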
diff --git a/include/linux/kho/abi/luo.h b/include/linux/kho/abi/luo.h
index bb099c92e469..46750a0ddf88 100644
--- a/include/linux/kho/abi/luo.h
+++ b/include/linux/kho/abi/luo.h
@@ -8,10 +8,10 @@
/**
* DOC: Live Update Orchestrator ABI
*
- * This header defines the stable Application Binary Interface used by the
- * Live Update Orchestrator to pass state from a pre-update kernel to a
- * post-update kernel. The ABI is built upon the Kexec HandOver framework
- * and uses a Flattened Device Tree to describe the preserved data.
+ * Live Update Orchestrator uses the stable Application Binary Interface
+ * defined below to pass state from a pre-update kernel to a post-update
+ * kernel. The ABI is built upon the Kexec HandOver framework and uses a
+ * Flattened Device Tree to describe the preserved data.
*
* This interface is a contract. Any modification to the FDT structure, node
* properties, compatible strings, or the layout of the `__packed` serialization
@@ -37,6 +37,11 @@
* compatible = "luo-session-v1";
* luo-session-header = <phys_addr_of_session_header_ser>;
* };
+ *
+ * luo-flb {
+ * compatible = "luo-flb-v1";
+ * luo-flb-header = <phys_addr_of_flb_header_ser>;
+ * };
* };
*
* Main LUO Node (/):
@@ -56,6 +61,17 @@
* is the header for a contiguous block of memory containing an array of
* `struct luo_session_ser`, one for each preserved session.
*
+ * File-Lifecycle-Bound Node (luo-flb):
+ * This node describes all preserved global objects whose lifecycle is bound
+ * to that of the preserved files (e.g., shared IOMMU state).
+ *
+ * - compatible: "luo-flb-v1"
+ * Identifies the FLB ABI version.
+ * - luo-flb-header: u64
+ * The physical address of a `struct luo_flb_header_ser`. This structure is
+ * the header for a contiguous block of memory containing an array of
+ * `struct luo_flb_ser`, one for each preserved global object.
+ *
* Serialization Structures:
* The FDT properties point to memory regions containing arrays of simple,
* `__packed` structures. These structures contain the actual preserved state.
@@ -74,6 +90,16 @@
* Metadata for a single preserved file. Contains the `compatible` string to
* find the correct handler in the new kernel, a user-provided `token` for
* identification, and an opaque `data` handle for the handler to use.
+ *
+ * - struct luo_flb_header_ser:
+ * Header for the FLB array. Contains the total page count of the
+ * preserved memory block and the number of `struct luo_flb_ser` entries
+ * that follow.
+ *
+ * - struct luo_flb_ser:
+ * Metadata for a single preserved global object. Contains its `name`
+ * (compatible string), an opaque `data` handle, and the `count`
+ * number of files depending on it.
*/
#ifndef _LINUX_KHO_ABI_LUO_H
@@ -163,4 +189,59 @@ struct luo_session_ser {
struct luo_file_set_ser file_set_ser;
} __packed;
+/* The max size is set so it can be used reliably during serialization */
+#define LIVEUPDATE_FLB_COMPAT_LENGTH 48
+
+#define LUO_FDT_FLB_NODE_NAME "luo-flb"
+#define LUO_FDT_FLB_COMPATIBLE "luo-flb-v1"
+#define LUO_FDT_FLB_HEADER "luo-flb-header"
+
+/**
+ * struct luo_flb_header_ser - Header for the serialized FLB data block.
+ * @pgcnt: The total number of pages occupied by the entire preserved memory
+ * region, including this header and the subsequent array of
+ * &struct luo_flb_ser entries.
+ * @count: The number of &struct luo_flb_ser entries that follow this header
+ * in the memory block.
+ *
+ * This structure is located at the physical address specified by the
+ * `LUO_FDT_FLB_HEADER` FDT property. It provides the new kernel with the
+ * necessary information to find and iterate over the array of preserved
+ * File-Lifecycle-Bound objects and to manage the underlying memory.
+ *
+ * If this structure is modified, LUO_FDT_FLB_COMPATIBLE must be updated.
+ */
+struct luo_flb_header_ser {
+ u64 pgcnt;
+ u64 count;
+} __packed;
+
+/**
+ * struct luo_flb_ser - Represents the serialized state of a single FLB object.
+ * @name: The unique compatibility string of the FLB object, used to find the
+ * corresponding &struct liveupdate_flb handler in the new kernel.
+ * @data: The opaque u64 handle returned by the FLB's .preserve() operation
+ * in the old kernel. This handle encapsulates the entire state needed
+ * for restoration.
+ * @count: The reference count at the time of serialization; i.e., the number
+ * of preserved files that depended on this FLB. This is used by the
+ * new kernel to correctly manage the FLB's lifecycle.
+ *
+ * An array of these structures is created in a preserved memory region and
+ * passed to the new kernel. Each entry allows the LUO core to restore one
+ * global, shared object.
+ *
+ * If this structure is modified, LUO_FDT_FLB_COMPATIBLE must be updated.
+ */
+struct luo_flb_ser {
+ char name[LIVEUPDATE_FLB_COMPAT_LENGTH];
+ u64 data;
+ u64 count;
+} __packed;
+
+/* Kernel Live Update Test ABI */
+#ifdef CONFIG_LIVEUPDATE_TEST
+#define LIVEUPDATE_TEST_FLB_COMPATIBLE(i) "liveupdate-test-flb-v" #i
+#endif
+
#endif /* _LINUX_KHO_ABI_LUO_H */
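A sketch of consuming the serialized FLB block in the new kernel; mapping the header from the physical address in the "luo-flb-header" property (e.g. via phys_to_virt()) and the entries starting immediately after the header are assumptions based on the layout described above:

static void example_scan_flbs(const struct luo_flb_header_ser *hdr)
{
	/* The luo_flb_ser entries follow the header contiguously. */
	const struct luo_flb_ser *ser = (const struct luo_flb_ser *)(hdr + 1);
	u64 i;

	for (i = 0; i < hdr->count; i++)
		pr_info("FLB '%s', data %#llx, %llu dependent file(s)\n",
			ser[i].name, ser[i].data, ser[i].count);
}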
diff --git a/include/linux/kho/abi/memblock.h b/include/linux/kho/abi/memblock.h
new file mode 100644
index 000000000000..27b042f470e1
--- /dev/null
+++ b/include/linux/kho/abi/memblock.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KHO_ABI_MEMBLOCK_H
+#define _LINUX_KHO_ABI_MEMBLOCK_H
+
+/**
+ * DOC: memblock kexec handover ABI
+ *
+ * Memblock can serialize its current memory reservations created with
+ * reserve_mem command line option across kexec through KHO.
+ * The post-KHO kernel can then consume these reservations and they are
+ * guaranteed to have the same physical address.
+ *
+ * The state is serialized using Flattened Device Tree (FDT) format. Any
+ * modification to the FDT structure, node properties, or the compatible
+ * strings constitutes a breaking change. Such changes require incrementing the
+ * version number in the relevant `_COMPATIBLE` string to prevent a new kernel
+ * from misinterpreting data from an old kernel.
+ *
+ * Changes are allowed provided the compatibility version is incremented.
+ * However, backward/forward compatibility is only guaranteed for kernels
+ * supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The entire memblock state is encapsulated within a single KHO entry named
+ * "memblock".
+ * This entry contains an FDT with the following layout:
+ *
+ * .. code-block:: none
+ *
+ * / {
+ * compatible = "memblock-v1";
+ *
+ * n1 {
+ * compatible = "reserve-mem-v1";
+ * start = <0xc06b 0x4000000>;
+ * size = <0x04 0x00>;
+ * };
+ * };
+ *
+ * Main memblock node (/):
+ *
+ * - compatible: "memblock-v1"
+ *
+ * Identifies the overall memblock ABI version.
+ *
+ * reserve_mem node:
+ * These nodes describe all reserve_mem regions. The node name is the name
+ * defined by the user for a reserve_mem region.
+ *
+ * - compatible: "reserve-mem-v1"
+ *
+ * Identifies the ABI version of reserve_mem descriptions.
+ *
+ * - start: u64
+ *
+ * Physical address of the reserved memory region.
+ *
+ * - size: u64
+ *
+ * Size in bytes of the reserved memory region.
+ */
+
+/* Top level memblock FDT node name. */
+#define MEMBLOCK_KHO_FDT "memblock"
+
+/* The compatible string for the memblock FDT root node. */
+#define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1"
+
+/* The compatible string for the reserve_mem FDT nodes. */
+#define RESERVE_MEM_KHO_NODE_COMPATIBLE "reserve-mem-v1"
+
+#endif /* _LINUX_KHO_ABI_MEMBLOCK_H */
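A hedged libfdt sketch of reading these nodes back; error handling and mapping of the FDT blob are elided, and reading only "start" is illustrative:

#include <linux/libfdt.h>

static void example_parse_reserve_mem(const void *fdt)
{
	int node;

	fdt_for_each_subnode(node, fdt, 0) {
		const fdt64_t *start;

		if (fdt_node_check_compatible(fdt, node,
					      RESERVE_MEM_KHO_NODE_COMPATIBLE))
			continue;
		start = fdt_getprop(fdt, node, "start", NULL);
		if (start)
			pr_info("reserve_mem '%s' at %#llx\n",
				fdt_get_name(fdt, node, NULL),
				(unsigned long long)fdt64_to_cpu(*start));
	}
}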
diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h
index da7d063474a1..68cb6303b846 100644
--- a/include/linux/kho/abi/memfd.h
+++ b/include/linux/kho/abi/memfd.h
@@ -12,13 +12,13 @@
#define _LINUX_KHO_ABI_MEMFD_H
#include <linux/types.h>
-#include <linux/kexec_handover.h>
+#include <linux/kho/abi/kexec_handover.h>
/**
* DOC: memfd Live Update ABI
*
- * This header defines the ABI for preserving the state of a memfd across a
- * kexec reboot using the LUO.
+ * memfd uses the ABI defined below for preserving its state across a kexec
+ * reboot using the LUO.
*
* The state is serialized into a packed structure `struct memfd_luo_ser`
* which is handed over to the next kernel via the KHO mechanism.
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index eb1946a70cff..d7a9053ff4fe 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -17,8 +17,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
-extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
- bool install_pmd);
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -42,10 +42,9 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
}
-static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr, bool install_pmd)
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
- return 0;
}
static inline void khugepaged_min_free_kbytes_update(void)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 88e82ab1367c..9bc6abe57572 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -81,6 +81,7 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *mutex)
+ __cond_acquires(true, mutex)
{
if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
@@ -102,6 +103,7 @@ static inline int kref_put_mutex(struct kref *kref,
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
+ __cond_acquires(true, lock)
{
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
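The __cond_acquires() annotations document the existing contract: on the final put the lock is taken and the release callback must drop it. A minimal sketch of that pattern (names illustrative):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_obj {
	struct kref ref;
	struct list_head node;
	struct mutex *list_lock;	/* protects the list holding node */
};

static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	/* Runs with *list_lock held; unlink, then drop the lock. */
	list_del(&obj->node);
	mutex_unlock(obj->list_lock);
	kfree(obj);
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put_mutex(&obj->ref, my_obj_release, obj->list_lock);
}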
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8d27403888ce..c92c1149ee6e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -100,6 +100,7 @@ void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_exit(long result) __noreturn;
void kthread_complete_and_exit(struct completion *, long) __noreturn;
+int kthreads_update_housekeeping(void);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 39534fafa36a..00346ce3af5e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -46,13 +46,12 @@
/*
* Quirk flags bits.
- * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ * ata_device->quirks is an u64, so __ATA_QUIRK_MAX must not exceed 64.
*/
enum ata_quirks {
__ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
__ATA_QUIRK_NODMA, /* DMA problems */
__ATA_QUIRK_NONCQ, /* Don't use NCQ */
- __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
__ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
__ATA_QUIRK_DISABLE, /* Disable it */
__ATA_QUIRK_HPA_SIZE, /* Native size off by one */
@@ -74,8 +73,7 @@ enum ata_quirks {
__ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
__ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
__ATA_QUIRK_NOTRIM, /* Do not use TRIM */
- __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
- __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */
+ __ATA_QUIRK_MAX_SEC, /* Limit max sectors */
__ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
__ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
__ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */
@@ -91,38 +89,36 @@ enum ata_quirks {
* Some quirks may be drive/controller pair dependent.
*/
enum {
- ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
- ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
- ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
- ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
- ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
- ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
- ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
- ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
- ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
- ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
- ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
- ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
- ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
- ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
- ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
- ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
- ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
- ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
- ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
- ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
- ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
- ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
- ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
- ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
- ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
- ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191),
- ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
- ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
- ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
- ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
- ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
- ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+ ATA_QUIRK_DIAGNOSTIC = BIT_ULL(__ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = BIT_ULL(__ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = BIT_ULL(__ATA_QUIRK_NONCQ),
+ ATA_QUIRK_BROKEN_HPA = BIT_ULL(__ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = BIT_ULL(__ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = BIT_ULL(__ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = BIT_ULL(__ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = BIT_ULL(__ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = BIT_ULL(__ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = BIT_ULL(__ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = BIT_ULL(__ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = BIT_ULL(__ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = BIT_ULL(__ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = BIT_ULL(__ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = BIT_ULL(__ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = BIT_ULL(__ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = BIT_ULL(__ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = BIT_ULL(__ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = BIT_ULL(__ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = BIT_ULL(__ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = BIT_ULL(__ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = BIT_ULL(__ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = BIT_ULL(__ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC = BIT_ULL(__ATA_QUIRK_MAX_SEC),
+ ATA_QUIRK_MAX_TRIM_128M = BIT_ULL(__ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = BIT_ULL(__ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_LPM_ON_ATI = BIT_ULL(__ATA_QUIRK_NO_LPM_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = BIT_ULL(__ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = BIT_ULL(__ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = BIT_ULL(__ATA_QUIRK_NO_FUA),
};
enum {
@@ -723,7 +719,7 @@ struct ata_cdl {
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int quirks; /* List of broken features */
+ u64 quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -903,6 +899,9 @@ struct ata_port {
u64 qc_active;
int nr_active_links; /* #links with active qcs */
+ struct work_struct deferred_qc_work;
+ struct ata_queued_cmd *deferred_qc;
+
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -1150,7 +1149,8 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
#else
#define ATA_SCSI_COMPAT_IOCTL /* empty */
#endif
-extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+extern enum scsi_qc_status ata_scsi_queuecmd(struct Scsi_Host *h,
+ struct scsi_cmnd *cmd);
#if IS_REACHABLE(CONFIG_ATA)
bool ata_scsi_dma_need_drain(struct request *rq);
#else
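With quirks widened to u64 and built from BIT_ULL(), tests keep their shape; a trivial sketch:

static bool example_ncq_usable(const struct ata_device *dev)
{
	/* dev->quirks is now a u64 bitmap of ATA_QUIRK_* flags. */
	return !(dev->quirks & ATA_QUIRK_NONCQ);
}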
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ae1b541446c9..df9eebe6afca 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -144,11 +144,13 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
}
static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ __acquires(__bitlock(0, b))
{
bit_spin_lock(0, (unsigned long *)b);
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ __releases(__bitlock(0, b))
{
__bit_spin_unlock(0, (unsigned long *)b);
}
diff --git a/include/linux/list_private.h b/include/linux/list_private.h
new file mode 100644
index 000000000000..19b01d16beda
--- /dev/null
+++ b/include/linux/list_private.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef _LINUX_LIST_PRIVATE_H
+#define _LINUX_LIST_PRIVATE_H
+
+/**
+ * DOC: Private List Primitives
+ *
+ * Provides a set of list primitives identical in function to those in
+ * ``<linux/list.h>``, but designed for cases where the embedded
+ * ``&struct list_head`` is a private member.
+ */
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+
+#define __list_private_offset(type, member) \
+ ((size_t)(&ACCESS_PRIVATE(((type *)0), member)))
+
+/**
+ * list_private_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the identifier passed to ACCESS_PRIVATE.
+ */
+#define list_private_entry(ptr, type, member) ({ \
+ const struct list_head *__mptr = (ptr); \
+ (type *)((char *)__mptr - __list_private_offset(type, member)); \
+})
+
+/**
+ * list_private_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the identifier passed to ACCESS_PRIVATE.
+ */
+#define list_private_first_entry(ptr, type, member) \
+ list_private_entry((ptr)->next, type, member)
+
+/**
+ * list_private_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the identifier passed to ACCESS_PRIVATE.
+ */
+#define list_private_last_entry(ptr, type, member) \
+ list_private_entry((ptr)->prev, type, member)
+
+/**
+ * list_private_next_entry - get the next element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_next_entry(pos, member) \
+ list_private_entry(ACCESS_PRIVATE(pos, member).next, typeof(*(pos)), member)
+
+/**
+ * list_private_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraparound if pos is the last element (return the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_private_next_entry_circular(pos, head, member) \
+ (list_is_last(&ACCESS_PRIVATE(pos, member), head) ? \
+ list_private_first_entry(head, typeof(*(pos)), member) : \
+ list_private_next_entry(pos, member))
+
+/**
+ * list_private_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_prev_entry(pos, member) \
+ list_private_entry(ACCESS_PRIVATE(pos, member).prev, typeof(*(pos)), member)
+
+/**
+ * list_private_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraparound if pos is the first element (return the last element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_private_prev_entry_circular(pos, head, member) \
+ (list_is_first(&ACCESS_PRIVATE(pos, member), head) ? \
+ list_private_last_entry(head, typeof(*(pos)), member) : \
+ list_private_prev_entry(pos, member))
+
+/**
+ * list_private_entry_is_head - test if the entry points to the head of the list
+ * @pos: the type * to cursor
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_entry_is_head(pos, head, member) \
+ list_is_head(&ACCESS_PRIVATE(pos, member), (head))
+
+/**
+ * list_private_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_for_each_entry(pos, head, member) \
+ for (pos = list_private_first_entry(head, typeof(*pos), member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_next_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_private_last_entry(head, typeof(*pos), member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_prev_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_private_for_each_entry_continue(pos, head, member) \
+ for (pos = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_next_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_private_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_private_prev_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_prev_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_from - iterate over list of given type from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing from current position.
+ */
+#define list_private_for_each_entry_from(pos, head, member) \
+ for (; !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_next_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_from_reverse - iterate backwards over list of given type
+ * from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, continuing from current position.
+ */
+#define list_private_for_each_entry_from_reverse(pos, head, member) \
+ for (; !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_prev_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_private_first_entry(head, typeof(*pos), member), \
+ n = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_next_entry(n, member))
+
+/**
+ * list_private_for_each_entry_safe_continue - continue list iteration safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing after current point,
+ * safe against removal of list entry.
+ */
+#define list_private_for_each_entry_safe_continue(pos, n, head, member) \
+ for (pos = list_private_next_entry(pos, member), \
+ n = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_next_entry(n, member))
+
+/**
+ * list_private_for_each_entry_safe_from - iterate over list from current point safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type from current point, safe against
+ * removal of list entry.
+ */
+#define list_private_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_next_entry(n, member))
+
+/**
+ * list_private_for_each_entry_safe_reverse - iterate backwards over list safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, safe against removal
+ * of list entry.
+ */
+#define list_private_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_private_last_entry(head, typeof(*pos), member), \
+ n = list_private_prev_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_prev_entry(n, member))
+
+/**
+ * list_private_safe_reset_next - reset a stale list_private_for_each_entry_safe loop
+ * @pos: the loop cursor used in the list_private_for_each_entry_safe loop
+ * @n: temporary storage used in list_private_for_each_entry_safe
+ * @member: the name of the list_head within the struct.
+ *
+ * list_private_safe_reset_next is not safe to use in general if the list may
+ * be modified concurrently (e.g. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (pos) is pinned in the list,
+ * and list_private_safe_reset_next is called after re-taking the lock and
+ * before completing the current iteration of the loop body.
+ */
+#define list_private_safe_reset_next(pos, n, member) \
+ n = list_private_next_entry(pos, member)
+
+#endif /* _LINUX_LIST_PRIVATE_H */
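A minimal usage sketch, assuming a struct that hides its linkage behind __private so direct member access is flagged; names are illustrative:

#include <linux/list.h>
#include <linux/list_private.h>

struct my_item {
	int value;
	struct list_head __private node;	/* only reachable via helpers */
};

static int example_sum(struct list_head *head)
{
	struct my_item *item;
	int sum = 0;

	list_private_for_each_entry(item, head, node)
		sum += item->value;
	return sum;
}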
diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h
index a7f6ee5b6771..fe82a6c3005f 100644
--- a/include/linux/liveupdate.h
+++ b/include/linux/liveupdate.h
@@ -11,10 +11,13 @@
#include <linux/compiler.h>
#include <linux/kho/abi/luo.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/types.h>
#include <uapi/linux/liveupdate.h>
struct liveupdate_file_handler;
+struct liveupdate_flb;
+struct liveupdate_session;
struct file;
/**
@@ -99,6 +102,118 @@ struct liveupdate_file_handler {
* registered file handlers.
*/
struct list_head __private list;
+ /* A list of FLB dependencies. */
+ struct list_head __private flb_list;
+};
+
+/**
+ * struct liveupdate_flb_op_args - Arguments for FLB operation callbacks.
+ * @flb: The global FLB instance for which this call is performed.
+ * @data: For .preserve(): [OUT] The callback sets this field.
+ * For .unpreserve(): [IN] The handle from .preserve().
+ * For .retrieve(): [IN] The handle from .preserve().
+ * @obj: For .preserve(): [OUT] The callback sets this to the live object.
+ * For .retrieve(): [OUT] The callback sets this to the live object.
+ * For .finish(): [IN] The live object from .retrieve().
+ *
+ * This structure bundles all parameters for the FLB operation callbacks.
+ */
+struct liveupdate_flb_op_args {
+ struct liveupdate_flb *flb;
+ u64 data;
+ void *obj;
+};
+
+/**
+ * struct liveupdate_flb_ops - Callbacks for global File-Lifecycle-Bound data.
+ * @preserve: Called when the first file using this FLB is preserved.
+ * The callback must save its state and return a single,
+ * self-contained u64 handle by setting the 'argp->data'
+ * field and 'argp->obj'.
+ * @unpreserve: Called when the last file using this FLB is unpreserved
+ * (aborted before reboot). Receives the handle via
+ * 'argp->data' and live object via 'argp->obj'.
+ * @retrieve: Called on-demand in the new kernel, the first time a
+ * component requests access to the shared object. It receives
+ * the preserved handle via 'argp->data' and must reconstruct
+ * the live object, returning it by setting the 'argp->obj'
+ * field.
+ * @finish: Called in the new kernel when the last file using this FLB
+ * is finished. Receives the live object via 'argp->obj' for
+ * cleanup.
+ * @owner: Module reference
+ *
+ * Operations that manage global shared data with file bound lifecycle,
+ * triggered by the first file that uses it and concluded by the last file that
+ * uses it, across all sessions.
+ */
+struct liveupdate_flb_ops {
+ int (*preserve)(struct liveupdate_flb_op_args *argp);
+ void (*unpreserve)(struct liveupdate_flb_op_args *argp);
+ int (*retrieve)(struct liveupdate_flb_op_args *argp);
+ void (*finish)(struct liveupdate_flb_op_args *argp);
+ struct module *owner;
+};
+
+/*
+ * struct luo_flb_private_state - Private FLB state structures.
+ * @count: The number of preserved files currently depending on this FLB.
+ * This is used to trigger the preserve/unpreserve/finish ops on the
+ * first/last file.
+ * @data: The opaque u64 handle returned by .preserve() or passed to
+ * .retrieve().
+ * @obj: The live kernel object returned by .preserve() or .retrieve().
+ * @lock: A mutex that protects all fields within this structure, providing
+ * the synchronization service for the FLB's ops.
+ * @finished: True once the FLB's finish() callback has run.
+ * @retrieved: True once the FLB's retrieve() callback has run.
+ */
+struct luo_flb_private_state {
+ long count;
+ u64 data;
+ void *obj;
+ struct mutex lock;
+ bool finished;
+ bool retrieved;
+};
+
+/*
+ * struct luo_flb_private - Keep separate incoming and outgoing states.
+ * @list: A global list of registered FLBs.
+ * @outgoing: The runtime state for the pre-reboot
+ * (preserve/unpreserve) lifecycle.
+ * @incoming: The runtime state for the post-reboot (retrieve/finish)
+ * lifecycle.
+ * @users: The number of file handlers this FLB is registered with.
+ * @initialized: true when private fields have been initialized.
+ */
+struct luo_flb_private {
+ struct list_head list;
+ struct luo_flb_private_state outgoing;
+ struct luo_flb_private_state incoming;
+ int users;
+ bool initialized;
+};
+
+/**
+ * struct liveupdate_flb - A global definition for a shared data object.
+ * @ops: Callback functions
+ * @compatible: The compatibility string (e.g., "iommu-core-v1")
+ * that uniquely identifies the FLB type this handler
+ * supports. This is matched against the compatible string
+ * associated with individual &struct liveupdate_flb
+ * instances.
+ *
+ * This struct is the "template" that a driver registers to define a shared,
+ * file-lifecycle-bound object. The actual runtime state (the live object,
+ * refcount, etc.) is managed privately by the LUO core.
+ */
+struct liveupdate_flb {
+ const struct liveupdate_flb_ops *ops;
+ const char compatible[LIVEUPDATE_FLB_COMPAT_LENGTH];
+
+ /* private: */
+ struct luo_flb_private __private private;
};
#ifdef CONFIG_LIVEUPDATE
@@ -112,6 +227,14 @@ int liveupdate_reboot(void);
int liveupdate_register_file_handler(struct liveupdate_file_handler *fh);
int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh);
+int liveupdate_register_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb);
+int liveupdate_unregister_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb);
+
+int liveupdate_flb_get_incoming(struct liveupdate_flb *flb, void **objp);
+int liveupdate_flb_get_outgoing(struct liveupdate_flb *flb, void **objp);
+
#else /* CONFIG_LIVEUPDATE */
static inline bool liveupdate_enabled(void)
@@ -134,5 +257,29 @@ static inline int liveupdate_unregister_file_handler(struct liveupdate_file_hand
return -EOPNOTSUPP;
}
+static inline int liveupdate_register_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_unregister_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_flb_get_incoming(struct liveupdate_flb *flb,
+ void **objp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_flb_get_outgoing(struct liveupdate_flb *flb,
+ void **objp)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_LIVEUPDATE */
#endif /* _LINUX_LIVEUPDATE_H */
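A sketch of wiring an FLB into a file handler, with callback bodies elided; the compatible string and names are illustrative, and error handling is trimmed:

#include <linux/liveupdate.h>
#include <linux/module.h>

static int my_flb_preserve(struct liveupdate_flb_op_args *argp)
{
	/* Serialize the shared state, then publish the handle/object. */
	argp->data = 0;		/* opaque handle for the next kernel */
	argp->obj = NULL;	/* live object for this kernel */
	return 0;
}

static const struct liveupdate_flb_ops my_flb_ops = {
	.preserve	= my_flb_preserve,
	.owner		= THIS_MODULE,
};

static struct liveupdate_flb my_flb = {
	.ops		= &my_flb_ops,
	.compatible	= "example-flb-v1",
};

static int example_setup(struct liveupdate_file_handler *fh)
{
	/* Ties my_flb's lifecycle to files preserved through fh. */
	return liveupdate_register_flb(fh, &my_flb);
}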
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index b0e6ab329b00..b8830148a859 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
+#define local_lock(lock) __local_lock(__this_cpu_local_lock(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock) __local_lock_irq(__this_cpu_local_lock(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(this_cpu_ptr(lock), flags)
+ __local_lock_irqsave(__this_cpu_local_lock(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock) __local_unlock(__this_cpu_local_lock(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock) __local_unlock_irq(__this_cpu_local_lock(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+ __local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
/**
* local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
* locking constrains it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock) __local_trylock(__this_cpu_local_lock(lock))
#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
@@ -81,27 +81,44 @@
* HARDIRQ context on PREEMPT_RT.
*/
#define local_trylock_irqsave(lock, flags) \
- __local_trylock_irqsave(this_cpu_ptr(lock), flags)
-
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
- local_lock(_T),
- local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
- local_lock_irq(_T),
- local_unlock_irq(_T))
+ __local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
+
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+ local_lock(_T->lock),
+ local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+ local_lock_irq(_T->lock),
+ local_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
local_lock_irqsave(_T->lock, _T->flags),
local_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
#define local_lock_nested_bh(_lock) \
- __local_lock_nested_bh(this_cpu_ptr(_lock))
+ __local_lock_nested_bh(__this_cpu_local_lock(_lock))
#define local_unlock_nested_bh(_lock) \
- __local_unlock_nested_bh(this_cpu_ptr(_lock))
-
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
- local_lock_nested_bh(_T),
- local_unlock_nested_bh(_T))
+ __local_unlock_nested_bh(__this_cpu_local_lock(_lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+ local_lock_nested_bh(_T->lock),
+ local_unlock_nested_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)
#endif
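A sketch of the guard form now backed by DEFINE_LOCK_GUARD_1(); the per-CPU stats structure is illustrative:

#include <linux/cleanup.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_pcpu_stats {
	local_lock_t lock;
	u64 events;
};

static DEFINE_PER_CPU(struct my_pcpu_stats, my_stats) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_count_event(void)
{
	/* The guard resolves the per-CPU instance and unlocks at scope exit. */
	guard(local_lock)(&my_stats.lock);
	this_cpu_add(my_stats.events, 1);
}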
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8f82b4eb542f..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -4,25 +4,30 @@
#endif
#include <linux/percpu-defs.h>
+#include <linux/irqflags.h>
#include <linux/lockdep.h>
+#include <linux/debug_locks.h>
+#include <asm/current.h>
#ifndef CONFIG_PREEMPT_RT
-typedef struct {
+context_lock_struct(local_lock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
u8 acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
@@ -84,7 +89,10 @@ do { \
local_lock_debug_init(lock); \
} while (0)
-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock) \
+do { \
+ __local_lock_init((local_lock_t *)lock); \
+} while (0)
#define __spinlock_nested_bh_init(lock) \
do { \
@@ -117,22 +125,25 @@ do { \
do { \
preempt_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
preempt_disable(); \
@@ -146,10 +157,10 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
local_irq_save(flags); \
@@ -163,7 +174,7 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
/* preemption or migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -186,18 +197,21 @@ do { \
#define __local_unlock(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
@@ -206,13 +220,20 @@ do { \
do { \
lockdep_assert_in_softirq(); \
local_lock_acquire((lock)); \
+ __acquire(lock); \
} while (0)
#define __local_unlock_nested_bh(lock) \
- local_lock_release((lock))
+ do { \
+ __release(lock); \
+ local_lock_release((lock)); \
+ } while (0)
#else /* !CONFIG_PREEMPT_RT */
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
/*
* On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
* critical section while staying preemptible.
@@ -267,7 +288,7 @@ do { \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, context_unsafe(({ \
int __locked; \
\
if (in_nmi() | in_hardirq()) { \
@@ -279,17 +300,40 @@ do { \
migrate_enable(); \
} \
__locked; \
- })
+ })))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
typecheck(unsigned long, flags); \
flags = 0; \
__local_trylock(lock); \
- })
+ }))
/* migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(__lock) \
(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
#endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base
+ * variable, and hide the fact that we actually pass the per-CPU instance to
+ * the lock/unlock functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#endif /* CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base) this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */
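
A sketch of a trylock path that now routes through __this_cpu_local_lock();
the names are hypothetical and the trylock initializer macro is an assumption:

	static DEFINE_PER_CPU(local_trylock_t, defer_lock) = INIT_LOCAL_TRYLOCK(defer_lock);

	static bool try_defer_work(void)
	{
		unsigned long flags;

		if (!local_trylock_irqsave(&defer_lock, flags))
			return false;		/* contended on this CPU */
		/* ... critical section ... */
		local_unlock_irqrestore(&defer_lock, flags);
		return true;
	}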
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index dd634103b014..621566345406 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -282,16 +282,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
#define lockdep_assert_held(l) \
- lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+ do { lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_not_held(l) \
lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
#define lockdep_assert_held_write(l) \
- lockdep_assert(lockdep_is_held_type(l, 0))
+ do { lockdep_assert(lockdep_is_held_type(l, 0)); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_held_read(l) \
- lockdep_assert(lockdep_is_held_type(l, 1))
+ do { lockdep_assert(lockdep_is_held_type(l, 1)); __assume_shared_ctx_lock(l); } while (0)
#define lockdep_assert_held_once(l) \
lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
@@ -389,10 +389,10 @@ extern int lockdep_is_held(const void *);
#define lockdep_assert(c) do { } while (0)
#define lockdep_assert_once(c) do { } while (0)
-#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held(l) __assume_ctx_lock(l)
#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) __assume_ctx_lock(l)
+#define lockdep_assert_held_read(l) __assume_shared_ctx_lock(l)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)
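
A minimal sketch of the effect (names hypothetical): the runtime lockdep
check is unchanged, while __assume_ctx_lock() additionally tells the static
context analysis that the lock is held from this point on:

	static void frob_update(struct frob *f)
	{
		lockdep_assert_held(&f->lock);
		f->count++;	/* the analysis now accepts this guarded access */
	}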
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 815d871fadfc..6ded24cdb4a8 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -49,9 +49,7 @@ static inline void lockref_init(struct lockref *lockref)
void lockref_get(struct lockref *lockref);
int lockref_put_return(struct lockref *lockref);
bool lockref_get_not_zero(struct lockref *lockref);
-bool lockref_put_or_lock(struct lockref *lockref);
-#define lockref_put_or_lock(_lockref) \
- (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+bool lockref_put_or_lock(struct lockref *lockref) __cond_acquires(false, &lockref->lock);
void lockref_mark_dead(struct lockref *lockref);
bool lockref_get_not_dead(struct lockref *lockref);
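
A sketch of the conditional-acquire contract this annotation encodes
(surrounding names hypothetical): on a false return the embedded spinlock is
held and the caller must release it:

	if (!lockref_put_or_lock(&obj->lockref)) {
		/* last reference: lockref.lock is now held */
		release_object(obj);		/* hypothetical teardown */
		spin_unlock(&obj->lockref.lock);
	}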
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 2eac3fc9303d..e17ceb32e0c9 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -44,7 +44,7 @@ int __ilog2_u64(u64 n)
static __always_inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
- return (n != 0 && ((n & (n - 1)) == 0));
+ return n - 1 < (n ^ (n - 1));
}
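
For intuition: for n > 0, n ^ (n - 1) equals 2 * lsb(n) - 1, where lsb(n) is
the lowest set bit, so n - 1 < n ^ (n - 1) holds exactly when n == lsb(n),
i.e. when a single bit is set; for n == 0 both sides wrap to ~0UL and the
strict comparison fails. Worked values:

	is_power_of_2(0);	/* ~0UL < ~0UL        -> false */
	is_power_of_2(1);	/* 0 < (1 ^ 0) == 1   -> true  */
	is_power_of_2(6);	/* 5 < (6 ^ 5) == 3   -> false */
	is_power_of_2(8);	/* 7 < (8 ^ 7) == 15  -> true  */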
/**
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index b92008641242..d48bf0ad26f4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -73,7 +73,7 @@ struct lsm_static_calls_table {
/**
* struct lsm_id - Identify a Linux Security Module.
- * @lsm: name of the LSM, must be approved by the LSM maintainers
+ * @name: name of the LSM, must be approved by the LSM maintainers
* @id: LSM ID number from uapi/linux/lsm.h
*
* Contains the information that identifies the LSM.
@@ -164,7 +164,7 @@ enum lsm_order {
* @initcall_core: LSM callback for core_initcall() setup, optional
* @initcall_subsys: LSM callback for subsys_initcall() setup, optional
* @initcall_fs: LSM callback for fs_initcall setup, optional
- * @nitcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
* @initcall_late: LSM callback for late_initcall() setup, optional
*/
struct lsm_info {
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index e1555e06e7e5..07c1bfbdb8c4 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -70,14 +70,33 @@ struct cmdq_cb_data {
struct cmdq_pkt *pkt;
};
+struct cmdq_mbox_priv {
+ u8 shift_pa;
+ dma_addr_t mminfra_offset;
+};
+
struct cmdq_pkt {
void *va_base;
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
+	struct cmdq_mbox_priv priv; /* for generating instructions */
};
/**
+ * cmdq_get_mbox_priv() - get the private data of a mailbox channel
+ * @chan: mailbox channel
+ * @priv: pointer to store the private data of mailbox channel
+ *
+ * While generating GCE instructions into the command buffer, private data
+ * of the GCE hardware may need to be referenced, such as the shift bits
+ * of the physical address.
+ *
+ * This function should be called before generating any GCE instruction.
+ */
+void cmdq_get_mbox_priv(struct mbox_chan *chan, struct cmdq_mbox_priv *priv);
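
A short usage sketch (channel setup elided; exactly how the packet consumes
the data is an assumption here):

	struct cmdq_mbox_priv priv;

	cmdq_get_mbox_priv(chan, &priv);
	pkt->priv = priv;	/* instruction encoding can apply priv.shift_pa */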
+
+/**
* cmdq_get_shift_pa() - get the shift bits of physical address
* @chan: mailbox channel
*
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 66f98a3da8d8..7b8aad47121e 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -129,13 +129,6 @@ struct maple_arange_64 {
struct maple_metadata meta;
};
-struct maple_alloc {
- unsigned long total;
- unsigned char node_count;
- unsigned int request_count;
- struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
-};
-
struct maple_topiary {
struct maple_pnode *parent;
struct maple_enode *next; /* Overlaps the pivot */
@@ -306,7 +299,6 @@ struct maple_node {
};
struct maple_range_64 mr64;
struct maple_arange_64 ma64;
- struct maple_alloc alloc;
};
};
@@ -536,7 +528,6 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
-int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long max);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 42d6d47e445b..5d1203b9af20 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -29,7 +29,6 @@ struct mdio_device {
struct device dev;
struct mii_bus *bus;
- char modalias[MDIO_NAME_SIZE];
int (*bus_match)(struct device *dev, const struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
@@ -648,6 +647,19 @@ static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
mask, set);
}
+static inline int __mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
+ u16 regnum)
+{
+ return __mdiobus_c45_read(mdiodev->bus, mdiodev->addr, devad, regnum);
+}
+
+static inline int __mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
+ u16 regnum, u16 val)
+{
+ return __mdiobus_c45_write(mdiodev->bus, mdiodev->addr, devad, regnum,
+ val);
+}
+
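A hedged sketch, assuming the usual kernel convention that __-prefixed MDIO
accessors run with the bus lock already held by the caller:

	mutex_lock(&mdiodev->bus->mdio_lock);
	val = __mdiodev_c45_read(mdiodev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
	if (val >= 0)
		__mdiodev_c45_write(mdiodev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				    val | MDIO_CTRL1_LPOWER);
	mutex_unlock(&mdiodev->bus->mdio_lock);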
static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
u32 regnum, u16 mask, u16 set)
{
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 221118b5a16e..6ec5e9ac0699 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -598,9 +598,9 @@ extern void *alloc_large_system_hash(const char *tablename,
*/
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
+extern bool hashdist; /* Distribute hashes across NUMA nodes? */
#else
-#define hashdist (0)
+#define hashdist (false)
#endif
#ifdef CONFIG_MEMTEST
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0651865a4564..1baee139999f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -65,7 +65,7 @@ struct mem_cgroup_reclaim_cookie {
#define MEM_CGROUP_ID_SHIFT 16
-struct mem_cgroup_id {
+struct mem_cgroup_private_id {
int id;
refcount_t ref;
};
@@ -191,7 +191,7 @@ struct mem_cgroup {
struct cgroup_subsys_state css;
/* Private memcg ID. Used to ID objects that outlive the cgroup */
- struct mem_cgroup_id id;
+ struct mem_cgroup_private_id id;
/* Accounted resources */
struct page_counter memory; /* Both v1 & v2 */
@@ -557,13 +557,15 @@ static inline bool mem_cgroup_disabled(void)
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
- unsigned long *low)
+ unsigned long *low,
+ unsigned long *usage)
{
- *min = *low = 0;
+ *min = *low = *usage = 0;
if (mem_cgroup_disabled())
return;
+ *usage = page_counter_read(&memcg->memory);
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
@@ -819,23 +821,21 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*)(struct task_struct *, void *), void *arg);
-static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
return 0;
return memcg->id.id;
}
-struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id);
-#ifdef CONFIG_SHRINKER_DEBUG
-static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
{
- return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+ return memcg ? cgroup_id(memcg->css.cgroup) : 0;
}
-struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
-#endif
+struct mem_cgroup *mem_cgroup_get_from_id(u64 id);
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
@@ -893,7 +893,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
return true;
- return !!(memcg->css.flags & CSS_ONLINE);
+ return css_is_online(&memcg->css);
}
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
@@ -919,8 +919,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -949,7 +947,11 @@ static inline void mod_memcg_page_state(struct page *page,
rcu_read_unlock();
}
+unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
+bool memcg_stat_item_valid(int idx);
+bool memcg_vm_event_item_valid(enum vm_event_item idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx);
@@ -1037,6 +1039,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return id;
}
+void mem_cgroup_flush_workqueue(void);
+
extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */
@@ -1102,9 +1106,10 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
- unsigned long *low)
+ unsigned long *low,
+ unsigned long *usage)
{
- *min = *low = 0;
+ *min = *low = *usage = 0;
}
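
A sketch of the updated call-site shape (reclaim-side variable names
hypothetical); usage is now reported by the helper instead of being re-read
by every caller:

	unsigned long min, low, usage;

	mem_cgroup_protection(target_memcg, memcg, &min, &low, &usage);
	if (usage < min)
		continue;	/* within the protected minimum: skip reclaim */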
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1277,29 +1282,27 @@ static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
{
}
-static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
return 0;
}
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+static inline struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
{
WARN_ON_ONCE(id);
/* XXX: This should always return root_mem_cgroup */
return NULL;
}
-#ifdef CONFIG_SHRINKER_DEBUG
-static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
{
return 0;
}
-static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+static inline struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
{
return NULL;
}
-#endif
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
@@ -1328,11 +1331,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return 0;
}
-static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
- return 0;
-}
-
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
@@ -1373,6 +1371,21 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
return 0;
}
+static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
+{
+ return 0;
+}
+
+static inline bool memcg_stat_item_valid(int idx)
+{
+ return false;
+}
+
+static inline bool memcg_vm_event_item_valid(enum vm_event_item idx)
+{
+ return false;
+}
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
@@ -1436,6 +1449,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return 0;
}
+static inline void mem_cgroup_flush_workqueue(void) { }
+
static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */
@@ -1727,7 +1742,7 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_virt(void *p);
static inline void count_objcg_events(struct obj_cgroup *objcg,
enum vm_event_item idx,
@@ -1799,7 +1814,7 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return -1;
}
-static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+static inline struct mem_cgroup *mem_cgroup_from_virt(void *p)
{
return NULL;
}
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
index 7b5e11cf905f..d333dcdbeae7 100644
--- a/include/linux/memory-failure.h
+++ b/include/linux/memory-failure.h
@@ -4,8 +4,6 @@
#include <linux/interval_tree.h>
-struct pfn_address_space;
-
struct pfn_address_space {
struct interval_tree_node node;
struct address_space *mapping;
@@ -13,7 +11,18 @@ struct pfn_address_space {
unsigned long pfn, pgoff_t *pgoff);
};
+#ifdef CONFIG_MEMORY_FAILURE
int register_pfn_address_space(struct pfn_address_space *pfn_space);
void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+#else
+static inline int register_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+}
+#endif /* CONFIG_MEMORY_FAILURE */
#endif /* _LINUX_MEMORY_FAILURE_H */
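
A minimal registration sketch (driver-side names hypothetical); the new stub
keeps callers compiling when CONFIG_MEMORY_FAILURE is disabled:

	static struct pfn_address_space pfn_space;	/* fields populated elsewhere */

	ret = register_pfn_address_space(&pfn_space);
	if (ret == -EOPNOTSUPP)
		dev_info(dev, "memory failure tracking unavailable\n");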
diff --git a/include/linux/mfd/rohm-bd72720.h b/include/linux/mfd/rohm-bd72720.h
new file mode 100644
index 000000000000..ae7343bcab06
--- /dev/null
+++ b/include/linux/mfd/rohm-bd72720.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2025 ROHM Semiconductors.
+ *
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ */
+
+#ifndef _MFD_BD72720_H
+#define _MFD_BD72720_H
+
+#include <linux/regmap.h>
+
+enum {
+ BD72720_BUCK1,
+ BD72720_BUCK2,
+ BD72720_BUCK3,
+ BD72720_BUCK4,
+ BD72720_BUCK5,
+ BD72720_BUCK6,
+ BD72720_BUCK7,
+ BD72720_BUCK8,
+ BD72720_BUCK9,
+ BD72720_BUCK10,
+ BD72720_BUCK11,
+ BD72720_LDO1,
+ BD72720_LDO2,
+ BD72720_LDO3,
+ BD72720_LDO4,
+ BD72720_LDO5,
+ BD72720_LDO6,
+ BD72720_LDO7,
+ BD72720_LDO8,
+ BD72720_LDO9,
+ BD72720_LDO10,
+ BD72720_LDO11,
+ BD72720_REGULATOR_AMOUNT,
+};
+
+/* BD72720 interrupts */
+#define BD72720_INT_LONGPUSH_MASK BIT(0)
+#define BD72720_INT_MIDPUSH_MASK BIT(1)
+#define BD72720_INT_SHORTPUSH_MASK BIT(2)
+#define BD72720_INT_PUSH_MASK BIT(3)
+#define BD72720_INT_HALL_DET_MASK BIT(4)
+#define BD72720_INT_HALL_TGL_MASK BIT(5)
+#define BD72720_INT_WDOG_MASK BIT(6)
+#define BD72720_INT_SWRESET_MASK BIT(7)
+#define BD72720_INT_SEQ_DONE_MASK BIT(0)
+#define BD72720_INT_PGFAULT_MASK BIT(4)
+#define BD72720_INT_BUCK1_DVS_MASK BIT(0)
+#define BD72720_INT_BUCK2_DVS_MASK BIT(1)
+#define BD72720_INT_BUCK3_DVS_MASK BIT(2)
+#define BD72720_INT_BUCK4_DVS_MASK BIT(3)
+#define BD72720_INT_BUCK5_DVS_MASK BIT(4)
+#define BD72720_INT_BUCK6_DVS_MASK BIT(5)
+#define BD72720_INT_BUCK7_DVS_MASK BIT(6)
+#define BD72720_INT_BUCK8_DVS_MASK BIT(7)
+#define BD72720_INT_BUCK9_DVS_MASK BIT(0)
+#define BD72720_INT_BUCK10_DVS_MASK BIT(1)
+#define BD72720_INT_LDO1_DVS_MASK BIT(4)
+#define BD72720_INT_LDO2_DVS_MASK BIT(5)
+#define BD72720_INT_LDO3_DVS_MASK BIT(6)
+#define BD72720_INT_LDO4_DVS_MASK BIT(7)
+#define BD72720_INT_VBUS_RMV_MASK BIT(0)
+#define BD72720_INT_VBUS_DET_MASK BIT(1)
+#define BD72720_INT_VBUS_MON_RES_MASK BIT(2)
+#define BD72720_INT_VBUS_MON_DET_MASK BIT(3)
+#define BD72720_INT_VSYS_MON_RES_MASK BIT(0)
+#define BD72720_INT_VSYS_MON_DET_MASK BIT(1)
+#define BD72720_INT_VSYS_UV_RES_MASK BIT(2)
+#define BD72720_INT_VSYS_UV_DET_MASK BIT(3)
+#define BD72720_INT_VSYS_LO_RES_MASK BIT(4)
+#define BD72720_INT_VSYS_LO_DET_MASK BIT(5)
+#define BD72720_INT_VSYS_OV_RES_MASK BIT(6)
+#define BD72720_INT_VSYS_OV_DET_MASK BIT(7)
+#define BD72720_INT_BAT_ILIM_MASK BIT(0)
+#define BD72720_INT_CHG_DONE_MASK BIT(1)
+#define BD72720_INT_EXTEMP_TOUT_MASK BIT(2)
+#define BD72720_INT_CHG_WDT_EXP_MASK BIT(3)
+#define BD72720_INT_BAT_MNT_OUT_MASK BIT(4)
+#define BD72720_INT_BAT_MNT_IN_MASK BIT(5)
+#define BD72720_INT_CHG_TRNS_MASK BIT(7)
+#define BD72720_INT_VBAT_MON_RES_MASK BIT(0)
+#define BD72720_INT_VBAT_MON_DET_MASK BIT(1)
+#define BD72720_INT_VBAT_SHT_RES_MASK BIT(2)
+#define BD72720_INT_VBAT_SHT_DET_MASK BIT(3)
+#define BD72720_INT_VBAT_LO_RES_MASK BIT(4)
+#define BD72720_INT_VBAT_LO_DET_MASK BIT(5)
+#define BD72720_INT_VBAT_OV_RES_MASK BIT(6)
+#define BD72720_INT_VBAT_OV_DET_MASK BIT(7)
+#define BD72720_INT_BAT_RMV_MASK BIT(0)
+#define BD72720_INT_BAT_DET_MASK BIT(1)
+#define BD72720_INT_DBAT_DET_MASK BIT(2)
+#define BD72720_INT_BAT_TEMP_TRNS_MASK BIT(3)
+#define BD72720_INT_LOBTMP_RES_MASK BIT(4)
+#define BD72720_INT_LOBTMP_DET_MASK BIT(5)
+#define BD72720_INT_OVBTMP_RES_MASK BIT(6)
+#define BD72720_INT_OVBTMP_DET_MASK BIT(7)
+#define BD72720_INT_OCUR1_RES_MASK BIT(0)
+#define BD72720_INT_OCUR1_DET_MASK BIT(1)
+#define BD72720_INT_OCUR2_RES_MASK BIT(2)
+#define BD72720_INT_OCUR2_DET_MASK BIT(3)
+#define BD72720_INT_OCUR3_RES_MASK BIT(4)
+#define BD72720_INT_OCUR3_DET_MASK BIT(5)
+#define BD72720_INT_CC_MON1_DET_MASK BIT(0)
+#define BD72720_INT_CC_MON2_DET_MASK BIT(1)
+#define BD72720_INT_CC_MON3_DET_MASK BIT(2)
+#define BD72720_INT_GPIO1_IN_MASK BIT(4)
+#define BD72720_INT_GPIO2_IN_MASK BIT(5)
+#define BD72720_INT_VF125_RES_MASK BIT(0)
+#define BD72720_INT_VF125_DET_MASK BIT(1)
+#define BD72720_INT_VF_RES_MASK BIT(2)
+#define BD72720_INT_VF_DET_MASK BIT(3)
+#define BD72720_INT_RTC0_MASK BIT(4)
+#define BD72720_INT_RTC1_MASK BIT(5)
+#define BD72720_INT_RTC2_MASK BIT(6)
+
+enum {
+ /*
+	 * The IRQs, excluding GPIO1 and GPIO2, are ordered in the same way as
+	 * the respective IRQ bits in the status and mask registers.
+	 *
+	 * BD72720_INT_GPIO1_IN and BD72720_INT_GPIO2_IN are IRQs which can
+	 * be used by other devices. Keep GPIO1 and GPIO2 as the first IRQs
+	 * here so we can use regmap-IRQ with the standard device tree xlate
+	 * while devices connected to the BD72720 IRQ input pins can refer to
+	 * the first two interrupt numbers in their device tree. If we placed
+	 * BD72720_INT_GPIO1_IN and BD72720_INT_GPIO2_IN after the CC_MON_DET
+	 * interrupts (as they are in the registers), devices using the
+	 * BD72720 as an IRQ parent would have to refer to the interrupts with
+	 * an offset, which might not be trivial to understand.
+ */
+ BD72720_INT_GPIO1_IN,
+ BD72720_INT_GPIO2_IN,
+ BD72720_INT_LONGPUSH,
+ BD72720_INT_MIDPUSH,
+ BD72720_INT_SHORTPUSH,
+ BD72720_INT_PUSH,
+ BD72720_INT_HALL_DET,
+ BD72720_INT_HALL_TGL,
+ BD72720_INT_WDOG,
+ BD72720_INT_SWRESET,
+ BD72720_INT_SEQ_DONE,
+ BD72720_INT_PGFAULT,
+ BD72720_INT_BUCK1_DVS,
+ BD72720_INT_BUCK2_DVS,
+ BD72720_INT_BUCK3_DVS,
+ BD72720_INT_BUCK4_DVS,
+ BD72720_INT_BUCK5_DVS,
+ BD72720_INT_BUCK6_DVS,
+ BD72720_INT_BUCK7_DVS,
+ BD72720_INT_BUCK8_DVS,
+ BD72720_INT_BUCK9_DVS,
+ BD72720_INT_BUCK10_DVS,
+ BD72720_INT_LDO1_DVS,
+ BD72720_INT_LDO2_DVS,
+ BD72720_INT_LDO3_DVS,
+ BD72720_INT_LDO4_DVS,
+ BD72720_INT_VBUS_RMV,
+ BD72720_INT_VBUS_DET,
+ BD72720_INT_VBUS_MON_RES,
+ BD72720_INT_VBUS_MON_DET,
+ BD72720_INT_VSYS_MON_RES,
+ BD72720_INT_VSYS_MON_DET,
+ BD72720_INT_VSYS_UV_RES,
+ BD72720_INT_VSYS_UV_DET,
+ BD72720_INT_VSYS_LO_RES,
+ BD72720_INT_VSYS_LO_DET,
+ BD72720_INT_VSYS_OV_RES,
+ BD72720_INT_VSYS_OV_DET,
+ BD72720_INT_BAT_ILIM,
+ BD72720_INT_CHG_DONE,
+ BD72720_INT_EXTEMP_TOUT,
+ BD72720_INT_CHG_WDT_EXP,
+ BD72720_INT_BAT_MNT_OUT,
+ BD72720_INT_BAT_MNT_IN,
+ BD72720_INT_CHG_TRNS,
+ BD72720_INT_VBAT_MON_RES,
+ BD72720_INT_VBAT_MON_DET,
+ BD72720_INT_VBAT_SHT_RES,
+ BD72720_INT_VBAT_SHT_DET,
+ BD72720_INT_VBAT_LO_RES,
+ BD72720_INT_VBAT_LO_DET,
+ BD72720_INT_VBAT_OV_RES,
+ BD72720_INT_VBAT_OV_DET,
+ BD72720_INT_BAT_RMV,
+ BD72720_INT_BAT_DET,
+ BD72720_INT_DBAT_DET,
+ BD72720_INT_BAT_TEMP_TRNS,
+ BD72720_INT_LOBTMP_RES,
+ BD72720_INT_LOBTMP_DET,
+ BD72720_INT_OVBTMP_RES,
+ BD72720_INT_OVBTMP_DET,
+ BD72720_INT_OCUR1_RES,
+ BD72720_INT_OCUR1_DET,
+ BD72720_INT_OCUR2_RES,
+ BD72720_INT_OCUR2_DET,
+ BD72720_INT_OCUR3_RES,
+ BD72720_INT_OCUR3_DET,
+ BD72720_INT_CC_MON1_DET,
+ BD72720_INT_CC_MON2_DET,
+ BD72720_INT_CC_MON3_DET,
+ BD72720_INT_VF125_RES,
+ BD72720_INT_VF125_DET,
+ BD72720_INT_VF_RES,
+ BD72720_INT_VF_DET,
+ BD72720_INT_RTC0,
+ BD72720_INT_RTC1,
+ BD72720_INT_RTC2,
+};
+
+/*
+ * BD72720 Registers:
+ * The BD72720 has two sets of registers behind two different I2C slave
+ * addresses: the "common" registers behind 0x4b and the charger registers
+ * behind 0x4c.
+ */
+/* Registers behind I2C slave 0x4b */
+enum {
+ BD72720_REG_PRODUCT_ID,
+ BD72720_REG_MANUFACTURER_ID,
+ BD72720_REG_PMIC_REV_NUM,
+ BD72720_REG_NVM_REV_NUM,
+ BD72720_REG_BOOTSRC = 0x10,
+ BD72720_REG_RESETSRC_1,
+ BD72720_REG_RESETSRC_2,
+ BD72720_REG_RESETSRC_3,
+ BD72720_REG_RESETSRC_4,
+ BD72720_REG_RESETSRC_5,
+ BD72720_REG_RESETSRC_6,
+ BD72720_REG_RESETSRC_7,
+ BD72720_REG_POWER_STATE,
+ BD72720_REG_PS_CFG,
+ BD72720_REG_PS_CTRL_1,
+ BD72720_REG_PS_CTRL_2,
+ BD72720_REG_RCVCFG,
+ BD72720_REG_RCVNUM,
+ BD72720_REG_CRDCFG,
+ BD72720_REG_REX_CTRL,
+
+ BD72720_REG_BUCK1_ON,
+ BD72720_REG_BUCK1_MODE,
+ /* Deep idle vsel */
+ BD72720_REG_BUCK1_VSEL_DI,
+ /* Idle vsel */
+ BD72720_REG_BUCK1_VSEL_I,
+ /* Suspend vsel */
+ BD72720_REG_BUCK1_VSEL_S,
+ /* Run boot vsel */
+ BD72720_REG_BUCK1_VSEL_RB,
+ /* Run0 ... run3 vsel */
+ BD72720_REG_BUCK1_VSEL_RB0,
+ BD72720_REG_BUCK1_VSEL_RB1,
+ BD72720_REG_BUCK1_VSEL_RB2,
+ BD72720_REG_BUCK1_VSEL_RB3,
+
+ BD72720_REG_BUCK2_ON,
+ BD72720_REG_BUCK2_MODE,
+ BD72720_REG_BUCK2_VSEL_DI,
+ BD72720_REG_BUCK2_VSEL_I,
+ BD72720_REG_BUCK2_VSEL_S,
+ /* Run vsel */
+ BD72720_REG_BUCK2_VSEL_R,
+
+ BD72720_REG_BUCK3_ON,
+ BD72720_REG_BUCK3_MODE,
+ BD72720_REG_BUCK3_VSEL_DI,
+ BD72720_REG_BUCK3_VSEL_I,
+ BD72720_REG_BUCK3_VSEL_S,
+ BD72720_REG_BUCK3_VSEL_R,
+
+ BD72720_REG_BUCK4_ON,
+ BD72720_REG_BUCK4_MODE,
+ BD72720_REG_BUCK4_VSEL_DI,
+ BD72720_REG_BUCK4_VSEL_I,
+ BD72720_REG_BUCK4_VSEL_S,
+ BD72720_REG_BUCK4_VSEL_R,
+
+ BD72720_REG_BUCK5_ON,
+ BD72720_REG_BUCK5_MODE,
+ BD72720_REG_BUCK5_VSEL,
+
+ BD72720_REG_BUCK6_ON,
+ BD72720_REG_BUCK6_MODE,
+ BD72720_REG_BUCK6_VSEL,
+
+ BD72720_REG_BUCK7_ON,
+ BD72720_REG_BUCK7_MODE,
+ BD72720_REG_BUCK7_VSEL,
+
+ BD72720_REG_BUCK8_ON,
+ BD72720_REG_BUCK8_MODE,
+ BD72720_REG_BUCK8_VSEL,
+
+ BD72720_REG_BUCK9_ON,
+ BD72720_REG_BUCK9_MODE,
+ BD72720_REG_BUCK9_VSEL,
+
+ BD72720_REG_BUCK10_ON,
+ BD72720_REG_BUCK10_MODE,
+ BD72720_REG_BUCK10_VSEL,
+
+ BD72720_REG_LDO1_ON,
+ BD72720_REG_LDO1_MODE1,
+ BD72720_REG_LDO1_MODE2,
+ BD72720_REG_LDO1_VSEL_DI,
+ BD72720_REG_LDO1_VSEL_I,
+ BD72720_REG_LDO1_VSEL_S,
+ BD72720_REG_LDO1_VSEL_RB,
+ BD72720_REG_LDO1_VSEL_R0,
+ BD72720_REG_LDO1_VSEL_R1,
+ BD72720_REG_LDO1_VSEL_R2,
+ BD72720_REG_LDO1_VSEL_R3,
+
+ BD72720_REG_LDO2_ON,
+ BD72720_REG_LDO2_MODE,
+ BD72720_REG_LDO2_VSEL_DI,
+ BD72720_REG_LDO2_VSEL_I,
+ BD72720_REG_LDO2_VSEL_S,
+ BD72720_REG_LDO2_VSEL_R,
+
+ BD72720_REG_LDO3_ON,
+ BD72720_REG_LDO3_MODE,
+ BD72720_REG_LDO3_VSEL_DI,
+ BD72720_REG_LDO3_VSEL_I,
+ BD72720_REG_LDO3_VSEL_S,
+ BD72720_REG_LDO3_VSEL_R,
+
+ BD72720_REG_LDO4_ON,
+ BD72720_REG_LDO4_MODE,
+ BD72720_REG_LDO4_VSEL_DI,
+ BD72720_REG_LDO4_VSEL_I,
+ BD72720_REG_LDO4_VSEL_S,
+ BD72720_REG_LDO4_VSEL_R,
+
+ BD72720_REG_LDO5_ON,
+ BD72720_REG_LDO5_MODE,
+ BD72720_REG_LDO5_VSEL,
+
+ BD72720_REG_LDO6_ON,
+ BD72720_REG_LDO6_MODE,
+ BD72720_REG_LDO6_VSEL,
+
+ BD72720_REG_LDO7_ON,
+ BD72720_REG_LDO7_MODE,
+ BD72720_REG_LDO7_VSEL,
+
+ BD72720_REG_LDO8_ON,
+ BD72720_REG_LDO8_MODE,
+ BD72720_REG_LDO8_VSEL,
+
+ BD72720_REG_LDO9_ON,
+ BD72720_REG_LDO9_MODE,
+ BD72720_REG_LDO9_VSEL,
+
+ BD72720_REG_LDO10_ON,
+ BD72720_REG_LDO10_MODE,
+ BD72720_REG_LDO10_VSEL,
+
+ BD72720_REG_LDO11_ON,
+ BD72720_REG_LDO11_MODE,
+ BD72720_REG_LDO11_VSEL,
+
+ BD72720_REG_GPIO1_ON = 0x8b,
+ BD72720_REG_GPIO2_ON,
+ BD72720_REG_GPIO3_ON,
+ BD72720_REG_GPIO4_ON,
+ BD72720_REG_GPIO5_ON,
+
+ BD72720_REG_GPIO1_CTRL,
+ BD72720_REG_GPIO2_CTRL,
+#define BD72720_GPIO_IRQ_TYPE_MASK GENMASK(6, 4)
+#define BD72720_GPIO_IRQ_TYPE_FALLING 0x0
+#define BD72720_GPIO_IRQ_TYPE_RISING 0x1
+#define BD72720_GPIO_IRQ_TYPE_BOTH 0x2
+#define BD72720_GPIO_IRQ_TYPE_HIGH 0x3
+#define BD72720_GPIO_IRQ_TYPE_LOW 0x4
+ BD72720_REG_GPIO3_CTRL,
+ BD72720_REG_GPIO4_CTRL,
+ BD72720_REG_GPIO5_CTRL,
+#define BD72720_GPIO_DRIVE_MASK BIT(1)
+#define BD72720_GPIO_HIGH BIT(0)
+
+ BD72720_REG_EPDEN_CTRL,
+ BD72720_REG_GATECNT_CTRL,
+ BD72720_REG_LED_CTRL,
+
+ BD72720_REG_PWRON_CFG1,
+ BD72720_REG_PWRON_CFG2,
+
+ BD72720_REG_OUT32K,
+ BD72720_REG_CONF,
+ BD72720_REG_HALL_STAT,
+
+ BD72720_REG_RTC_SEC = 0xa0,
+#define BD72720_REG_RTC_START BD72720_REG_RTC_SEC
+ BD72720_REG_RTC_MIN,
+ BD72720_REG_RTC_HOUR,
+ BD72720_REG_RTC_WEEK,
+ BD72720_REG_RTC_DAY,
+ BD72720_REG_RTC_MON,
+ BD72720_REG_RTC_YEAR,
+
+ BD72720_REG_RTC_ALM0_SEC,
+#define BD72720_REG_RTC_ALM_START BD72720_REG_RTC_ALM0_SEC
+ BD72720_REG_RTC_ALM0_MIN,
+ BD72720_REG_RTC_ALM0_HOUR,
+ BD72720_REG_RTC_ALM0_WEEK,
+ BD72720_REG_RTC_ALM0_MON,
+ BD72720_REG_RTC_ALM0_YEAR,
+
+ BD72720_REG_RTC_ALM1_SEC,
+ BD72720_REG_RTC_ALM1_MIN,
+ BD72720_REG_RTC_ALM1_HOUR,
+ BD72720_REG_RTC_ALM1_WEEK,
+ BD72720_REG_RTC_ALM1_MON,
+ BD72720_REG_RTC_ALM1_YEAR,
+
+ BD72720_REG_RTC_ALM0_EN,
+ BD72720_REG_RTC_ALM1_EN,
+ BD72720_REG_RTC_ALM2,
+
+ BD72720_REG_INT_LVL1_EN = 0xc0,
+#define BD72720_MASK_LVL1_EN_ALL GENMASK(7, 0)
+ BD72720_REG_INT_PS1_EN,
+ BD72720_REG_INT_PS2_EN,
+ BD72720_REG_INT_DVS1_EN,
+ BD72720_REG_INT_DVS2_EN,
+ BD72720_REG_INT_VBUS_EN,
+ BD72720_REG_INT_VSYS_EN,
+ BD72720_REG_INT_CHG_EN,
+ BD72720_REG_INT_BAT1_EN,
+ BD72720_REG_INT_BAT2_EN,
+ BD72720_REG_INT_IBAT_EN,
+ BD72720_REG_INT_ETC1_EN,
+ BD72720_REG_INT_ETC2_EN,
+
+ /*
+	 * The _STAT registers report the IRQ line state and are used to ack
+	 * the IRQ. The _SRC registers below indicate the current state of the
+	 * function connected to the line.
+ */
+ BD72720_REG_INT_LVL1_STAT,
+ BD72720_REG_INT_PS1_STAT,
+ BD72720_REG_INT_PS2_STAT,
+ BD72720_REG_INT_DVS1_STAT,
+ BD72720_REG_INT_DVS2_STAT,
+ BD72720_REG_INT_VBUS_STAT,
+ BD72720_REG_INT_VSYS_STAT,
+ BD72720_REG_INT_CHG_STAT,
+ BD72720_REG_INT_BAT1_STAT,
+ BD72720_REG_INT_BAT2_STAT,
+ BD72720_REG_INT_IBAT_STAT,
+ BD72720_REG_INT_ETC1_STAT,
+ BD72720_REG_INT_ETC2_STAT,
+
+ BD72720_REG_INT_LVL1_SRC,
+ BD72720_REG_INT_PS1_SRC,
+ BD72720_REG_INT_PS2_SRC,
+ BD72720_REG_INT_DVS1_SRC,
+ BD72720_REG_INT_DVS2_SRC,
+ BD72720_REG_INT_VBUS_SRC,
+#define BD72720_MASK_DCIN_DET BIT(1)
+ BD72720_REG_INT_VSYS_SRC,
+ BD72720_REG_INT_CHG_SRC,
+ BD72720_REG_INT_BAT1_SRC,
+ BD72720_REG_INT_BAT2_SRC,
+ BD72720_REG_INT_IBAT_SRC,
+ BD72720_REG_INT_ETC1_SRC,
+ BD72720_REG_INT_ETC2_SRC,
+};
+
+/* Register masks */
+#define BD72720_MASK_DEEP_IDLE_EN BIT(0)
+#define BD72720_MASK_IDLE_EN BIT(1)
+#define BD72720_MASK_SUSPEND_EN BIT(2)
+#define BD72720_MASK_RUN_B_EN BIT(3)
+#define BD72720_MASK_RUN_0_EN BIT(4)
+#define BD72720_MASK_RUN_1_EN BIT(5)
+#define BD72720_MASK_RUN_2_EN BIT(6)
+#define BD72720_MASK_RUN_3_EN BIT(7)
+
+#define BD72720_MASK_RAMP_UP_DELAY GENMASK(7, 6)
+#define BD72720_MASK_BUCK_VSEL GENMASK(7, 0)
+#define BD72720_MASK_LDO12346_VSEL GENMASK(6, 0)
+#define BD72720_MASK_LDO_VSEL GENMASK(7, 0)
+
+#define BD72720_I2C4C_ADDR_OFFSET 0x100
+
+/* Registers behind I2C slave 0x4c */
+enum {
+ BD72720_REG_CHG_STATE = BD72720_I2C4C_ADDR_OFFSET,
+ BD72720_REG_CHG_LAST_STATE,
+ BD72720_REG_CHG_VBUS_STAT,
+ BD72720_REG_CHG_VSYS_STAT,
+ BD72720_REG_CHG_BAT_TEMP_STAT,
+ BD72720_REG_CHG_WDT_STAT,
+ BD72720_REG_CHG_ILIM_STAT,
+ BD72720_REG_CHG_CHG_STAT,
+ BD72720_REG_CHG_EN,
+ BD72720_REG_CHG_INIT,
+ BD72720_REG_CHG_CTRL,
+ BD72720_REG_CHG_SET_1,
+ BD72720_REG_CHG_SET_2,
+ BD72720_REG_CHG_SET_3,
+ BD72720_REG_CHG_VPRE,
+ BD72720_REG_CHG_VBAT_1,
+ BD72720_REG_CHG_VBAT_2,
+ BD72720_REG_CHG_VBAT_3,
+ BD72720_REG_CHG_VBAT_4,
+ BD72720_REG_CHG_BAT_SET_1,
+ BD72720_REG_CHG_BAT_SET_2,
+ BD72720_REG_CHG_BAT_SET_3,
+ BD72720_REG_CHG_IPRE,
+ BD72720_REG_CHG_IFST_TERM,
+ BD72720_REG_CHG_VSYS_REG,
+ BD72720_REG_CHG_VBUS_SET,
+ BD72720_REG_CHG_WDT_PRE,
+ BD72720_REG_CHG_WDT_FST,
+ BD72720_REG_CHG_LED_CTRL,
+ BD72720_REG_CHG_CFG_1,
+ BD72720_REG_CHG_IFST_1,
+ BD72720_REG_CHG_IFST_2,
+ BD72720_REG_CHG_IFST_3,
+ BD72720_REG_CHG_IFST_4,
+ BD72720_REG_CHG_S_CFG_1,
+ BD72720_REG_CHG_S_CFG_2,
+ BD72720_REG_RS_VBUS,
+ BD72720_REG_RS_IBUS,
+ BD72720_REG_RS_VSYS,
+	BD72720_REG_VSYS_STATE_STAT, /* 0x27 + offset */
+
+ BD72720_REG_VM_VBAT_U = BD72720_I2C4C_ADDR_OFFSET + 0x30,
+ BD72720_REG_VM_VBAT_L,
+ BD72720_REG_VM_OCV_PRE_U,
+ BD72720_REG_VM_OCV_PRE_L,
+ BD72720_REG_VM_OCV_PST_U,
+ BD72720_REG_VM_OCV_PST_L,
+ BD72720_REG_VM_OCV_PWRON_U,
+ BD72720_REG_VM_OCV_PWRON_L,
+ BD72720_REG_VM_DVBAT_IMP_U,
+ BD72720_REG_VM_DVBAT_IMP_L,
+ BD72720_REG_VM_SA_VBAT_U,
+ BD72720_REG_VM_SA_VBAT_L,
+ BD72720_REG_VM_SA_VBAT_MIN_U,
+ BD72720_REG_VM_SA_VBAT_MIN_L,
+ BD72720_REG_VM_SA_VBAT_MAX_U,
+ BD72720_REG_VM_SA_VBAT_MAX_L,
+ BD72720_REG_REX_SA_VBAT_U,
+ BD72720_REG_REX_SA_VBAT_L,
+ BD72720_REG_VM_VSYS_U,
+ BD72720_REG_VM_VSYS_L,
+ BD72720_REG_VM_SA_VSYS_U,
+ BD72720_REG_VM_SA_VSYS_L,
+ BD72720_REG_VM_SA_VSYS_MIN_U,
+ BD72720_REG_VM_SA_VSYS_MIN_L,
+ BD72720_REG_VM_SA_VSYS_MAX_U,
+ BD72720_REG_VM_SA_VSYS_MAX_L,
+ BD72720_REG_VM_SA2_VSYS_U,
+ BD72720_REG_VM_SA2_VSYS_L,
+ BD72720_REG_VM_VBUS_U,
+#define BD72720_MASK_VDCIN_U GENMASK(3, 0)
+ BD72720_REG_VM_VBUS_L,
+ BD72720_REG_VM_BATID_U,
+ BD72720_REG_VM_BATID_L,
+ BD72720_REG_VM_BATID_NOLOAD_U,
+ BD72720_REG_VM_BATID_NOLOAD_L,
+ BD72720_REG_VM_BATID_OFS_U,
+ BD72720_REG_VM_BATID_OFS_L,
+ BD72720_REG_VM_VTH_U,
+ BD72720_REG_VM_VTH_L,
+ BD72720_REG_VM_VTH_CORR_U,
+ BD72720_REG_VM_VTH_CORR_L,
+ BD72720_REG_VM_BTMP_U,
+ BD72720_REG_VM_BTMP_L,
+ BD72720_REG_VM_BTMP_IMP_U,
+ BD72720_REG_VM_BTMP_IMP_L,
+ BD72720_REG_VM_VF_U,
+ BD72720_REG_VM_VF_L,
+ BD72720_REG_VM_BATID_TH_U,
+ BD72720_REG_VM_BATID_TH_L,
+ BD72720_REG_VM_BTMP_OV_THR,
+ BD72720_REG_VM_BTMP_OV_DUR,
+ BD72720_REG_VM_BTMP_LO_THR,
+ BD72720_REG_VM_BTMP_LO_DUR,
+ BD72720_REG_ALM_VBAT_TH_U,
+ BD72720_REG_ALM_VBAT_TH_L,
+ BD72720_REG_ALM_VSYS_TH,
+ BD72720_REG_ALM_VBUS_TH,
+ BD72720_REG_ALM_VF_TH,
+ BD72720_REG_VSYS_MAX,
+ BD72720_REG_VSYS_MIN,
+ BD72720_REG_VM_VSYS_SA_MINMAX_CTRL,
+	BD72720_REG_VM_SA_CFG, /* 0x6c + offset */
+
+ BD72720_REG_CC_CURCD_U = BD72720_I2C4C_ADDR_OFFSET + 0x70,
+ BD72720_REG_CC_CURCD_L,
+ BD72720_REG_CC_CURCD_IMP_U,
+ BD72720_REG_CC_CURCD_IMP_L,
+ BD72720_REG_CC_SA_CURCD_U,
+ BD72720_REG_CC_SA_CURCD_L,
+ BD72720_REG_CC_OCUR_MON,
+ BD72720_REG_CC_CCNTD_3,
+ BD72720_REG_CC_CCNTD_2,
+ BD72720_REG_CC_CCNTD_1,
+ BD72720_REG_CC_CCNTD_0,
+ BD72720_REG_REX_CCNTD_3,
+ BD72720_REG_REX_CCNTD_2,
+ BD72720_REG_REX_CCNTD_1,
+ BD72720_REG_REX_CCNTD_0,
+ BD72720_REG_FULL_CCNTD_3,
+ BD72720_REG_FULL_CCNTD_2,
+ BD72720_REG_FULL_CCNTD_1,
+ BD72720_REG_FULL_CCNTD_0,
+ BD72720_REG_CCNTD_CHG_3,
+ BD72720_REG_CCNTD_CHG_2,
+ BD72720_REG_CC_STAT,
+ BD72720_REG_CC_CTRL,
+ BD72720_REG_CC_OCUR_THR_1,
+ BD72720_REG_CC_OCUR_THR_2,
+ BD72720_REG_CC_OCUR_THR_3,
+ BD72720_REG_REX_CURCD_TH,
+ BD72720_REG_CC_BATCAP1_TH_U,
+ BD72720_REG_CC_BATCAP1_TH_L,
+ BD72720_REG_CC_BATCAP2_TH_U,
+ BD72720_REG_CC_BATCAP2_TH_L,
+ BD72720_REG_CC_BATCAP3_TH_U,
+ BD72720_REG_CC_BATCAP3_TH_L,
+ BD72720_REG_CC_CCNTD_CTRL,
+	BD72720_REG_CC_SA_CFG, /* 0x92 + offset */
+ BD72720_REG_IMPCHK_CTRL = BD72720_I2C4C_ADDR_OFFSET + 0xa0,
+};
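
An illustrative decode helper (hypothetical, not part of the header), showing
how the 0x100 offset folds both I2C slaves into one linear register space:

	static inline bool bd72720_reg_behind_0x4c(unsigned int reg)
	{
		return reg >= BD72720_I2C4C_ADDR_OFFSET;
	}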
+
+#endif /* _MFD_BD72720_H */
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 579e8dcfcca4..0a284919a6c3 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -16,6 +16,7 @@ enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_BD71837,
ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD72720,
ROHM_CHIP_TYPE_BD96801,
ROHM_CHIP_TYPE_BD96802,
ROHM_CHIP_TYPE_BD96805,
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index d785e101fe79..4480c631110a 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -40,6 +40,7 @@ enum sec_device_type {
S2DOS05,
S2MPA01,
S2MPG10,
+ S2MPG11,
S2MPS11X,
S2MPS13X,
S2MPS14X,
@@ -69,7 +70,6 @@ struct sec_pmic_dev {
int device_type;
int irq;
- struct regmap_irq_chip_data *irq_data;
};
struct sec_platform_data {
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 8402a5f8e18a..6eab95de6fa8 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -166,6 +166,111 @@ enum s2mpg10_irq {
S2MPG10_IRQ_NR,
};
+enum s2mpg11_common_irq {
+ /* Top-level (common) block */
+ S2MPG11_COMMON_IRQ_PMIC,
+ S2MPG11_COMMON_IRQ_UNUSED,
+};
+
+enum s2mpg11_irq {
+ /* PMIC */
+ S2MPG11_IRQ_PWRONF,
+ S2MPG11_IRQ_PWRONR,
+ S2MPG11_IRQ_PIF_TIMEOUT_MIF,
+ S2MPG11_IRQ_PIF_TIMEOUTS,
+ S2MPG11_IRQ_WTSR,
+ S2MPG11_IRQ_SPD_ABNORMAL_STOP,
+ S2MPG11_IRQ_SPD_PARITY_ERR,
+#define S2MPG11_IRQ_PWRONF_MASK BIT(0)
+#define S2MPG11_IRQ_PWRONR_MASK BIT(1)
+#define S2MPG11_IRQ_PIF_TIMEOUT_MIF_MASK BIT(3)
+#define S2MPG11_IRQ_PIF_TIMEOUTS_MASK BIT(4)
+#define S2MPG11_IRQ_WTSR_MASK BIT(5)
+#define S2MPG11_IRQ_SPD_ABNORMAL_STOP_MASK BIT(6)
+#define S2MPG11_IRQ_SPD_PARITY_ERR_MASK BIT(7)
+
+ S2MPG11_IRQ_140C,
+ S2MPG11_IRQ_120C,
+ S2MPG11_IRQ_TSD,
+ S2MPG11_IRQ_WRST,
+ S2MPG11_IRQ_NTC_CYCLE_DONE,
+ S2MPG11_IRQ_PMETER_OVERF,
+#define S2MPG11_IRQ_INT140C_MASK BIT(0)
+#define S2MPG11_IRQ_INT120C_MASK BIT(1)
+#define S2MPG11_IRQ_TSD_MASK BIT(2)
+#define S2MPG11_IRQ_WRST_MASK BIT(5)
+#define S2MPG11_IRQ_NTC_CYCLE_DONE_MASK BIT(6)
+#define S2MPG11_IRQ_PMETER_OVERF_MASK BIT(7)
+
+ S2MPG11_IRQ_OCP_B1S,
+ S2MPG11_IRQ_OCP_B2S,
+ S2MPG11_IRQ_OCP_B3S,
+ S2MPG11_IRQ_OCP_B4S,
+ S2MPG11_IRQ_OCP_B5S,
+ S2MPG11_IRQ_OCP_B6S,
+ S2MPG11_IRQ_OCP_B7S,
+ S2MPG11_IRQ_OCP_B8S,
+#define S2MPG11_IRQ_OCP_B1S_MASK BIT(0)
+#define S2MPG11_IRQ_OCP_B2S_MASK BIT(1)
+#define S2MPG11_IRQ_OCP_B3S_MASK BIT(2)
+#define S2MPG11_IRQ_OCP_B4S_MASK BIT(3)
+#define S2MPG11_IRQ_OCP_B5S_MASK BIT(4)
+#define S2MPG11_IRQ_OCP_B6S_MASK BIT(5)
+#define S2MPG11_IRQ_OCP_B7S_MASK BIT(6)
+#define S2MPG11_IRQ_OCP_B8S_MASK BIT(7)
+
+ S2MPG11_IRQ_OCP_B9S,
+ S2MPG11_IRQ_OCP_B10S,
+ S2MPG11_IRQ_OCP_BDS,
+ S2MPG11_IRQ_OCP_BAS,
+ S2MPG11_IRQ_OCP_BBS,
+ S2MPG11_IRQ_WLWP_ACC,
+ S2MPG11_IRQ_SPD_SRP_PKT_RST,
+#define S2MPG11_IRQ_OCP_B9S_MASK BIT(0)
+#define S2MPG11_IRQ_OCP_B10S_MASK BIT(1)
+#define S2MPG11_IRQ_OCP_BDS_MASK BIT(2)
+#define S2MPG11_IRQ_OCP_BAS_MASK BIT(3)
+#define S2MPG11_IRQ_OCP_BBS_MASK BIT(4)
+#define S2MPG11_IRQ_WLWP_ACC_MASK BIT(5)
+#define S2MPG11_IRQ_SPD_SRP_PKT_RST_MASK BIT(7)
+
+ S2MPG11_IRQ_PWR_WARN_CH0,
+ S2MPG11_IRQ_PWR_WARN_CH1,
+ S2MPG11_IRQ_PWR_WARN_CH2,
+ S2MPG11_IRQ_PWR_WARN_CH3,
+ S2MPG11_IRQ_PWR_WARN_CH4,
+ S2MPG11_IRQ_PWR_WARN_CH5,
+ S2MPG11_IRQ_PWR_WARN_CH6,
+ S2MPG11_IRQ_PWR_WARN_CH7,
+#define S2MPG11_IRQ_PWR_WARN_CH0_MASK BIT(0)
+#define S2MPG11_IRQ_PWR_WARN_CH1_MASK BIT(1)
+#define S2MPG11_IRQ_PWR_WARN_CH2_MASK BIT(2)
+#define S2MPG11_IRQ_PWR_WARN_CH3_MASK BIT(3)
+#define S2MPG11_IRQ_PWR_WARN_CH4_MASK BIT(4)
+#define S2MPG11_IRQ_PWR_WARN_CH5_MASK BIT(5)
+#define S2MPG11_IRQ_PWR_WARN_CH6_MASK BIT(6)
+#define S2MPG11_IRQ_PWR_WARN_CH7_MASK BIT(7)
+
+ S2MPG11_IRQ_NTC_WARN_CH0,
+ S2MPG11_IRQ_NTC_WARN_CH1,
+ S2MPG11_IRQ_NTC_WARN_CH2,
+ S2MPG11_IRQ_NTC_WARN_CH3,
+ S2MPG11_IRQ_NTC_WARN_CH4,
+ S2MPG11_IRQ_NTC_WARN_CH5,
+ S2MPG11_IRQ_NTC_WARN_CH6,
+ S2MPG11_IRQ_NTC_WARN_CH7,
+#define S2MPG11_IRQ_NTC_WARN_CH0_MASK BIT(0)
+#define S2MPG11_IRQ_NTC_WARN_CH1_MASK BIT(1)
+#define S2MPG11_IRQ_NTC_WARN_CH2_MASK BIT(2)
+#define S2MPG11_IRQ_NTC_WARN_CH3_MASK BIT(3)
+#define S2MPG11_IRQ_NTC_WARN_CH4_MASK BIT(4)
+#define S2MPG11_IRQ_NTC_WARN_CH5_MASK BIT(5)
+#define S2MPG11_IRQ_NTC_WARN_CH6_MASK BIT(6)
+#define S2MPG11_IRQ_NTC_WARN_CH7_MASK BIT(7)
+
+ S2MPG11_IRQ_NR,
+};
+
enum s2mps11_irq {
S2MPS11_IRQ_PWRONF,
S2MPS11_IRQ_PWRONR,
diff --git a/include/linux/mfd/samsung/s2mpg10.h b/include/linux/mfd/samsung/s2mpg10.h
index 9f5919b89a3c..8e5cf21cbd5a 100644
--- a/include/linux/mfd/samsung/s2mpg10.h
+++ b/include/linux/mfd/samsung/s2mpg10.h
@@ -290,6 +290,30 @@ enum s2mpg10_pmic_reg {
S2MPG10_PMIC_LDO_SENSE4,
};
+/* Rail controlled externally, based on PCTRLSELx */
+#define S2MPG10_PMIC_CTRL_ENABLE_EXT BIT(0)
+
+/* For S2MPG10_PMIC_PCTRLSELx */
+#define S2MPG10_PCTRLSEL_PWREN 0x1 /* PWREN pin */
+#define S2MPG10_PCTRLSEL_PWREN_TRG 0x2 /* PWREN_TRG bit in MIMICKING_CTRL */
+#define S2MPG10_PCTRLSEL_PWREN_MIF 0x3 /* PWREN_MIF pin */
+#define S2MPG10_PCTRLSEL_PWREN_MIF_TRG 0x4 /* PWREN_MIF_TRG bit in MIMICKING_CTRL */
+#define S2MPG10_PCTRLSEL_AP_ACTIVE_N 0x5 /* ~AP_ACTIVE_N pin */
+#define S2MPG10_PCTRLSEL_AP_ACTIVE_N_TRG 0x6 /* ~AP_ACTIVE_N_TRG bit in MIMICKING_CTRL */
+#define S2MPG10_PCTRLSEL_CPUCL1_EN 0x7 /* CPUCL1_EN pin */
+#define S2MPG10_PCTRLSEL_CPUCL1_EN2 0x8 /* CPUCL1_EN & PWREN pins */
+#define S2MPG10_PCTRLSEL_CPUCL2_EN 0x9 /* CPUCL2_EN pin */
+#define S2MPG10_PCTRLSEL_CPUCL2_EN2 0xa /* CPUCL2_EN & PWREN pins */
+#define S2MPG10_PCTRLSEL_TPU_EN 0xb /* TPU_EN pin */
+#define S2MPG10_PCTRLSEL_TPU_EN2 0xc /* TPU_EN & ~AP_ACTIVE_N pins */
+#define S2MPG10_PCTRLSEL_TCXO_ON 0xd /* TCXO_ON pin */
+#define S2MPG10_PCTRLSEL_TCXO_ON2 0xe /* TCXO_ON & ~AP_ACTIVE_N pins */
+
+/* For S2MPG10_PMIC_PCTRLSELx of LDO20M */
+#define S2MPG10_PCTRLSEL_LDO20M_EN2 0x1 /* VLDO20M_EN & LDO20M_SFR */
+#define S2MPG10_PCTRLSEL_LDO20M_EN 0x2 /* VLDO20M_EN pin */
+#define S2MPG10_PCTRLSEL_LDO20M_SFR 0x3 /* LDO20M_SFR bit in LDO_CTRL1 register */
+
/* Meter registers (type 0xa00) */
enum s2mpg10_meter_reg {
S2MPG10_METER_CTRL1,
@@ -407,6 +431,16 @@ enum s2mpg10_meter_reg {
/* S2MPG10 regulator IDs */
enum s2mpg10_regulators {
+ S2MPG10_BUCK1,
+ S2MPG10_BUCK2,
+ S2MPG10_BUCK3,
+ S2MPG10_BUCK4,
+ S2MPG10_BUCK5,
+ S2MPG10_BUCK6,
+ S2MPG10_BUCK7,
+ S2MPG10_BUCK8,
+ S2MPG10_BUCK9,
+ S2MPG10_BUCK10,
S2MPG10_LDO1,
S2MPG10_LDO2,
S2MPG10_LDO3,
@@ -438,16 +472,6 @@ enum s2mpg10_regulators {
S2MPG10_LDO29,
S2MPG10_LDO30,
S2MPG10_LDO31,
- S2MPG10_BUCK1,
- S2MPG10_BUCK2,
- S2MPG10_BUCK3,
- S2MPG10_BUCK4,
- S2MPG10_BUCK5,
- S2MPG10_BUCK6,
- S2MPG10_BUCK7,
- S2MPG10_BUCK8,
- S2MPG10_BUCK9,
- S2MPG10_BUCK10,
S2MPG10_REGULATOR_MAX,
};
diff --git a/include/linux/mfd/samsung/s2mpg11.h b/include/linux/mfd/samsung/s2mpg11.h
new file mode 100644
index 000000000000..66daa3bafa6e
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpg11.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015 Samsung Electronics
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_S2MPG11_H
+#define __LINUX_MFD_S2MPG11_H
+
+/* Common registers (type 0x000) */
+enum s2mpg11_common_reg {
+ S2MPG11_COMMON_CHIPID,
+ S2MPG11_COMMON_INT,
+ S2MPG11_COMMON_INT_MASK,
+ S2MPG11_COMMON_SPD_CTRL1 = 0x0a,
+ S2MPG11_COMMON_SPD_CTRL2,
+ S2MPG11_COMMON_SPD_CTRL3,
+ S2MPG11_COMMON_MON1SEL = 0x1a,
+ S2MPG11_COMMON_MON2SEL,
+ S2MPG11_COMMON_MONR,
+ S2MPG11_COMMON_DEBUG_CTRL1,
+ S2MPG11_COMMON_DEBUG_CTRL2,
+ S2MPG11_COMMON_DEBUG_CTRL3,
+ S2MPG11_COMMON_DEBUG_CTRL4,
+ S2MPG11_COMMON_DEBUG_CTRL5,
+ S2MPG11_COMMON_DEBUG_CTRL6,
+ S2MPG11_COMMON_TEST_MODE1,
+ S2MPG11_COMMON_SPD_DEBUG1,
+ S2MPG11_COMMON_SPD_DEBUG2,
+ S2MPG11_COMMON_SPD_DEBUG3,
+ S2MPG11_COMMON_SPD_DEBUG4,
+};
+
+/* For S2MPG11_COMMON_INT and S2MPG11_COMMON_INT_MASK */
+#define S2MPG11_COMMON_INT_SRC GENMASK(2, 0)
+#define S2MPG11_COMMON_INT_SRC_PMIC BIT(0)
+
+/* PMIC registers (type 0x100) */
+enum s2mpg11_pmic_reg {
+ S2MPG11_PMIC_INT1,
+ S2MPG11_PMIC_INT2,
+ S2MPG11_PMIC_INT3,
+ S2MPG11_PMIC_INT4,
+ S2MPG11_PMIC_INT5,
+ S2MPG11_PMIC_INT6,
+ S2MPG11_PMIC_INT1M,
+ S2MPG11_PMIC_INT2M,
+ S2MPG11_PMIC_INT3M,
+ S2MPG11_PMIC_INT4M,
+ S2MPG11_PMIC_INT5M,
+ S2MPG11_PMIC_INT6M,
+ S2MPG11_PMIC_STATUS1,
+ S2MPG11_PMIC_OFFSRC,
+ S2MPG11_PMIC_COMMON_CTRL1,
+ S2MPG11_PMIC_COMMON_CTRL2,
+ S2MPG11_PMIC_COMMON_CTRL3,
+ S2MPG11_PMIC_MIMICKING_CTRL,
+ S2MPG11_PMIC_B1S_CTRL,
+ S2MPG11_PMIC_B1S_OUT1,
+ S2MPG11_PMIC_B1S_OUT2,
+ S2MPG11_PMIC_B2S_CTRL,
+ S2MPG11_PMIC_B2S_OUT1,
+ S2MPG11_PMIC_B2S_OUT2,
+ S2MPG11_PMIC_B3S_CTRL,
+ S2MPG11_PMIC_B3S_OUT1,
+ S2MPG11_PMIC_B3S_OUT2,
+ S2MPG11_PMIC_B4S_CTRL,
+ S2MPG11_PMIC_B4S_OUT,
+ S2MPG11_PMIC_B5S_CTRL,
+ S2MPG11_PMIC_B5S_OUT,
+ S2MPG11_PMIC_B6S_CTRL,
+ S2MPG11_PMIC_B6S_OUT1,
+ S2MPG11_PMIC_B6S_OUT2,
+ S2MPG11_PMIC_B7S_CTRL,
+ S2MPG11_PMIC_B7S_OUT1,
+ S2MPG11_PMIC_B7S_OUT2,
+ S2MPG11_PMIC_B8S_CTRL,
+ S2MPG11_PMIC_B8S_OUT1,
+ S2MPG11_PMIC_B8S_OUT2,
+ S2MPG11_PMIC_B9S_CTRL,
+ S2MPG11_PMIC_B9S_OUT1,
+ S2MPG11_PMIC_B9S_OUT2,
+ S2MPG11_PMIC_B10S_CTRL,
+ S2MPG11_PMIC_B10S_OUT,
+ S2MPG11_PMIC_BUCKD_CTRL,
+ S2MPG11_PMIC_BUCKD_OUT,
+ S2MPG11_PMIC_BUCKA_CTRL,
+ S2MPG11_PMIC_BUCKA_OUT,
+ S2MPG11_PMIC_BB_CTRL,
+ S2MPG11_PMIC_BB_OUT1,
+ S2MPG11_PMIC_BB_OUT2,
+ S2MPG11_PMIC_BUCK1S_USONIC,
+ S2MPG11_PMIC_BUCK2S_USONIC,
+ S2MPG11_PMIC_BUCK3S_USONIC,
+ S2MPG11_PMIC_BUCK4S_USONIC,
+ S2MPG11_PMIC_BUCK5S_USONIC,
+ S2MPG11_PMIC_BUCK6S_USONIC,
+ S2MPG11_PMIC_BUCK7S_USONIC,
+ S2MPG11_PMIC_BUCK8S_USONIC,
+ S2MPG11_PMIC_BUCK9S_USONIC,
+ S2MPG11_PMIC_BUCK10S_USONIC,
+ S2MPG11_PMIC_BUCKD_USONIC,
+ S2MPG11_PMIC_BUCKA_USONIC,
+ S2MPG11_PMIC_BB_USONIC,
+ S2MPG11_PMIC_L1S_CTRL1,
+ S2MPG11_PMIC_L1S_CTRL2,
+ S2MPG11_PMIC_L2S_CTRL1,
+ S2MPG11_PMIC_L2S_CTRL2,
+ S2MPG11_PMIC_L3S_CTRL,
+ S2MPG11_PMIC_L4S_CTRL,
+ S2MPG11_PMIC_L5S_CTRL,
+ S2MPG11_PMIC_L6S_CTRL,
+ S2MPG11_PMIC_L7S_CTRL,
+ S2MPG11_PMIC_L8S_CTRL,
+ S2MPG11_PMIC_L9S_CTRL,
+ S2MPG11_PMIC_L10S_CTRL,
+ S2MPG11_PMIC_L11S_CTRL,
+ S2MPG11_PMIC_L12S_CTRL,
+ S2MPG11_PMIC_L13S_CTRL,
+ S2MPG11_PMIC_L14S_CTRL,
+ S2MPG11_PMIC_L15S_CTRL,
+ S2MPG11_PMIC_LDO_CTRL1,
+ S2MPG11_PMIC_LDO_DSCH1,
+ S2MPG11_PMIC_LDO_DSCH2,
+ S2MPG11_PMIC_DVS_RAMP1,
+ S2MPG11_PMIC_DVS_RAMP2,
+ S2MPG11_PMIC_DVS_RAMP3,
+ S2MPG11_PMIC_DVS_RAMP4,
+ S2MPG11_PMIC_DVS_RAMP5,
+ S2MPG11_PMIC_DVS_RAMP6,
+ /* Nothing @ 0x5a */
+ S2MPG11_PMIC_DVS_SYNC_CTRL1 = 0x5c,
+ S2MPG11_PMIC_DVS_SYNC_CTRL2,
+ S2MPG11_PMIC_OFF_CTRL1,
+ S2MPG11_PMIC_OFF_CTRL2,
+ S2MPG11_PMIC_OFF_CTRL3,
+ S2MPG11_PMIC_SEQ_CTRL1,
+ S2MPG11_PMIC_SEQ_CTRL2,
+ S2MPG11_PMIC_SEQ_CTRL3,
+ S2MPG11_PMIC_SEQ_CTRL4,
+ S2MPG11_PMIC_SEQ_CTRL5,
+ S2MPG11_PMIC_SEQ_CTRL6,
+ S2MPG11_PMIC_SEQ_CTRL7,
+ S2MPG11_PMIC_SEQ_CTRL8,
+ S2MPG11_PMIC_SEQ_CTRL9,
+ S2MPG11_PMIC_SEQ_CTRL10,
+ S2MPG11_PMIC_SEQ_CTRL11,
+ S2MPG11_PMIC_SEQ_CTRL12,
+ S2MPG11_PMIC_SEQ_CTRL13,
+ S2MPG11_PMIC_SEQ_CTRL14,
+ S2MPG11_PMIC_SEQ_CTRL15,
+ S2MPG11_PMIC_SEQ_CTRL16,
+ S2MPG11_PMIC_SEQ_CTRL17,
+ S2MPG11_PMIC_SEQ_CTRL18,
+ S2MPG11_PMIC_SEQ_CTRL19,
+ S2MPG11_PMIC_SEQ_CTRL20,
+ S2MPG11_PMIC_SEQ_CTRL21,
+ S2MPG11_PMIC_SEQ_CTRL22,
+ S2MPG11_PMIC_SEQ_CTRL23,
+ S2MPG11_PMIC_SEQ_CTRL24,
+ S2MPG11_PMIC_SEQ_CTRL25,
+ S2MPG11_PMIC_SEQ_CTRL26,
+ S2MPG11_PMIC_SEQ_CTRL27,
+ S2MPG11_PMIC_OFF_SEQ_CTRL1,
+ S2MPG11_PMIC_OFF_SEQ_CTRL2,
+ S2MPG11_PMIC_OFF_SEQ_CTRL3,
+ S2MPG11_PMIC_OFF_SEQ_CTRL4,
+ S2MPG11_PMIC_OFF_SEQ_CTRL5,
+ S2MPG11_PMIC_OFF_SEQ_CTRL6,
+ S2MPG11_PMIC_OFF_SEQ_CTRL7,
+ S2MPG11_PMIC_OFF_SEQ_CTRL8,
+ S2MPG11_PMIC_OFF_SEQ_CTRL9,
+ S2MPG11_PMIC_OFF_SEQ_CTRL10,
+ S2MPG11_PMIC_OFF_SEQ_CTRL11,
+ S2MPG11_PMIC_OFF_SEQ_CTRL12,
+ S2MPG11_PMIC_OFF_SEQ_CTRL13,
+ S2MPG11_PMIC_OFF_SEQ_CTRL14,
+ S2MPG11_PMIC_OFF_SEQ_CTRL15,
+ S2MPG11_PMIC_OFF_SEQ_CTRL16,
+ S2MPG11_PMIC_OFF_SEQ_CTRL17,
+ S2MPG11_PMIC_PCTRLSEL1,
+ S2MPG11_PMIC_PCTRLSEL2,
+ S2MPG11_PMIC_PCTRLSEL3,
+ S2MPG11_PMIC_PCTRLSEL4,
+ S2MPG11_PMIC_PCTRLSEL5,
+ S2MPG11_PMIC_PCTRLSEL6,
+ S2MPG11_PMIC_DCTRLSEL1,
+ S2MPG11_PMIC_DCTRLSEL2,
+ S2MPG11_PMIC_DCTRLSEL3,
+ S2MPG11_PMIC_DCTRLSEL4,
+ S2MPG11_PMIC_DCTRLSEL5,
+ S2MPG11_PMIC_GPIO_CTRL1,
+ S2MPG11_PMIC_GPIO_CTRL2,
+ S2MPG11_PMIC_GPIO_CTRL3,
+ S2MPG11_PMIC_GPIO_CTRL4,
+ S2MPG11_PMIC_GPIO_CTRL5,
+ S2MPG11_PMIC_GPIO_CTRL6,
+ S2MPG11_PMIC_GPIO_CTRL7,
+ S2MPG11_PMIC_B2S_OCP_WARN,
+ S2MPG11_PMIC_B2S_OCP_WARN_X,
+ S2MPG11_PMIC_B2S_OCP_WARN_Y,
+ S2MPG11_PMIC_B2S_OCP_WARN_Z,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN_X,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN_Y,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN_Z,
+ S2MPG11_PMIC_BUCK_OCP_EN1,
+ S2MPG11_PMIC_BUCK_OCP_EN2,
+ S2MPG11_PMIC_BUCK_OCP_PD_EN1,
+ S2MPG11_PMIC_BUCK_OCP_PD_EN2,
+ S2MPG11_PMIC_BUCK_OCP_CTRL1,
+ S2MPG11_PMIC_BUCK_OCP_CTRL2,
+ S2MPG11_PMIC_BUCK_OCP_CTRL3,
+ S2MPG11_PMIC_BUCK_OCP_CTRL4,
+ S2MPG11_PMIC_BUCK_OCP_CTRL5,
+ S2MPG11_PMIC_BUCK_OCP_CTRL6,
+ S2MPG11_PMIC_BUCK_OCP_CTRL7,
+ S2MPG11_PMIC_PIF_CTRL,
+ S2MPG11_PMIC_BUCK_HR_MODE1,
+ S2MPG11_PMIC_BUCK_HR_MODE2,
+ S2MPG11_PMIC_FAULTOUT_CTRL,
+ S2MPG11_PMIC_LDO_SENSE1,
+ S2MPG11_PMIC_LDO_SENSE2,
+};
+
+/* For S2MPG11_PMIC_PCTRLSELx */
+#define S2MPG11_PCTRLSEL_PWREN 0x1 /* PWREN pin */
+#define S2MPG11_PCTRLSEL_PWREN_TRG 0x2 /* PWREN_TRG bit in MIMICKING_CTRL */
+#define S2MPG11_PCTRLSEL_PWREN_MIF 0x3 /* PWREN_MIF pin */
+#define S2MPG11_PCTRLSEL_PWREN_MIF_TRG 0x4 /* PWREN_MIF_TRG bit in MIMICKING_CTRL */
+#define S2MPG11_PCTRLSEL_AP_ACTIVE_N 0x5 /* ~AP_ACTIVE_N pin */
+#define S2MPG11_PCTRLSEL_AP_ACTIVE_N_TRG 0x6 /* ~AP_ACTIVE_N_TRG bit in MIMICKING_CTRL */
+#define S2MPG11_PCTRLSEL_G3D_EN 0x7 /* G3D_EN pin */
+#define S2MPG11_PCTRLSEL_G3D_EN2 0x8 /* G3D_EN & ~AP_ACTIVE_N pins */
+#define S2MPG11_PCTRLSEL_AOC_VDD 0x9 /* AOC_VDD pin */
+#define S2MPG11_PCTRLSEL_AOC_RET 0xa /* AOC_RET pin */
+#define S2MPG11_PCTRLSEL_UFS_EN 0xb /* UFS_EN pin */
+#define S2MPG11_PCTRLSEL_LDO13S_EN 0xc /* VLDO13S_EN pin */
+
+/* Meter registers (type 0xa00) */
+enum s2mpg11_meter_reg {
+ S2MPG11_METER_CTRL1,
+ S2MPG11_METER_CTRL2,
+ S2MPG11_METER_CTRL3,
+ S2MPG11_METER_CTRL4,
+ S2MPG11_METER_CTRL5,
+ S2MPG11_METER_BUCKEN1,
+ S2MPG11_METER_BUCKEN2,
+ S2MPG11_METER_MUXSEL0,
+ S2MPG11_METER_MUXSEL1,
+ S2MPG11_METER_MUXSEL2,
+ S2MPG11_METER_MUXSEL3,
+ S2MPG11_METER_MUXSEL4,
+ S2MPG11_METER_MUXSEL5,
+ S2MPG11_METER_MUXSEL6,
+ S2MPG11_METER_MUXSEL7,
+ S2MPG11_METER_LPF_C0_0,
+ S2MPG11_METER_LPF_C0_1,
+ S2MPG11_METER_LPF_C0_2,
+ S2MPG11_METER_LPF_C0_3,
+ S2MPG11_METER_LPF_C0_4,
+ S2MPG11_METER_LPF_C0_5,
+ S2MPG11_METER_LPF_C0_6,
+ S2MPG11_METER_LPF_C0_7,
+ S2MPG11_METER_NTC_LPF_C0_0,
+ S2MPG11_METER_NTC_LPF_C0_1,
+ S2MPG11_METER_NTC_LPF_C0_2,
+ S2MPG11_METER_NTC_LPF_C0_3,
+ S2MPG11_METER_NTC_LPF_C0_4,
+ S2MPG11_METER_NTC_LPF_C0_5,
+ S2MPG11_METER_NTC_LPF_C0_6,
+ S2MPG11_METER_NTC_LPF_C0_7,
+ S2MPG11_METER_PWR_WARN0,
+ S2MPG11_METER_PWR_WARN1,
+ S2MPG11_METER_PWR_WARN2,
+ S2MPG11_METER_PWR_WARN3,
+ S2MPG11_METER_PWR_WARN4,
+ S2MPG11_METER_PWR_WARN5,
+ S2MPG11_METER_PWR_WARN6,
+ S2MPG11_METER_PWR_WARN7,
+ S2MPG11_METER_NTC_L_WARN0,
+ S2MPG11_METER_NTC_L_WARN1,
+ S2MPG11_METER_NTC_L_WARN2,
+ S2MPG11_METER_NTC_L_WARN3,
+ S2MPG11_METER_NTC_L_WARN4,
+ S2MPG11_METER_NTC_L_WARN5,
+ S2MPG11_METER_NTC_L_WARN6,
+ S2MPG11_METER_NTC_L_WARN7,
+ S2MPG11_METER_NTC_H_WARN0,
+ S2MPG11_METER_NTC_H_WARN1,
+ S2MPG11_METER_NTC_H_WARN2,
+ S2MPG11_METER_NTC_H_WARN3,
+ S2MPG11_METER_NTC_H_WARN4,
+ S2MPG11_METER_NTC_H_WARN5,
+ S2MPG11_METER_NTC_H_WARN6,
+ S2MPG11_METER_NTC_H_WARN7,
+ S2MPG11_METER_PWR_HYS1,
+ S2MPG11_METER_PWR_HYS2,
+ S2MPG11_METER_PWR_HYS3,
+ S2MPG11_METER_PWR_HYS4,
+ S2MPG11_METER_NTC_HYS1,
+ S2MPG11_METER_NTC_HYS2,
+ S2MPG11_METER_NTC_HYS3,
+ S2MPG11_METER_NTC_HYS4,
+ /* Nothing @ 0x3f */
+ S2MPG11_METER_ACC_DATA_CH0_1 = 0x40,
+ S2MPG11_METER_ACC_DATA_CH0_2,
+ S2MPG11_METER_ACC_DATA_CH0_3,
+ S2MPG11_METER_ACC_DATA_CH0_4,
+ S2MPG11_METER_ACC_DATA_CH0_5,
+ S2MPG11_METER_ACC_DATA_CH0_6,
+ S2MPG11_METER_ACC_DATA_CH1_1,
+ S2MPG11_METER_ACC_DATA_CH1_2,
+ S2MPG11_METER_ACC_DATA_CH1_3,
+ S2MPG11_METER_ACC_DATA_CH1_4,
+ S2MPG11_METER_ACC_DATA_CH1_5,
+ S2MPG11_METER_ACC_DATA_CH1_6,
+ S2MPG11_METER_ACC_DATA_CH2_1,
+ S2MPG11_METER_ACC_DATA_CH2_2,
+ S2MPG11_METER_ACC_DATA_CH2_3,
+ S2MPG11_METER_ACC_DATA_CH2_4,
+ S2MPG11_METER_ACC_DATA_CH2_5,
+ S2MPG11_METER_ACC_DATA_CH2_6,
+ S2MPG11_METER_ACC_DATA_CH3_1,
+ S2MPG11_METER_ACC_DATA_CH3_2,
+ S2MPG11_METER_ACC_DATA_CH3_3,
+ S2MPG11_METER_ACC_DATA_CH3_4,
+ S2MPG11_METER_ACC_DATA_CH3_5,
+ S2MPG11_METER_ACC_DATA_CH3_6,
+ S2MPG11_METER_ACC_DATA_CH4_1,
+ S2MPG11_METER_ACC_DATA_CH4_2,
+ S2MPG11_METER_ACC_DATA_CH4_3,
+ S2MPG11_METER_ACC_DATA_CH4_4,
+ S2MPG11_METER_ACC_DATA_CH4_5,
+ S2MPG11_METER_ACC_DATA_CH4_6,
+ S2MPG11_METER_ACC_DATA_CH5_1,
+ S2MPG11_METER_ACC_DATA_CH5_2,
+ S2MPG11_METER_ACC_DATA_CH5_3,
+ S2MPG11_METER_ACC_DATA_CH5_4,
+ S2MPG11_METER_ACC_DATA_CH5_5,
+ S2MPG11_METER_ACC_DATA_CH5_6,
+ S2MPG11_METER_ACC_DATA_CH6_1,
+ S2MPG11_METER_ACC_DATA_CH6_2,
+ S2MPG11_METER_ACC_DATA_CH6_3,
+ S2MPG11_METER_ACC_DATA_CH6_4,
+ S2MPG11_METER_ACC_DATA_CH6_5,
+ S2MPG11_METER_ACC_DATA_CH6_6,
+ S2MPG11_METER_ACC_DATA_CH7_1,
+ S2MPG11_METER_ACC_DATA_CH7_2,
+ S2MPG11_METER_ACC_DATA_CH7_3,
+ S2MPG11_METER_ACC_DATA_CH7_4,
+ S2MPG11_METER_ACC_DATA_CH7_5,
+ S2MPG11_METER_ACC_DATA_CH7_6,
+ S2MPG11_METER_ACC_COUNT_1,
+ S2MPG11_METER_ACC_COUNT_2,
+ S2MPG11_METER_ACC_COUNT_3,
+ S2MPG11_METER_LPF_DATA_CH0_1,
+ S2MPG11_METER_LPF_DATA_CH0_2,
+ S2MPG11_METER_LPF_DATA_CH0_3,
+ S2MPG11_METER_LPF_DATA_CH1_1,
+ S2MPG11_METER_LPF_DATA_CH1_2,
+ S2MPG11_METER_LPF_DATA_CH1_3,
+ S2MPG11_METER_LPF_DATA_CH2_1,
+ S2MPG11_METER_LPF_DATA_CH2_2,
+ S2MPG11_METER_LPF_DATA_CH2_3,
+ S2MPG11_METER_LPF_DATA_CH3_1,
+ S2MPG11_METER_LPF_DATA_CH3_2,
+ S2MPG11_METER_LPF_DATA_CH3_3,
+ S2MPG11_METER_LPF_DATA_CH4_1,
+ S2MPG11_METER_LPF_DATA_CH4_2,
+ S2MPG11_METER_LPF_DATA_CH4_3,
+ S2MPG11_METER_LPF_DATA_CH5_1,
+ S2MPG11_METER_LPF_DATA_CH5_2,
+ S2MPG11_METER_LPF_DATA_CH5_3,
+ S2MPG11_METER_LPF_DATA_CH6_1,
+ S2MPG11_METER_LPF_DATA_CH6_2,
+ S2MPG11_METER_LPF_DATA_CH6_3,
+ S2MPG11_METER_LPF_DATA_CH7_1,
+ S2MPG11_METER_LPF_DATA_CH7_2,
+ S2MPG11_METER_LPF_DATA_CH7_3,
+	/* Nothing @ 0x8b and 0x8c */
+ S2MPG11_METER_LPF_DATA_NTC0_1 = 0x8d,
+ S2MPG11_METER_LPF_DATA_NTC0_2,
+ S2MPG11_METER_LPF_DATA_NTC1_1,
+ S2MPG11_METER_LPF_DATA_NTC1_2,
+ S2MPG11_METER_LPF_DATA_NTC2_1,
+ S2MPG11_METER_LPF_DATA_NTC2_2,
+ S2MPG11_METER_LPF_DATA_NTC3_1,
+ S2MPG11_METER_LPF_DATA_NTC3_2,
+ S2MPG11_METER_LPF_DATA_NTC4_1,
+ S2MPG11_METER_LPF_DATA_NTC4_2,
+ S2MPG11_METER_LPF_DATA_NTC5_1,
+ S2MPG11_METER_LPF_DATA_NTC5_2,
+ S2MPG11_METER_LPF_DATA_NTC6_1,
+ S2MPG11_METER_LPF_DATA_NTC6_2,
+ S2MPG11_METER_LPF_DATA_NTC7_1,
+ S2MPG11_METER_LPF_DATA_NTC7_2,
+};
+
+/* S2MPG11 regulator IDs */
+enum s2mpg11_regulators {
+ S2MPG11_BUCKBOOST,
+ S2MPG11_BUCK1,
+ S2MPG11_BUCK2,
+ S2MPG11_BUCK3,
+ S2MPG11_BUCK4,
+ S2MPG11_BUCK5,
+ S2MPG11_BUCK6,
+ S2MPG11_BUCK7,
+ S2MPG11_BUCK8,
+ S2MPG11_BUCK9,
+ S2MPG11_BUCK10,
+ S2MPG11_BUCKD,
+ S2MPG11_BUCKA,
+ S2MPG11_LDO1,
+ S2MPG11_LDO2,
+ S2MPG11_LDO3,
+ S2MPG11_LDO4,
+ S2MPG11_LDO5,
+ S2MPG11_LDO6,
+ S2MPG11_LDO7,
+ S2MPG11_LDO8,
+ S2MPG11_LDO9,
+ S2MPG11_LDO10,
+ S2MPG11_LDO11,
+ S2MPG11_LDO12,
+ S2MPG11_LDO13,
+ S2MPG11_LDO14,
+ S2MPG11_LDO15,
+ S2MPG11_REGULATOR_MAX,
+};
+
+#endif /* __LINUX_MFD_S2MPG11_H */
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 5f70d3b5d1b1..097ef4dfcdac 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -667,7 +667,7 @@ static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq,
return -ENODEV;
return request_threaded_irq(irq + wm8350->irq_base, NULL,
- handler, flags, name, data);
+ handler, flags | IRQF_ONESHOT, name, data);
}
static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index d7f46a8fbfa1..b37fe39cef27 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -962,6 +962,11 @@ static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}
+static inline u8 get_cqe_lro_num_seg(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->srqn) >> 24;
+}
+
#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1c54aa6f74fb..e2d067b1e67b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1149,6 +1149,7 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+int mlx5_lag_query_bond_speed(struct mlx5_core_dev *dev, u32 *speed);
bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index e9dcd4bf355d..775cb0c56865 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1071,7 +1071,9 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 esw_shared_ingress_acl[0x1];
u8 esw_uplink_ingress_acl[0x1];
u8 root_ft_on_other_esw[0x1];
- u8 reserved_at_a[0xf];
+ u8 reserved_at_a[0x1];
+ u8 esw_vport_state_max_tx_speed[0x1];
+ u8 reserved_at_c[0xd];
u8 esw_functions_changed[0x1];
u8 reserved_at_1a[0x1];
u8 ecpf_vport_exists[0x1];
@@ -5445,7 +5447,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 reserved_at_40[0x20];
- u8 reserved_at_60[0x18];
+ u8 max_tx_speed[0x10];
+ u8 reserved_at_70[0x8];
u8 admin_state[0x4];
u8 state[0x4];
};
@@ -7778,7 +7781,7 @@ struct mlx5_ifc_modify_vport_state_in_bits {
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_at_60[0x10];
+ u8 max_tx_speed[0x10];
u8 ingress_connect[0x1];
u8 egress_connect[0x1];
u8 ingress_connect_valid[0x1];
@@ -11006,7 +11009,9 @@ struct mlx5_ifc_qcam_access_reg_cap_mask {
};
struct mlx5_ifc_qcam_qos_feature_cap_mask {
- u8 qcam_qos_feature_cap_mask_127_to_1[0x7F];
+ u8 qcam_qos_feature_cap_mask_127_to_5[0x7B];
+ u8 qetcr_qshr_max_bw_val_msb[0x1];
+ u8 qcam_qos_feature_cap_mask_3_to_1[0x3];
u8 qpts_trust_both[0x1];
};
@@ -11962,8 +11967,7 @@ struct mlx5_ifc_ets_tcn_config_reg_bits {
u8 reserved_at_20[0xc];
u8 max_bw_units[0x4];
- u8 reserved_at_30[0x8];
- u8 max_bw_value[0x8];
+ u8 max_bw_value[0x10];
};
struct mlx5_ifc_ets_global_config_reg_bits {
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 1df9d9a57bbc..12d366b12e2e 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -112,7 +112,7 @@ enum mlx5e_ext_link_mode {
MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
- MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
+ MLX5E_1600GAUI_8_1600GBASE_CR8_KR8 = 23,
MLX5E_EXT_LINK_MODES_NUMBER,
};
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index f876bfc0669c..dfa2fe32217a 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -41,6 +41,8 @@
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
mlx5_core_is_pf(mdev))
+#define MLX5_MAX_TX_SPEED_UNIT 100
+
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
@@ -58,6 +60,10 @@ enum {
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state);
+int mlx5_query_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 op_mod,
+ u16 vport, u8 other_vport, u32 *max_tx_speed);
+int mlx5_modify_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 other_vport, u16 max_tx_speed);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, bool other, u8 *addr);
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
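max_tx_speed is carried as a 16-bit field and, judging from MLX5_MAX_TX_SPEED_UNIT, expressed in units of 100 Mbps. A hedged sketch of capping a vport's rate (the unit conversion and op_mod choice are assumptions):

    /* Assumes the field unit is 100 Mbps (MLX5_MAX_TX_SPEED_UNIT). */
    static int example_set_vport_rate(struct mlx5_core_dev *mdev, u16 vport,
                                      u32 rate_mbps)
    {
            u16 speed = rate_mbps / MLX5_MAX_TX_SPEED_UNIT;

            return mlx5_modify_vport_max_tx_speed(mdev,
                            MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                            vport, 1, speed);
    }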
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0d5be9dc736..dc1ad71a2a70 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,6 +36,7 @@
#include <linux/rcuwait.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/iommu-debug-pagealloc.h>
struct mempolicy;
struct anon_vma;
@@ -45,6 +46,7 @@ struct pt_regs;
struct folio_batch;
void arch_mm_preinit(void);
+void mm_core_init_early(void);
void mm_core_init(void);
void init_mm_internals(void);
@@ -359,7 +361,7 @@ enum {
DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
-#if defined(CONFIG_X86_USER_SHADOW_STACK)
+#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_RISCV_USER_CFI)
/*
* VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
* support core mm.
@@ -460,7 +462,8 @@ enum {
#define VM_PKEY_BIT4 VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
-#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
+#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
+ defined(CONFIG_RISCV_USER_CFI)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
#else
#define VM_SHADOW_STACK VM_NONE
@@ -1007,10 +1010,7 @@ static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
{
unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
- /* mmap read lock/VMA read lock must be held. */
- if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
- vma_assert_locked(vma);
-
+ vma_assert_stabilised(vma);
if (__vma_flag_atomic_valid(vma, bit))
set_bit((__force int)bit, bitmap);
}
@@ -2905,6 +2905,13 @@ static inline unsigned long get_mm_rss(struct mm_struct *mm)
get_mm_counter(mm, MM_SHMEMPAGES);
}
+static inline unsigned long get_mm_rss_sum(struct mm_struct *mm)
+{
+ return get_mm_counter_sum(mm, MM_FILEPAGES) +
+ get_mm_counter_sum(mm, MM_ANONPAGES) +
+ get_mm_counter_sum(mm, MM_SHMEMPAGES);
+}
+
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
return max(mm->hiwater_rss, get_mm_rss(mm));
@@ -2979,15 +2986,8 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl);
-static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl)
-{
- pte_t *ptep;
- __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
- return ptep;
-}
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl);
#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
@@ -3341,31 +3341,15 @@ static inline bool pagetable_pte_ctor(struct mm_struct *mm,
return true;
}
-pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
-static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
- pmd_t *pmdvalp)
-{
- pte_t *pte;
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
- __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
- return pte;
-}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
}
-pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp);
-static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp)
-{
- pte_t *pte;
-
- __cond_lock(RCU, __cond_lock(*ptlp,
- pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
- return pte;
-}
+pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
@@ -3540,7 +3524,7 @@ static inline unsigned long get_num_physpages(void)
}
/*
- * Using memblock node mappings, an architecture may initialise its
+ * FIXME: Using memblock node mappings, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in an
* architecture independent manner.
*
@@ -3555,7 +3539,7 @@ static inline unsigned long get_num_physpages(void)
* memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
-void free_area_init(unsigned long *max_zone_pfn);
+void arch_zone_limits_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
@@ -4137,12 +4121,16 @@ extern void __kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
+ iommu_debug_check_unmapped(page, numpages);
+
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 1);
}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
+ iommu_debug_check_unmapped(page, numpages);
+
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
@@ -4198,6 +4186,61 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
+#ifndef clear_pages
+/**
+ * clear_pages() - clear a page range for kernel-internal use.
+ * @addr: start address
+ * @npages: number of pages
+ *
+ * Use clear_user_pages() instead when clearing a page range to be
+ * mapped to user space.
+ *
+ * Does absolutely no exception handling.
+ *
+ * Note that even though the clearing operation is preemptible, clear_pages()
+ * does not (and on architectures where it reduces to a few long-running
+ * instructions, might not be able to) call cond_resched() to check if
+ * rescheduling is required.
+ *
+ * When running under preemptible models this is not a problem. Under
+ * cooperatively scheduled models, however, the caller is expected to
+ * limit @npages to no more than PROCESS_PAGES_NON_PREEMPT_BATCH.
+ */
+static inline void clear_pages(void *addr, unsigned int npages)
+{
+ do {
+ clear_page(addr);
+ addr += PAGE_SIZE;
+ } while (--npages);
+}
+#endif
+
+#ifndef PROCESS_PAGES_NON_PREEMPT_BATCH
+#ifdef clear_pages
+/*
+ * The architecture defines clear_pages(), and we assume that it is
+ * generally "fast". So choose a batch size large enough to allow the processor
+ * headroom for optimizing the operation and yet small enough that we see
+ * reasonable preemption latency for when this optimization is not possible
+ * (e.g. slow microarchitectures, memory bandwidth saturation).
+ *
+ * With a value of 32MB and assuming a memory bandwidth of ~10GB/s, this should
+ * result in a worst-case preemption latency of around 3ms when clearing pages.
+ *
+ * (See comment above clear_pages() for why preemption latency is a concern
+ * here.)
+ */
+#define PROCESS_PAGES_NON_PREEMPT_BATCH (SZ_32M >> PAGE_SHIFT)
+#else /* !clear_pages */
+/*
+ * The architecture does not provide a clear_pages() implementation. Assume
+ * that clear_page() -- which clear_pages() will fall back to -- is relatively
+ * slow and choose a small value for PROCESS_PAGES_NON_PREEMPT_BATCH.
+ */
+#define PROCESS_PAGES_NON_PREEMPT_BATCH 1
+#endif
+#endif
+
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
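Under cooperative preemption, a caller of clear_pages() above is expected to chunk the work by PROCESS_PAGES_NON_PREEMPT_BATCH and reschedule between chunks. A minimal sketch (helper name hypothetical):

    /* Clear npages starting at addr, yielding between batches. */
    static void example_clear_pages_resched(void *addr, unsigned long npages)
    {
            while (npages) {
                    unsigned int batch = min_t(unsigned long, npages,
                                               PROCESS_PAGES_NON_PREEMPT_BATCH);

                    clear_pages(addr, batch);
                    addr += (unsigned long)batch << PAGE_SHIFT;
                    npages -= batch;
                    cond_resched();
            }
    }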
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 78950eb8926d..8731606d8d36 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -752,8 +752,18 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
}
#endif
-#define VMA_LOCK_OFFSET 0x40000000
-#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
+/*
+ * While __vma_enter_locked() is working to ensure there are no read-locks held
+ * on a VMA (either while acquiring a VMA write lock or marking a VMA detached),
+ * we set the VM_REFCNT_EXCLUDE_READERS_FLAG in vma->vm_refcnt to indicate to
+ * vma_start_read() that the reference count should be left alone.
+ *
+ * See the comment describing vm_refcnt in vm_area_struct for details as to
+ * which values the VMA reference count can be.
+ */
+#define VM_REFCNT_EXCLUDE_READERS_BIT (30)
+#define VM_REFCNT_EXCLUDE_READERS_FLAG (1U << VM_REFCNT_EXCLUDE_READERS_BIT)
+#define VM_REFCNT_LIMIT (VM_REFCNT_EXCLUDE_READERS_FLAG - 1)
struct vma_numab_state {
/*
@@ -935,10 +945,10 @@ struct vm_area_struct {
/*
* Can only be written (using WRITE_ONCE()) while holding both:
* - mmap_lock (in write mode)
- * - vm_refcnt bit at VMA_LOCK_OFFSET is set
+ * - vm_refcnt bit at VM_REFCNT_EXCLUDE_READERS_FLAG is set
* Can be read reliably while holding one of:
* - mmap_lock (in read or write mode)
- * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
+ * - vm_refcnt bit at VM_REFCNT_EXCLUDE_READERS_BIT is set or vm_refcnt > 1
* Can be read unreliably (using READ_ONCE()) for pessimistic bailout
* while holding nothing (except RCU to keep the VMA struct allocated).
*
@@ -980,7 +990,44 @@ struct vm_area_struct {
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
- /* Unstable RCU readers are allowed to read this. */
+ /*
+ * Used to keep track of firstly, whether the VMA is attached, secondly,
+ * if attached, how many read locks are taken, and thirdly, if the
+ * VM_REFCNT_EXCLUDE_READERS_FLAG is set, whether any read locks held
+ * are currently in the process of being excluded.
+ *
+ * This value can be equal to:
+ *
+ * 0 - Detached. IMPORTANT: when the refcnt is zero, readers cannot
+ * increment it.
+ *
+ * 1 - Attached and either unlocked or write-locked. Write locks are
+ * identified via __is_vma_write_locked() which checks for equality of
+ * vma->vm_lock_seq and mm->mm_lock_seq.
+ *
+ * >1, < VM_REFCNT_EXCLUDE_READERS_FLAG - Read-locked or (unlikely)
+ * write-locked with other threads having temporarily incremented the
+ * reference count prior to determining it is write-locked and
+ * decrementing it again.
+ *
+ * VM_REFCNT_EXCLUDE_READERS_FLAG - Detached, pending
+ * __vma_end_exclude_readers() completion which will decrement the
+ * reference count to zero. IMPORTANT - at this stage no further readers
+ * can increment the reference count. It can only be reduced.
+ *
+ * VM_REFCNT_EXCLUDE_READERS_FLAG + 1 - A thread is either write-locking
+ * an attached VMA and has yet to invoke __vma_end_exclude_readers(),
+ * OR a thread is detaching a VMA and is waiting on a single spurious
+ * reader in order to decrement the reference count. IMPORTANT - as
+ * above, no further readers can increment the reference count.
+ *
+ * > VM_REFCNT_EXCLUDE_READERS_FLAG + 1 - A thread is either
+ * write-locking or detaching a VMA and is waiting on readers to
+ * exit. IMPORTANT - as above, no further readers can increment the
+ * reference count.
+ *
+ * NOTE: Unstable RCU readers are allowed to read this.
+ */
refcount_t vm_refcnt ____cacheline_aligned_in_smp;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map vmlock_dep_map;
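The comment above enumerates the vm_refcnt states; restated as a decoder it reads as follows (illustrative only — the real predicates live in the mmap_lock.h helpers):

    /* Illustrative only: classify a raw vm_refcnt snapshot. */
    static const char *example_vm_refcnt_state(unsigned int refcnt)
    {
            if (refcnt == 0)
                    return "detached";
            if (refcnt == 1)
                    return "attached; unlocked or write-locked";
            if (refcnt < VM_REFCNT_EXCLUDE_READERS_FLAG)
                    return "read-locked (or transient readers)";
            if (refcnt <= VM_REFCNT_EXCLUDE_READERS_FLAG + 1)
                    return "readers excluded; write-lock/detach in progress";
            return "excluding readers; still waiting for them to exit";
    }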
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index a82aa80c0ba4..11bf319d78ec 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -88,4 +88,9 @@ struct tlbflush_unmap_batch {
#endif
};
+struct lazy_mmu_state {
+ u8 enable_count;
+ u8 pause_count;
+};
+
#endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index d53f72dba7fe..93eca48bc443 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -78,6 +78,43 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
#ifdef CONFIG_PER_VMA_LOCK
+#ifdef CONFIG_LOCKDEP
+#define __vma_lockdep_map(vma) (&vma->vmlock_dep_map)
+#else
+#define __vma_lockdep_map(vma) NULL
+#endif
+
+/*
+ * VMA locks do not behave like most ordinary locks found in the kernel, so we
+ * cannot quite have full lockdep tracking in the way we would ideally prefer.
+ *
+ * Read locks act as shared locks which exclude an exclusive lock being
+ * taken. We therefore mark these accordingly on read lock acquire/release.
+ *
+ * Write locks are acquired exclusively per-VMA, but released in a shared
+ * fashion; that is, upon vma_end_write_all() we update the mmap's seqcount such
+ * that the write lock is released.
+ *
+ * We therefore cannot track write locks per-VMA, nor do we try. Mitigating this
+ * is the fact that, of course, we do lockdep-track the mmap lock rwsem which
+ * must be held when taking a VMA write lock.
+ *
+ * We do, however, want to indicate that during either acquisition of a VMA
+ * write lock or detachment of a VMA we require the lock held to be exclusive,
+ * so we utilise lockdep to do so.
+ */
+#define __vma_lockdep_acquire_read(vma) \
+ lock_acquire_shared(__vma_lockdep_map(vma), 0, 1, NULL, _RET_IP_)
+#define __vma_lockdep_release_read(vma) \
+ lock_release(__vma_lockdep_map(vma), _RET_IP_)
+#define __vma_lockdep_acquire_exclusive(vma) \
+ lock_acquire_exclusive(__vma_lockdep_map(vma), 0, 0, NULL, _RET_IP_)
+#define __vma_lockdep_release_exclusive(vma) \
+ lock_release(__vma_lockdep_map(vma), _RET_IP_)
+/* Only meaningful if CONFIG_LOCK_STAT is defined. */
+#define __vma_lockdep_stat_mark_acquired(vma) \
+ lock_acquired(__vma_lockdep_map(vma), _RET_IP_)
+
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
seqcount_init(&mm->mm_lock_seq);
@@ -115,36 +152,81 @@ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_key;
- lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
+ lockdep_init_map(__vma_lockdep_map(vma), "vm_lock", &lockdep_key, 0);
#endif
if (reset_refcnt)
refcount_set(&vma->vm_refcnt, 0);
vma->vm_lock_seq = UINT_MAX;
}
-static inline bool is_vma_writer_only(int refcnt)
+/*
+ * This function determines whether the input VMA reference count describes a
+ * VMA which has excluded all VMA read locks.
+ *
+ * In the case of a detached VMA, we may incorrectly indicate that readers are
+ * excluded when one remains, because in that scenario we target a refcount of
+ * VM_REFCNT_EXCLUDE_READERS_FLAG, rather than the attached target of
+ * VM_REFCNT_EXCLUDE_READERS_FLAG + 1.
+ *
+ * However, the race window for that is very small, so this is unlikely.
+ *
+ * Returns: true if readers are excluded, false otherwise.
+ */
+static inline bool __vma_are_readers_excluded(int refcnt)
{
/*
- * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
- * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
- * a detached vma happens only in vma_mark_detached() and is a rare
- * case, therefore most of the time there will be no unnecessary wakeup.
+ * See the comment describing the vm_area_struct->vm_refcnt field for
+ * details of possible refcnt values.
*/
- return (refcnt & VMA_LOCK_OFFSET) && refcnt <= VMA_LOCK_OFFSET + 1;
+ return (refcnt & VM_REFCNT_EXCLUDE_READERS_FLAG) &&
+ refcnt <= VM_REFCNT_EXCLUDE_READERS_FLAG + 1;
+}
+
+/*
+ * Actually decrement the VMA reference count.
+ *
+ * The function returns the reference count as it was immediately after the
+ * decrement took place. If it returns zero, the VMA is now detached.
+ */
+static inline __must_check unsigned int
+__vma_refcount_put_return(struct vm_area_struct *vma)
+{
+ int oldcnt;
+
+ if (__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt))
+ return 0;
+
+ return oldcnt - 1;
}
+/**
+ * vma_refcount_put() - Drop reference count in VMA vm_refcnt field due to a
+ * read-lock being dropped.
+ * @vma: The VMA whose reference count we wish to decrement.
+ *
+ * If we were the last reader, wake up threads waiting to obtain an exclusive
+ * lock.
+ */
static inline void vma_refcount_put(struct vm_area_struct *vma)
{
- /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
+ /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt. */
struct mm_struct *mm = vma->vm_mm;
- int oldcnt;
+ int newcnt;
- rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
- if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
+ __vma_lockdep_release_read(vma);
+ newcnt = __vma_refcount_put_return(vma);
- if (is_vma_writer_only(oldcnt - 1))
- rcuwait_wake_up(&mm->vma_writer_wait);
- }
+ /*
+ * __vma_start_exclude_readers() may be sleeping waiting for readers to
+ * drop their reference count, so wake it up if we were the last reader
+ * blocking it from being acquired.
+ *
+ * We may be raced by other readers temporarily incrementing the
+ * reference count; though the race window is very small, this might
+ * cause spurious wakeups.
+ */
+ if (newcnt && __vma_are_readers_excluded(newcnt))
+ rcuwait_wake_up(&mm->vma_writer_wait);
}
/*
@@ -159,10 +241,10 @@ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int
mmap_assert_locked(vma->vm_mm);
if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
- VMA_REF_LIMIT)))
+ VM_REFCNT_LIMIT)))
return false;
- rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+ __vma_lockdep_acquire_read(vma);
return true;
}
@@ -182,21 +264,31 @@ static inline void vma_end_read(struct vm_area_struct *vma)
vma_refcount_put(vma);
}
-/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
{
+ const struct mm_struct *mm = vma->vm_mm;
+
+ /* We must hold an exclusive write lock for this access to be valid. */
mmap_assert_write_locked(vma->vm_mm);
+ return mm->mm_lock_seq.sequence;
+}
+/*
+ * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
+ * write lock is held.
+ *
+ * Returns true if write-locked, otherwise false.
+ */
+static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
+{
/*
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
- return (vma->vm_lock_seq == *mm_lock_seq);
+ return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
}
-int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
- int state);
+int __vma_start_write(struct vm_area_struct *vma, int state);
/*
* Begin writing to a VMA.
@@ -205,12 +297,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
*/
static inline void vma_start_write(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
+ if (__is_vma_write_locked(vma))
return;
- __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
+ __vma_start_write(vma, TASK_UNINTERRUPTIBLE);
}
/**
@@ -229,26 +319,110 @@ static inline void vma_start_write(struct vm_area_struct *vma)
static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
+ if (__is_vma_write_locked(vma))
return 0;
- return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
+
+ return __vma_start_write(vma, TASK_KILLABLE);
}
+/**
+ * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
}
+/**
+ * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
+ * lock and is not detached.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
+ unsigned int refcnt;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ if (!lock_is_held(__vma_lockdep_map(vma)))
+ vma_assert_write_locked(vma);
+ return;
+ }
+
+ /*
+ * See the comment describing the vm_area_struct->vm_refcnt field for
+ * details of possible refcnt values.
+ */
+ refcnt = refcount_read(&vma->vm_refcnt);
+
+ /*
+ * In this case we're either read-locked, write-locked with temporary
+ * readers, or in the midst of excluding readers, all of which means
+ * we're locked.
+ */
+ if (refcnt > 1)
+ return;
+
+ /* It is a bug for the VMA to be detached here. */
+ VM_WARN_ON_ONCE_VMA(!refcnt, vma);
+
+ /*
+ * OK, the VMA has a reference count of 1 which means it is either
+ * unlocked and attached or write-locked, so assert that it is
+ * write-locked.
+ */
+ vma_assert_write_locked(vma);
+}
+
+/**
+ * vma_assert_stabilised() - assert that this VMA cannot be changed from
+ * underneath us either by having a VMA or mmap lock held.
+ * @vma: The VMA whose stability we wish to assess.
+ *
+ * If lockdep is enabled we can precisely ensure stability via either an mmap
+ * lock owned by us or a specific VMA lock.
+ *
+ * With lockdep disabled we may sometimes race with other threads acquiring the
+ * mmap read lock simultaneously with our VMA read lock.
+ */
+static inline void vma_assert_stabilised(struct vm_area_struct *vma)
+{
+ /*
+ * If another thread owns an mmap lock, it may go away at any time, and
+ * thus is no guarantee of stability.
+ *
+ * If lockdep is enabled we can accurately determine if an mmap lock is
+ * held and owned by us. Otherwise we must approximate.
+ *
+ * This doesn't necessarily mean we are not stabilised, however, as we may
+ * hold a VMA read lock (not a write lock, as that would require an owned
+ * mmap lock).
+ *
+ * If (assuming lockdep is not enabled) we were to assert a VMA read
+ * lock first, we may also run into issues, as other threads can hold VMA
+ * read locks simultaneously with us.
+ *
+ * Therefore if lockdep is not enabled we risk a false negative (i.e. no
+ * assert fired). If accurate checking is required, enable lockdep.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ if (lockdep_is_held(&vma->vm_mm->mmap_lock))
+ return;
+ } else {
+ if (rwsem_is_locked(&vma->vm_mm->mmap_lock))
+ return;
+ }
- VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
- !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ /*
+ * We're not stabilised by the mmap lock, so assert that we're
+ * stabilised by a VMA lock.
+ */
+ vma_assert_locked(vma);
+}
+
+static inline bool vma_is_attached(struct vm_area_struct *vma)
+{
+ return refcount_read(&vma->vm_refcnt);
}
/*
@@ -258,12 +432,12 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
*/
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
- WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
+ WARN_ON_ONCE(!vma_is_attached(vma));
}
static inline void vma_assert_detached(struct vm_area_struct *vma)
{
- WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
+ WARN_ON_ONCE(vma_is_attached(vma));
}
static inline void vma_mark_attached(struct vm_area_struct *vma)
@@ -273,7 +447,28 @@ static inline void vma_mark_attached(struct vm_area_struct *vma)
refcount_set_release(&vma->vm_refcnt, 1);
}
-void vma_mark_detached(struct vm_area_struct *vma);
+void __vma_exclude_readers_for_detach(struct vm_area_struct *vma);
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_attached(vma);
+
+ /*
+ * The VMA still being attached (refcnt > 0) is unlikely, because the
+ * vma has been already write-locked and readers can increment vm_refcnt
+ * only temporarily before they check vm_lock_seq, realize the vma is
+ * locked and drop back the vm_refcnt. That is a narrow window for
+ * observing a raised vm_refcnt.
+ *
+ * See the comment describing the vm_area_struct->vm_refcnt field for
+ * details of possible refcnt values.
+ */
+ if (likely(!__vma_refcount_put_return(vma)))
+ return;
+
+ __vma_exclude_readers_for_detach(vma);
+}
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
@@ -327,6 +522,12 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
mmap_assert_locked(vma->vm_mm);
}
+static inline void vma_assert_stabilised(struct vm_area_struct *vma)
+{
+ /* Without VMA locks, the mmap lock in either mode suffices to stabilise. */
+ mmap_assert_locked(vma->vm_mm);
+}
+
#endif /* CONFIG_PER_VMA_LOCK */
static inline void mmap_write_lock(struct mm_struct *mm)
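A hedged sketch of the intended vma_assert_stabilised() call pattern — code that may be reached under either the mmap lock or a VMA lock asserts stability instead of open-coding both checks (helper name hypothetical):

    /* Caller must hold the mmap lock or a lock on this VMA. */
    static void example_touch_vma(struct vm_area_struct *vma)
    {
            vma_assert_stabilised(vma);
            /* ... per-VMA state can now be read or updated safely ... */
    }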
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index fed1f5f4a8d3..4534bf462aac 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -78,6 +78,7 @@ struct sdio_driver {
int (*probe)(struct sdio_func *, const struct sdio_device_id *);
void (*remove)(struct sdio_func *);
+ void (*shutdown)(struct sdio_func *);
struct device_driver drv;
};
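With the new callback, an SDIO function driver can quiesce its card at system shutdown; a minimal hypothetical driver (probe/remove assumed defined elsewhere):

    static void example_sdio_shutdown(struct sdio_func *func)
    {
            /* Quiesce the function so reboot doesn't interrupt ongoing I/O. */
            sdio_claim_host(func);
            sdio_disable_func(func);
            sdio_release_host(func);
    }

    static struct sdio_driver example_sdio_driver = {
            .name     = "example_sdio",
            .probe    = example_probe,
            .remove   = example_remove,
            .shutdown = example_sdio_shutdown,
    };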
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 14a45979cccc..ab60ffba08f5 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -47,6 +47,15 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
BUG(); \
} \
} while (0)
+#define VM_WARN_ON_PAGE(cond, page) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(page, "VM_WARN_ON_PAGE(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
@@ -122,6 +131,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
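Unlike VM_WARN_ON_ONCE_PAGE, the new macro fires on every violation, and like its siblings it evaluates to the condition, so it can guard a bail-out; a hedged example:

    /* Dump the offending page and bail instead of proceeding. */
    if (VM_WARN_ON_PAGE(PageTail(page), page))
            return -EINVAL;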
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index ac01dc4eb2ce..ed3dd0f3fe19 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -24,7 +24,7 @@ static inline void leave_mm(void) { }
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
-# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_DOMAIN)
#else
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fc5d6c88d2f0..3e51190a55e4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1534,14 +1534,27 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
-void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
- enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
+
+enum kswapd_clear_hopeless_reason {
+ KSWAPD_CLEAR_HOPELESS_OTHER = 0,
+ KSWAPD_CLEAR_HOPELESS_KSWAPD,
+ KSWAPD_CLEAR_HOPELESS_DIRECT,
+ KSWAPD_CLEAR_HOPELESS_PCP,
+};
+
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
+ enum zone_type highest_zoneidx);
+void kswapd_try_clear_hopeless(struct pglist_data *pgdat,
+ unsigned int order, int highest_zoneidx);
+void kswapd_clear_hopeless(pg_data_t *pgdat, enum kswapd_clear_hopeless_reason reason);
+bool kswapd_test_hopeless(pg_data_t *pgdat);
+
/*
* Memory initialization context, use to differentiate memory added by
* the platform statically or via memory hotplug interface.
@@ -2286,9 +2299,7 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#define pfn_to_nid(pfn) (0)
#endif
-void sparse_init(void);
#else
-#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 24eb5a88a5c5..5b1725fe9707 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -609,7 +609,6 @@ struct platform_device_id {
kernel_ulong_t driver_data;
};
-#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
diff --git a/include/linux/module.h b/include/linux/module.h
index d80c3ea57472..14f391b186c6 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -151,16 +151,10 @@ extern void cleanup_module(void);
#define __init_or_module
#define __initdata_or_module
#define __initconst_or_module
-#define __INIT_OR_MODULE .text
-#define __INITDATA_OR_MODULE .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
#else
#define __init_or_module __init
#define __initdata_or_module __initdata
#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
struct module_kobject *lookup_or_create_module_kobject(const char *name);
@@ -748,6 +742,15 @@ static inline void __module_get(struct module *module)
__mod ? __mod->name : "kernel"; \
})
+static inline const unsigned char *module_buildid(struct module *mod)
+{
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ return mod->build_id;
+#else
+ return NULL;
+#endif
+}
+
/* Dereference module function descriptor */
void *dereference_module_function_descriptor(struct module *mod, void *ptr);
@@ -770,8 +773,6 @@ static inline bool is_livepatch_module(struct module *mod)
#endif
}
-void set_module_sig_enforced(void);
-
void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data);
#else /* !CONFIG_MODULES... */
@@ -866,10 +867,6 @@ static inline bool module_requested_async_probing(struct module *module)
}
-static inline void set_module_sig_enforced(void)
-{
-}
-
/* Dereference module function descriptor */
static inline
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
@@ -925,6 +922,8 @@ static inline bool retpoline_module_ok(bool has_retpoline)
#ifdef CONFIG_MODULE_SIG
bool is_module_sig_enforced(void);
+void set_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
@@ -935,6 +934,10 @@ static inline bool is_module_sig_enforced(void)
return false;
}
+static inline void set_module_sig_enforced(void)
+{
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 915f32f7d888..7d22d4c4ea2e 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -2,9 +2,14 @@
#ifndef _LINUX_MODULE_PARAMS_H
#define _LINUX_MODULE_PARAMS_H
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/stringify.h>
-#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
/*
* The maximum module name length, including the NUL byte.
@@ -355,8 +360,8 @@ static inline void kernel_param_unlock(struct module *mod)
/**
* __core_param_cb - similar like core_param, with a set/get ops instead of type.
* @name: the name of the cmdline and sysfs parameter (often the same as var)
- * @var: the variable
* @ops: the set & get operations for this parameter.
+ * @arg: the variable
* @perm: visibility in sysfs
*
* Ideally this should be called 'core_param_cb', but the name has been
@@ -390,7 +395,7 @@ static inline void kernel_param_unlock(struct module *mod)
* @name1: parameter name 1
* @name2: parameter name 2
*
- * Returns true if the two parameter names are equal.
+ * Returns: true if the two parameter names are equal.
* Dashes (-) are considered equal to underscores (_).
*/
extern bool parameq(const char *name1, const char *name2);
@@ -402,6 +407,10 @@ extern bool parameq(const char *name1, const char *name2);
* @n: the length to compare
*
* Similar to parameq(), except it compares @n characters.
+ *
+ * Returns: true if the first @n characters of the two parameter names
+ * are equal.
+ * Dashes (-) are considered equal to underscores (_).
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8003e3218c46..fa41eed62868 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -49,12 +49,12 @@ typedef struct arch_msi_msg_data {
#endif
/**
- * msi_msg - Representation of a MSI message
+ * struct msi_msg - Representation of a MSI message
* @address_lo: Low 32 bits of msi message address
- * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @arch_addr_lo: Architecture specific shadow of @address_lo
* @address_hi: High 32 bits of msi message address
* (only used when device supports it)
- * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @arch_addr_hi: Architecture specific shadow of @address_hi
* @data: MSI message data (usually 16 bits)
* @arch_data: Architecture specific shadow of @data
*/
@@ -91,7 +91,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
*
* @msi_mask: [PCI MSI] MSI cached mask bits
* @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
@@ -101,6 +101,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
* @can_mask: [PCI MSI/X] Masking supported?
* @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
* @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @msi_attrib: [PCI MSI/X] Compound struct of MSI/X attributes
* @mask_pos: [PCI MSI] Mask register position
* @mask_base: [PCI MSI-X] Mask register base address
*/
@@ -169,7 +170,7 @@ struct msi_desc_data {
* Only used if iommu_msi_shift != 0
* @iommu_msi_shift: Indicates how many bits of the original address should be
* preserved when using iommu_msi_iova.
- * @sysfs_attr: Pointer to sysfs device attribute
+ * @sysfs_attrs: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
@@ -220,7 +221,7 @@ enum msi_desc_filter {
/**
* struct msi_dev_domain - The internals of MSI domain info per device
* @store: Xarray for storing MSI descriptor pointers
- * @irqdomain: Pointer to a per device interrupt domain
+ * @domain: Pointer to a per device interrupt domain
*/
struct msi_dev_domain {
struct xarray store;
@@ -702,7 +703,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
-u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
+u32 pci_msi_map_rid_ctlr_node(struct irq_domain *domain, struct pci_dev *pdev,
+ struct fwnode_handle **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bf535f0118bb..ecaa0440f6ec 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -182,13 +182,13 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
- unsigned int subclass);
+ unsigned int subclass) __cond_acquires(0, lock);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
- unsigned int subclass, struct lockdep_map *nest_lock);
-extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
+ unsigned int subclass, struct lockdep_map *nest_lock) __cond_acquires(0, lock);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
@@ -211,10 +211,10 @@ do { \
_mutex_lock_killable(lock, subclass, NULL)
#else
-extern void mutex_lock(struct mutex *lock);
-extern int __must_check mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check mutex_lock_killable(struct mutex *lock);
-extern void mutex_lock_io(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock) __acquires(lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock) __cond_acquires(0, lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock) __cond_acquires(0, lock);
+extern void mutex_lock_io(struct mutex *lock) __acquires(lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -232,7 +232,7 @@ extern void mutex_lock_io(struct mutex *lock);
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) \
( \
@@ -242,17 +242,27 @@ extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest
#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
#else
-extern int mutex_trylock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif
-extern void mutex_unlock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock) __releases(lock);
-extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_acquires(true, lock);
-DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
extern unsigned long mutex_get_owner(struct mutex *lock);
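The converted guard classes keep the existing scope-based locking idiom; conditional acquisition uses the _try/_intr variants via the helpers in linux/cleanup.h. A minimal sketch (struct example_ctx is hypothetical):

    static int example_update(struct example_ctx *ctx)
    {
            guard(mutex)(&ctx->lock);   /* released automatically at return */
            ctx->value++;
            return 0;
    }

    static int example_update_intr(struct example_ctx *ctx)
    {
            scoped_cond_guard(mutex_intr, return -EINTR, &ctx->lock)
                    ctx->value++;
            return 0;
    }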
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index fdf7f515fde8..80975935ec48 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -38,7 +38,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct mutex {
+context_lock_struct(mutex) {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -59,7 +59,7 @@ struct mutex {
*/
#include <linux/rtmutex.h>
-struct mutex {
+context_lock_struct(mutex) {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d99b0fbc1942..d4e6e00bb90a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1831,6 +1831,8 @@ enum netdev_reg_state {
*
* @mpls_features: Mask of features inheritable by MPLS
* @gso_partial_features: value(s) from NETIF_F_GSO\*
+ * @mangleid_features: Mask of features requiring MANGLEID; these are
+ * disabled together with it.
*
* @ifindex: interface index
* @group: The group the device belongs to
@@ -2219,6 +2221,7 @@ struct net_device {
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
+ netdev_features_t mangleid_features;
unsigned int min_mtu;
unsigned int max_mtu;
@@ -5163,8 +5166,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
-/* RSS keys are 40 or 52 bytes long */
-#define NETDEV_RSS_KEY_LEN 52
+#define NETDEV_RSS_KEY_LEN 256
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 34ce5d2f37a2..9ee7014400e8 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
-#include <asm/byteorder.h>
-#include <net/gre.h>
-#include <net/pptp.h>
struct nf_ct_gre {
unsigned int stream_timeout;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e947af6a3684..d87be1f25273 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -598,6 +598,10 @@ enum {
#define FATTR4_WORD2_TIME_DELEG_ACCESS BIT(FATTR4_TIME_DELEG_ACCESS - 64)
#define FATTR4_WORD2_TIME_DELEG_MODIFY BIT(FATTR4_TIME_DELEG_MODIFY - 64)
#define FATTR4_WORD2_OPEN_ARGUMENTS BIT(FATTR4_OPEN_ARGUMENTS - 64)
+#define FATTR4_WORD2_ACL_TRUEFORM BIT(FATTR4_ACL_TRUEFORM - 64)
+#define FATTR4_WORD2_ACL_TRUEFORM_SCOPE BIT(FATTR4_ACL_TRUEFORM_SCOPE - 64)
+#define FATTR4_WORD2_POSIX_DEFAULT_ACL BIT(FATTR4_POSIX_DEFAULT_ACL - 64)
+#define FATTR4_WORD2_POSIX_ACCESS_ACL BIT(FATTR4_POSIX_ACCESS_ACL - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c58b870f31ee..4daee27fa5eb 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -115,9 +115,7 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */
-#if IS_ENABLED(CONFIG_NFS_V4_1)
wait_queue_head_t cl_lock_waitq;
-#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
/* Our own IP address, as a null-terminated string.
@@ -259,6 +257,10 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ spinlock_t delegations_lock;
+ struct list_head delegations_return;
+ struct list_head delegations_lru;
+ struct list_head delegations_delayed;
atomic_long_t nr_active_delegations;
unsigned int delegation_hash_mask;
struct hlist_head *delegation_hash_table;
@@ -266,9 +268,7 @@ struct nfs_server {
struct list_head ss_src_copies;
unsigned long delegation_flags;
-#define NFS4SERV_DELEGRETURN (1)
-#define NFS4SERV_DELEGATION_EXPIRED (2)
-#define NFS4SERV_DELEGRETURN_DELAYED (3)
+#define NFS4SERV_DELEGATION_EXPIRED (1)
unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 79fe2dfb470f..ff1f12aa73d2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -209,6 +209,7 @@ struct nfs4_sequence_args {
};
struct nfs4_sequence_res {
+ const struct nfs4_sequence_slot_ops *sr_slot_ops;
struct nfs4_slot *sr_slot; /* slot used to send request */
unsigned long sr_timestamp;
int sr_status; /* sequence operation status */
@@ -1323,10 +1324,6 @@ struct nfs4_fsid_present_res {
unsigned char renew:1;
};
-#endif /* CONFIG_NFS_V4 */
-
-#ifdef CONFIG_NFS_V4_1
-
struct pnfs_commit_bucket {
struct list_head written;
struct list_head committing;
@@ -1466,7 +1463,7 @@ struct nfs41_free_stateid_res {
struct pnfs_ds_commit_info {
};
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_2
struct nfs42_falloc_args {
@@ -1849,7 +1846,7 @@ struct nfs_rpc_ops {
struct iattr *iattr,
int *);
int (*have_delegation)(struct inode *, fmode_t, int);
- int (*return_delegation)(struct inode *);
+ void (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index bd38648c998d..204c92462f3c 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -157,10 +157,10 @@ static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline bool __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define nodes_or(dst, src1, src2) \
@@ -181,10 +181,10 @@ static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline bool __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
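Propagating the bitmap_and()/bitmap_andnot() return value lets callers test for an empty result without a second scan; a hedged example (the nodemask variables are hypothetical):

    nodemask_t allowed;

    /* nodes_and() now answers "is the intersection non-empty?" directly. */
    if (!nodes_and(allowed, task_nodes, mems_allowed))
            return -EINVAL; /* no permitted node remains */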
diff --git a/include/linux/ns/ns_common_types.h b/include/linux/ns/ns_common_types.h
index b332b019b29c..0014fbc1c626 100644
--- a/include/linux/ns/ns_common_types.h
+++ b/include/linux/ns/ns_common_types.h
@@ -108,11 +108,13 @@ extern const struct proc_ns_operations utsns_operations;
* @ns_tree: namespace tree nodes and active reference count
*/
struct ns_common {
+ struct {
+ refcount_t __ns_ref; /* do not use directly */
+ } ____cacheline_aligned_in_smp;
u32 ns_type;
struct dentry *stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
- refcount_t __ns_ref; /* do not use directly */
union {
struct ns_tree;
struct rcu_head ns_rcu;
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 4d103ac8f5c7..b8710c825d64 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -162,8 +162,7 @@ void nubus_seq_write_rsrc_mem(struct seq_file *m,
unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
/* Declarations relating to driver model objects */
-int nubus_parent_device_register(void);
-int nubus_device_register(struct nubus_board *board);
+int nubus_device_register(struct device *parent, struct nubus_board *board);
int nubus_driver_register(struct nubus_driver *ndrv);
void nubus_driver_unregister(struct nubus_driver *ndrv);
int nubus_proc_show(struct seq_file *m, void *data);
diff --git a/include/linux/of.h b/include/linux/of.h
index 9bbdcf25a2b4..be6ec4916adf 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1485,6 +1485,13 @@ static inline int of_property_read_s32(const struct device_node *np,
#define for_each_compatible_node(dn, type, compatible) \
for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
dn = of_find_compatible_node(dn, type, compatible))
+
+#define for_each_compatible_node_scoped(dn, type, compatible) \
+ for (struct device_node *dn __free(device_node) = \
+ of_find_compatible_node(NULL, type, compatible); \
+ dn; \
+ dn = of_find_compatible_node(dn, type, compatible))
+
#define for_each_matching_node(dn, matches) \
for (dn = of_find_matching_node(NULL, matches); dn; \
dn = of_find_matching_node(dn, matches))
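The scoped variant attaches __free(device_node) to the iterator, so early exits no longer leak a node reference; a minimal sketch (the compatible string and helper are hypothetical):

    for_each_compatible_node_scoped(np, NULL, "vendor,example-device") {
            if (of_device_is_available(np))
                    return example_setup(np); /* np is put even on this
                                               * return; take of_node_get()
                                               * to keep it */
    }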
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1c2bc0281807..2a64d8cecaae 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -11,6 +11,30 @@
typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+struct of_imap_parser {
+ struct device_node *node;
+ const __be32 *imap;
+ const __be32 *imap_end;
+ u32 parent_offset;
+};
+
+struct of_imap_item {
+ struct of_phandle_args parent_args;
+ u32 child_imap_count;
+ u32 child_imap[16]; /* Arbitrary size.
+ * Should be #address-cells + #interrupt-cells,
+ * but we avoid dynamic allocation and so expect
+ * 16 to be enough.
+ */
+};
+
+/*
+ * If the iterator is exited prematurely (break, goto, return), of_node_put()
+ * has to be called on item.parent_args.np.
+ */
+#define for_each_of_imap_item(parser, item) \
+ for (; of_imap_parser_one(parser, item);)
+
/*
* Workarounds only applied to 32bit powermac machines
*/
@@ -49,6 +73,11 @@ extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern int of_imap_parser_init(struct of_imap_parser *parser,
+ struct device_node *node,
+ struct of_imap_item *item);
+extern struct of_imap_item *of_imap_parser_one(struct of_imap_parser *parser,
+ struct of_imap_item *item);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
const struct device_node *np,
enum irq_domain_bus_token token);
@@ -92,7 +121,17 @@ static inline void *of_irq_find_parent(struct device_node *child)
{
return NULL;
}
-
+static inline int of_imap_parser_init(struct of_imap_parser *parser,
+ struct device_node *node,
+ struct of_imap_item *item)
+{
+ return -ENOSYS;
+}
+static inline struct of_imap_item *of_imap_parser_one(struct of_imap_parser *parser,
+ struct of_imap_item *item)
+{
+ return NULL;
+}
static inline struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
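Putting the parser pieces together — initialise once, then walk each interrupt-map entry; per the comment above for_each_of_imap_item(), a premature exit must put the parent node. A hedged sketch:

    static void example_dump_imap(struct device_node *node)
    {
            struct of_imap_parser parser;
            struct of_imap_item item;

            if (of_imap_parser_init(&parser, node, &item))
                    return;

            for_each_of_imap_item(&parser, &item) {
                    pr_info("parent %pOF, %u child cells\n",
                            item.parent_args.np, item.child_imap_count);
                    /* break/return here would require
                     * of_node_put(item.parent_args.np) */
            }
    }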
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 6de479ebbe5d..ebce402854de 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -145,6 +145,11 @@ enum OID {
OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+ /* NIST FIPS-204 ML-DSA */
+ OID_id_ml_dsa_44, /* 2.16.840.1.101.3.4.3.17 */
+ OID_id_ml_dsa_65, /* 2.16.840.1.101.3.4.3.18 */
+ OID_id_ml_dsa_87, /* 2.16.840.1.101.3.4.3.19 */
+
OID__NR
};
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 736f633b2d5f..a5e95dbce220 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -36,12 +36,6 @@
#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
#define type_min(t) __type_min(typeof(t))
-/*
- * Avoids triggering -Wtype-limits compilation warning,
- * while using unsigned data types to check a < 0.
- */
-#define is_non_negative(a) ((a) > 0 || (a) == 0)
-#define is_negative(a) (!(is_non_negative(a)))
/*
* Allows for effectively applying __must_check to a macro so we can have
@@ -201,9 +195,9 @@ static inline bool __must_check __must_check_overflow(bool overflow)
typeof(d) _d = d; \
unsigned long long _a_full = _a; \
unsigned int _to_shift = \
- is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
+ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
- (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
+ (_to_shift != _s || *_d < 0 || _a < 0 || \
(*_d >> _to_shift) != _a); \
}))
@@ -552,4 +546,46 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
(__member_size((name)->array) / sizeof(*(name)->array) + \
__must_be_array((name)->array))
+/**
+ * typeof_flex_counter() - Return the type of the counter variable of a given
+ * flexible array member annotated by __counted_by().
+ * @FAM: Instance of flexible array member within a given struct.
+ *
+ * Returns: "size_t" if no annotation exists.
+ */
+#define typeof_flex_counter(FAM) \
+ typeof(_Generic(__flex_counter(FAM), \
+ void *: (size_t)0, \
+ default: *__flex_counter(FAM)))
+
+/**
+ * overflows_flex_counter_type() - Check if the counter associated with the
+ * given flexible array member can represent
+ * a value.
+ * @TYPE: Type of the struct that contains the @FAM.
+ * @FAM: Member name of the FAM within @TYPE.
+ * @COUNT: Value to check against the __counted_by annotated @FAM's counter.
+ *
+ * Returns: true if @COUNT can be represented in the @FAM's counter. When
+ * @FAM is not annotated with __counted_by(), always returns true.
+ */
+#define overflows_flex_counter_type(TYPE, FAM, COUNT) \
+ (!overflows_type(COUNT, typeof_flex_counter(((TYPE *)NULL)->FAM)))
+
+/**
+ * __set_flex_counter() - Set the counter associated with the given flexible
+ * array member that has been annotated by __counted_by().
+ * @FAM: Instance of flexible array member within a given struct.
+ * @COUNT: Value to store to the __counted_by annotated @FAM's counter.
+ *
+ * This is a no-op if no annotation exists. Count needs to be checked with
+ * overflows_flex_counter_type() before using this function.
+ */
+#define __set_flex_counter(FAM, COUNT) \
+({ \
+ *_Generic(__flex_counter(FAM), \
+ void *: &(size_t){ 0 }, \
+ default: __flex_counter(FAM)) = (COUNT); \
+})
+
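As a usage sketch (editor's illustration, not part of the patch): note that,
per the Returns: documentation above, overflows_flex_counter_type() is true
when the value *fits* the counter. "struct blob", blob_alloc() and its u8
counter are hypothetical.

struct blob {
	u8 count;
	int data[] __counted_by(count);
};

static struct blob *blob_alloc(size_t n, gfp_t gfp)
{
	struct blob *p;

	/* True only if 'n' fits in the u8 counter behind 'data'. */
	if (!overflows_flex_counter_type(struct blob, data, n))
		return NULL;

	p = kzalloc(struct_size(p, data, n), gfp);
	if (!p)
		return NULL;

	/* Would be a no-op if 'data' were not annotated with __counted_by(). */
	__set_flex_counter(p->data, n);
	return p;
}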
#endif /* __LINUX_OVERFLOW_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3e2f960e166c..6f8638c9904f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -67,4 +67,6 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
enum pb_isolate_mode mode);
+bool page_is_unmovable(struct zone *zone, struct page *page,
+ enum pb_isolate_mode mode, unsigned long *step);
#endif
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 76c817162d2f..61e876e255e8 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -93,6 +93,7 @@ static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
#endif
extern struct page_ext *page_ext_get(const struct page *page);
+extern struct page_ext *page_ext_from_phys(phys_addr_t phys);
extern void page_ext_put(struct page_ext *page_ext);
extern struct page_ext *page_ext_lookup(unsigned long pfn);
@@ -215,6 +216,11 @@ static inline struct page_ext *page_ext_get(const struct page *page)
return NULL;
}
+static inline struct page_ext *page_ext_from_phys(phys_addr_t phys)
+{
+ return NULL;
+}
+
static inline void page_ext_put(struct page_ext *page_ext)
{
}
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 289620d4aad3..12268a32e8be 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -14,15 +14,18 @@ extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
-void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
-void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
-void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
-void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
- unsigned int nr);
-void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
- unsigned int nr);
-void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
- unsigned int nr);
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud);
+void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr);
+void __page_table_check_pmds_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr);
+void __page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud, unsigned int nr);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -43,55 +46,59 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
__page_table_check_zero(page, order);
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_clear(mm, pte);
+ __page_table_check_pte_clear(mm, addr, pte);
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_clear(mm, pmd);
+ __page_table_check_pmd_clear(mm, addr, pmd);
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_clear(mm, pud);
+ __page_table_check_pud_clear(mm, addr, pud);
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
- pte_t *ptep, pte_t pte, unsigned int nr)
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_ptes_set(mm, ptep, pte, nr);
+ __page_table_check_ptes_set(mm, addr, ptep, pte, nr);
}
static inline void page_table_check_pmds_set(struct mm_struct *mm,
- pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+ unsigned long addr, pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmds_set(mm, pmdp, pmd, nr);
+ __page_table_check_pmds_set(mm, addr, pmdp, pmd, nr);
}
static inline void page_table_check_puds_set(struct mm_struct *mm,
- pud_t *pudp, pud_t pud, unsigned int nr)
+ unsigned long addr, pud_t *pudp, pud_t pud, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_puds_set(mm, pudp, pud, nr);
+ __page_table_check_puds_set(mm, addr, pudp, pud, nr);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -114,30 +121,34 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
{
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
{
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
{
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
{
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
- pte_t *ptep, pte_t pte, unsigned int nr)
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr)
{
}
static inline void page_table_check_pmds_set(struct mm_struct *mm,
- pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+ unsigned long addr, pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
}
static inline void page_table_check_puds_set(struct mm_struct *mm,
- pud_t *pudp, pud_t pud, unsigned int nr)
+ unsigned long addr, pud_t *pudp, pud_t pud, unsigned int nr)
{
}
@@ -149,7 +160,7 @@ static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
#endif /* CONFIG_PAGE_TABLE_CHECK */
-#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
-#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
+#define page_table_check_pmd_set(mm, addr, pmdp, pmd) page_table_check_pmds_set(mm, addr, pmdp, pmd, 1)
+#define page_table_check_pud_set(mm, addr, pudp, pud) page_table_check_puds_set(mm, addr, pudp, pud, 1)
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/panic.h b/include/linux/panic.h
index a00bc0937698..f1dd417e54b2 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -41,6 +41,14 @@ void abort(void);
* PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
*/
extern atomic_t panic_cpu;
+
+/*
+ * panic_redirect_cpu is used when panic is redirected to a specific CPU via
+ * the panic_force_cpu= boot parameter. It holds the CPU number that originally
+ * triggered the panic before redirection. A value of PANIC_CPU_INVALID means
+ * no redirection has occurred.
+ */
+extern atomic_t panic_redirect_cpu;
#define PANIC_CPU_INVALID -1
bool panic_try_start(void);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 078225b514d4..c0c54baadf04 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -12,7 +12,8 @@
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
-extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
+extern acpi_status pci_acpi_add_root_pm_notifier(struct acpi_device *dev,
+ struct acpi_pci_root *pci_root);
static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
return acpi_remove_pm_notifier(dev);
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 4286bfdbfdfa..c021c7af175f 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -223,6 +223,13 @@ struct pci_epc_bar_desc {
/**
* struct pci_epc_features - features supported by a EPC device per function
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @dynamic_inbound_mapping: indicate if the EPC device supports updating
+ * inbound mappings for an already configured BAR
+ * (i.e. allows calling pci_epc_set_bar() again
+ * without first calling pci_epc_clear_bar())
+ * @subrange_mapping: indicate if the EPC device can map inbound subranges for a
+ * BAR. This feature depends on the @dynamic_inbound_mapping
+ * feature.
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
* @intx_capable: indicate if the endpoint can raise INTx interrupts
@@ -231,6 +238,8 @@ struct pci_epc_bar_desc {
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
+ unsigned int dynamic_inbound_mapping : 1;
+ unsigned int subrange_mapping : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
unsigned int intx_capable : 1;
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 48f68c4dcfa5..7737a7c03260 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -111,6 +111,22 @@ struct pci_epf_driver {
#define to_pci_epf_driver(drv) container_of_const((drv), struct pci_epf_driver, driver)
/**
+ * struct pci_epf_bar_submap - BAR subrange for inbound mapping
+ * @phys_addr: target physical/DMA address for this subrange
+ * @size: the size of the subrange to be mapped
+ *
+ * When pci_epf_bar.num_submap is >0, pci_epf_bar.submap describes the
+ * complete BAR layout. This allows an EPC driver to program multiple
+ * inbound translation windows for a single BAR when supported by the
+ * controller. The array order defines the BAR layout: submap[0] starts at
+ * offset 0, and each subsequent entry immediately follows the previous one.
+ */
+struct pci_epf_bar_submap {
+ dma_addr_t phys_addr;
+ size_t size;
+};
+
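Editor's sketch (not part of the patch): an EPF driver could split a BAR into
two inbound windows using the struct above. The helper, its arguments and the
static submap array are hypothetical; pci_epc_set_bar() is the existing EPC
call referenced in the feature description.

static int epf_setup_split_bar(struct pci_epf *epf, struct pci_epf_bar *bar,
			       dma_addr_t ctrl_phys, dma_addr_t buf_phys,
			       size_t half)
{
	/* Kept alive for the EPC driver; a real driver would own this. */
	static struct pci_epf_bar_submap submap[2];

	submap[0].phys_addr = ctrl_phys;	/* BAR offset 0 .. half - 1 */
	submap[0].size = half;
	submap[1].phys_addr = buf_phys;		/* BAR offset half .. 2 * half - 1 */
	submap[1].size = half;

	bar->submap = submap;
	bar->num_submap = ARRAY_SIZE(submap);

	return pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, bar);
}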
+/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
* @addr: virtual address corresponding to the @phys_addr
@@ -119,6 +135,9 @@ struct pci_epf_driver {
* requirement
* @barno: BAR number
* @flags: flags that are set for the BAR
+ * @num_submap: number of entries in @submap
+ * @submap: array of subrange descriptors allocated by the caller. See
+ * struct pci_epf_bar_submap for the detailed semantics.
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
@@ -127,6 +146,10 @@ struct pci_epf_bar {
size_t mem_size;
enum pci_barno barno;
int flags;
+
+ /* Optional sub-range mapping */
+ unsigned int num_submap;
+ struct pci_epf_bar_submap *submap;
};
/**
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 517e121d2598..873de20a2247 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -20,6 +20,8 @@ struct scatterlist;
* struct p2pdma_provider
*
* A p2pdma provider is a range of MMIO address space available to the CPU.
+ * @owner: Device to which this provider belongs.
+ * @bus_offset: Bus offset for p2p communication.
*/
struct p2pdma_provider {
struct device *owner;
diff --git a/include/linux/pci-pwrctrl.h b/include/linux/pci-pwrctrl.h
index 4aefc7901cd1..1192a2527521 100644
--- a/include/linux/pci-pwrctrl.h
+++ b/include/linux/pci-pwrctrl.h
@@ -31,6 +31,8 @@ struct device_link;
/**
* struct pci_pwrctrl - PCI device power control context.
* @dev: Address of the power controlling device.
+ * @power_on: Callback to power on the power controlling device.
+ * @power_off: Callback to power off the power controlling device.
*
* An object of this type must be allocated by the PCI power control device and
* passed to the pwrctrl subsystem to trigger a bus rescan and setup a device
@@ -38,6 +40,8 @@ struct device_link;
*/
struct pci_pwrctrl {
struct device *dev;
+ int (*power_on)(struct pci_pwrctrl *pwrctrl);
+ int (*power_off)(struct pci_pwrctrl *pwrctrl);
/* private: internal use only */
struct notifier_block nb;
@@ -50,5 +54,15 @@ int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl);
void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl);
int devm_pci_pwrctrl_device_set_ready(struct device *dev,
struct pci_pwrctrl *pwrctrl);
-
+#if IS_ENABLED(CONFIG_PCI_PWRCTRL)
+int pci_pwrctrl_create_devices(struct device *parent);
+void pci_pwrctrl_destroy_devices(struct device *parent);
+int pci_pwrctrl_power_on_devices(struct device *parent);
+void pci_pwrctrl_power_off_devices(struct device *parent);
+#else
+static inline int pci_pwrctrl_create_devices(struct device *parent) { return 0; }
+static inline void pci_pwrctrl_destroy_devices(struct device *parent) { }
+static inline int pci_pwrctrl_power_on_devices(struct device *parent) { return 0; }
+static inline void pci_pwrctrl_power_off_devices(struct device *parent) { }
+#endif
#endif /* __PCI_PWRCTRL_H__ */
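Editor's sketch (not part of the patch) of a driver wiring the new power_on/
power_off callbacks; "foo_pwrctrl" and its vdd regulator are hypothetical,
while devm_pci_pwrctrl_device_set_ready() is the API declared above.

struct foo_pwrctrl {
	struct pci_pwrctrl ctx;
	struct regulator *vdd;
};

static int foo_power_on(struct pci_pwrctrl *pwrctrl)
{
	struct foo_pwrctrl *foo = container_of(pwrctrl, struct foo_pwrctrl, ctx);

	return regulator_enable(foo->vdd);
}

static int foo_power_off(struct pci_pwrctrl *pwrctrl)
{
	struct foo_pwrctrl *foo = container_of(pwrctrl, struct foo_pwrctrl, ctx);

	return regulator_disable(foo->vdd);
}

/* In probe(), assuming 'foo' was allocated and foo->vdd acquired: */
	foo->ctx.dev = dev;
	foo->ctx.power_on = foo_power_on;
	foo->ctx.power_off = foo_power_off;
	return devm_pci_pwrctrl_device_set_ready(dev, &foo->ctx);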
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b5cc0c2b9906..1c270f1d5123 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -248,6 +248,11 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
+ /*
+ * PCIe to PCI bridge does not create RID aliases because the bridge is
+ * integrated with the downstream devices and doesn't use real PCI.
+ */
+ PCI_DEV_FLAGS_PCI_BRIDGE_NO_ALIAS = (__force pci_dev_flags_t) (1 << 14),
};
enum pci_irq_reroute_variant {
@@ -377,6 +382,13 @@ struct pci_dev {
0xffffffff. You only need to change
this if your device has broken DMA
or supports 64-bit transfers. */
+ u64 msi_addr_mask; /* Mask of the bits of bus address for
+ MSI that this device implements.
+ Normally set based on device
+ capabilities. You only need to
+ change this if your device claims
+ to support 64-bit MSI but implements
+ fewer than 64 address bits. */
struct device_dma_parameters dma_parms;
@@ -406,6 +418,7 @@ struct pci_dev {
user sysfs */
unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
bit manually */
+ unsigned int no_bw_notif:1; /* BW notifications may cause issues */
unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -441,7 +454,6 @@ struct pci_dev {
unsigned int is_busmaster:1; /* Is busmaster */
unsigned int no_msi:1; /* May not use MSI */
- unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* Config space access blocked */
unsigned int broken_parity_status:1; /* Generates false positive parity */
unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
@@ -463,6 +475,7 @@ struct pci_dev {
unsigned int is_pciehp:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
+ unsigned int is_cxl:1; /* Compute Express Link (CXL) */
/*
* Devices marked being untrusted are the ones that can potentially
* execute DMA attacks and similar. They are typically connected
@@ -558,6 +571,7 @@ struct pci_dev {
struct pci_tsm *tsm; /* TSM operation state */
#endif
u16 acs_cap; /* ACS Capability offset */
+ u16 acs_capabilities; /* ACS Capabilities */
u8 supported_speeds; /* Supported Link Speeds Vector */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
@@ -791,6 +805,11 @@ static inline bool pci_is_display(struct pci_dev *pdev)
return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
}
+static inline bool pcie_is_cxl(struct pci_dev *pci_dev)
+{
+ return pci_dev->is_cxl;
+}
+
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
@@ -854,7 +873,6 @@ struct pci_ops {
void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
- int (*assert_perst)(struct pci_bus *bus, bool assert);
};
/*
@@ -1206,6 +1224,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
+void pci_probe_flush_workqueue(void);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
@@ -1243,7 +1262,11 @@ void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
-void pci_setup_cardbus(struct pci_bus *bus);
+#ifdef CONFIG_CARDBUS
+void pci_setup_cardbus_bridge(struct pci_bus *bus);
+#else
+static inline void pci_setup_cardbus_bridge(struct pci_bus *bus) { }
+#endif
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
@@ -2079,6 +2102,8 @@ static inline int pci_has_flag(int flag) { return 0; }
_PCI_NOP_ALL(read, *)
_PCI_NOP_ALL(write,)
+static inline void pci_probe_flush_workqueue(void) { }
+
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
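Editor's aside on the msi_addr_mask field added to struct pci_dev earlier in
this hunk: a fixup for a device that claims 64-bit MSI support but decodes
only 40 address bits might look like this. The vendor/device IDs and quirk
name are hypothetical; DECLARE_PCI_FIXUP_EARLY() and DMA_BIT_MASK() are
existing kernel facilities.

static void quirk_msi_40bit_addr(struct pci_dev *pdev)
{
	/* Device implements only 40 of the 64 MSI address bits. */
	pdev->msi_addr_mask = DMA_BIT_MASK(40);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FOO, 0x1234, quirk_msi_40bit_addr);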
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a9a089566b7c..406abf629be2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1733,9 +1733,6 @@
#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320
#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321
-#define PCI_VENDOR_ID_ESSENTIAL 0x120f
-#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
-
#define PCI_VENDOR_ID_O2 0x1217
#define PCI_DEVICE_ID_O2_6729 0x6729
#define PCI_DEVICE_ID_O2_6730 0x673a
@@ -2583,6 +2580,8 @@
#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
+#define PCI_VENDOR_ID_ASPEED 0x1a03
+
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
@@ -2950,7 +2949,8 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
-#define PCI_DEVICE_ID_INTEL_HDA_GML 0x3198
+/* In a few Intel documents, the GML acronym is used for Gemini Lake */
+#define PCI_DEVICE_ID_INTEL_HDA_GLK 0x3198
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
@@ -3143,6 +3143,7 @@
#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_NVL 0xd328
#define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7
#define PCI_DEVICE_ID_INTEL_HDA_PTL_H 0xe328
#define PCI_DEVICE_ID_INTEL_HDA_PTL 0xe428
diff --git a/include/linux/pcs/pcs-mtk-lynxi.h b/include/linux/pcs/pcs-mtk-lynxi.h
index be3b4ab32f4a..1bd4a27a8898 100644
--- a/include/linux/pcs/pcs-mtk-lynxi.h
+++ b/include/linux/pcs/pcs-mtk-lynxi.h
@@ -5,9 +5,8 @@
#include <linux/phylink.h>
#include <linux/regmap.h>
-#define MTK_SGMII_FLAG_PN_SWAP BIT(0)
struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
- struct regmap *regmap,
- u32 ana_rgc3, u32 flags);
+ struct fwnode_handle *fwnode,
+ struct regmap *regmap, u32 ana_rgc3);
void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs);
#endif
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 288f5235649a..c8cb010d655e 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -161,6 +161,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
+#define percpu_rwsem_is_write_held(sem) lockdep_is_held_type(sem, 0)
#define percpu_rwsem_is_held(sem) lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9ded2e582c60..48d851fbd8ea 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -305,6 +305,7 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
#define PERF_PMU_CAP_AUX_PAUSE 0x0200
#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
+#define PERF_PMU_CAP_MEDIATED_VPMU 0x0800
/**
* pmu::scope
@@ -998,6 +999,11 @@ struct perf_event_groups {
u64 index;
};
+struct perf_time_ctx {
+ u64 time;
+ u64 stamp;
+ u64 offset;
+};
/**
* struct perf_event_context - event context structure
@@ -1036,9 +1042,12 @@ struct perf_event_context {
/*
* Context clock, runs when context enabled.
*/
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
+
+ /*
+ * Context clock, runs when in the guest mode.
+ */
+ struct perf_time_ctx timeguest;
/*
* These fields let us detect when two contexts have both
@@ -1171,9 +1180,8 @@ struct bpf_perf_event_data_kern {
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
+ struct perf_time_ctx timeguest;
int active;
};
@@ -1669,6 +1677,8 @@ struct perf_guest_info_callbacks {
unsigned int (*state)(void);
unsigned long (*get_ip)(void);
unsigned int (*handle_intel_pt_intr)(void);
+
+ void (*handle_mediated_pmi)(void);
};
#ifdef CONFIG_GUEST_PERF_EVENTS
@@ -1678,6 +1688,7 @@ extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+DECLARE_STATIC_CALL(__perf_guest_handle_mediated_pmi, *perf_guest_cbs->handle_mediated_pmi);
static inline unsigned int perf_guest_state(void)
{
@@ -1694,6 +1705,11 @@ static inline unsigned int perf_guest_handle_intel_pt_intr(void)
return static_call(__perf_guest_handle_intel_pt_intr)();
}
+static inline void perf_guest_handle_mediated_pmi(void)
+{
+ static_call(__perf_guest_handle_mediated_pmi)();
+}
+
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
@@ -1914,6 +1930,13 @@ extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+int perf_create_mediated_pmu(void);
+void perf_release_mediated_pmu(void);
+void perf_load_guest_context(void);
+void perf_put_guest_context(void);
+#endif
+
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 652f287c1ef6..827dca25c0bc 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -225,16 +225,156 @@ static inline int pmd_dirty(pmd_t pmd)
* up to date.
*
* In the general case, no lock is guaranteed to be held between entry and exit
- * of the lazy mode. So the implementation must assume preemption may be enabled
- * and cpu migration is possible; it must take steps to be robust against this.
- * (In practice, for user PTE updates, the appropriate page table lock(s) are
- * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
- * and the mode cannot be used in interrupt context.
+ * of the lazy mode. (In practice, for user PTE updates, the appropriate page
+ * table lock(s) are held, but for kernel PTE updates, no lock is held).
+ * The implementation must therefore assume preemption may be enabled upon
+ * entry to the mode and cpu migration is possible; it must take steps to be
+ * robust against this. An implementation may handle this by disabling
+ * preemption; as a consequence, generic code may not sleep while the lazy MMU
+ * mode is active.
+ *
+ * The mode is disabled in interrupt context, where calls to the lazy_mmu API
+ * have no effect.
+ *
+ * The lazy MMU mode is enabled for a given block of code using:
+ *
+ * lazy_mmu_mode_enable();
+ * <code>
+ * lazy_mmu_mode_disable();
+ *
+ * Nesting is permitted: <code> may itself use an enable()/disable() pair.
+ * A nested call to enable() has no functional effect; however disable() causes
+ * any batched architectural state to be flushed regardless of nesting. After a
+ * call to disable(), the caller can therefore rely on all previous page table
+ * modifications to have taken effect, but the lazy MMU mode may still be
+ * enabled.
+ *
+ * In certain cases, it may be desirable to temporarily pause the lazy MMU mode.
+ * This can be done using:
+ *
+ * lazy_mmu_mode_pause();
+ * <code>
+ * lazy_mmu_mode_resume();
+ *
+ * pause() ensures that the mode is exited regardless of the nesting level;
+ * resume() re-enters the mode at the same nesting level. Any call to the
+ * lazy_mmu_mode_* API between those two calls has no effect. In particular,
+ * this means that pause()/resume() pairs may nest.
+ *
+ * is_lazy_mmu_mode_active() can be used to check whether the lazy MMU mode is
+ * currently enabled.
*/
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-static inline void arch_enter_lazy_mmu_mode(void) {}
-static inline void arch_leave_lazy_mmu_mode(void) {}
-static inline void arch_flush_lazy_mmu_mode(void) {}
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+/**
+ * lazy_mmu_mode_enable() - Enable the lazy MMU mode.
+ *
+ * Enters a new lazy MMU mode section; if the mode was not already enabled,
+ * enables it and calls arch_enter_lazy_mmu_mode().
+ *
+ * Must be paired with a call to lazy_mmu_mode_disable().
+ *
+ * Has no effect if called:
+ * - While paused - see lazy_mmu_mode_pause()
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_enable(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt() || state->pause_count > 0)
+ return;
+
+ VM_WARN_ON_ONCE(state->enable_count == U8_MAX);
+
+ if (state->enable_count++ == 0)
+ arch_enter_lazy_mmu_mode();
+}
+
+/**
+ * lazy_mmu_mode_disable() - Disable the lazy MMU mode.
+ *
+ * Exits the current lazy MMU mode section. If it is the outermost section,
+ * disables the mode and calls arch_leave_lazy_mmu_mode(). Otherwise (nested
+ * section), calls arch_flush_lazy_mmu_mode().
+ *
+ * Must match a call to lazy_mmu_mode_enable().
+ *
+ * Has no effect if called:
+ * - While paused - see lazy_mmu_mode_pause()
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_disable(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt() || state->pause_count > 0)
+ return;
+
+ VM_WARN_ON_ONCE(state->enable_count == 0);
+
+ if (--state->enable_count == 0)
+ arch_leave_lazy_mmu_mode();
+ else /* Exiting a nested section */
+ arch_flush_lazy_mmu_mode();
+}
+
+/**
+ * lazy_mmu_mode_pause() - Pause the lazy MMU mode.
+ *
+ * Pauses the lazy MMU mode; if it is currently active, disables it and calls
+ * arch_leave_lazy_mmu_mode().
+ *
+ * Must be paired with a call to lazy_mmu_mode_resume(). Calls to the
+ * lazy_mmu_mode_* API have no effect until the matching resume() call.
+ *
+ * Has no effect if called:
+ * - While paused (inside another pause()/resume() pair)
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_pause(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt())
+ return;
+
+ VM_WARN_ON_ONCE(state->pause_count == U8_MAX);
+
+ if (state->pause_count++ == 0 && state->enable_count > 0)
+ arch_leave_lazy_mmu_mode();
+}
+
+/**
+ * lazy_mmu_mode_resume() - Resume the lazy MMU mode.
+ *
+ * Resumes the lazy MMU mode; if it was active at the point where the matching
+ * call to lazy_mmu_mode_pause() was made, re-enables it and calls
+ * arch_enter_lazy_mmu_mode().
+ *
+ * Must match a call to lazy_mmu_mode_pause().
+ *
+ * Has no effect if called:
+ * - While paused (inside another pause()/resume() pair)
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_resume(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt())
+ return;
+
+ VM_WARN_ON_ONCE(state->pause_count == 0);
+
+ if (--state->pause_count == 0 && state->enable_count > 0)
+ arch_enter_lazy_mmu_mode();
+}
+#else
+static inline void lazy_mmu_mode_enable(void) {}
+static inline void lazy_mmu_mode_disable(void) {}
+static inline void lazy_mmu_mode_pause(void) {}
+static inline void lazy_mmu_mode_resume(void) {}
#endif
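Editor's sketch (not part of the patch) of the nesting and pause rules
documented above; the function and the elided page-table updates are
hypothetical.

static void update_many_ptes(void)
{
	lazy_mmu_mode_enable();		/* outermost section: mode turns on */

	lazy_mmu_mode_enable();		/* nested: no functional effect */
	/* ... batched PTE updates ... */
	lazy_mmu_mode_disable();	/* nested exit: flushes batched state */

	lazy_mmu_mode_pause();		/* fully exits the mode, at any depth */
	/* ... code that must see up-to-date page tables, or may sleep ... */
	lazy_mmu_mode_resume();		/* re-enters at the same nesting level */

	lazy_mmu_mode_disable();	/* outermost exit: mode turns off */
}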
#ifndef pte_batch_hint
@@ -289,7 +429,7 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
- page_table_check_ptes_set(mm, ptep, pte, nr);
+ page_table_check_ptes_set(mm, addr, ptep, pte, nr);
for (;;) {
set_pte(ptep, pte);
@@ -494,7 +634,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, address, pte);
return pte;
}
#endif
@@ -553,7 +693,7 @@ static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
* No need for ptep_get_and_clear(): page table check doesn't care about
* any bits that could have been set by HW concurrently.
*/
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, addr, pte);
}
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
@@ -648,7 +788,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
- page_table_check_pmd_clear(mm, pmd);
+ page_table_check_pmd_clear(mm, address, pmd);
return pmd;
}
@@ -661,7 +801,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
- page_table_check_pud_clear(mm, pud);
+ page_table_check_pud_clear(mm, address, pud);
return pud;
}
diff --git a/include/linux/phy.h b/include/linux/phy.h
index fbbe028cc4b7..6f9979a26892 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,6 +327,7 @@ static inline long rgmii_clock(int speed)
struct device;
struct kernel_hwtstamp_config;
struct phylink;
+struct phy_port;
struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
@@ -645,6 +646,9 @@ struct phy_oatc14_sqi_capability {
* @master_slave_state: Current master/slave configuration
* @mii_ts: Pointer to time stamper callbacks
* @psec: Pointer to Power Sourcing Equipment control struct
+ * @ports: List of PHY ports structures
+ * @n_ports: Number of ports currently attached to the PHY
+ * @max_n_ports: Max number of ports this PHY can expose
* @lock: Mutex for serialization access to PHY
* @state_queue: Work queue for state machine
* @link_down_events: Number of times link was lost
@@ -783,6 +787,10 @@ struct phy_device {
struct mii_timestamper *mii_ts;
struct pse_control *psec;
+ struct list_head ports;
+ int n_ports;
+ int max_n_ports;
+
u8 mdix;
u8 mdix_ctrl;
@@ -802,11 +810,15 @@ struct phy_device {
};
/* Generic phy_device::dev_flags */
-#define PHY_F_NO_IRQ 0x80000000
-#define PHY_F_RXC_ALWAYS_ON 0x40000000
+#define PHY_F_NO_IRQ 0x80000000
+#define PHY_F_RXC_ALWAYS_ON 0x40000000
+#define PHY_F_KEEP_PREAMBLE_BEFORE_SFD 0x20000000
#define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio)
+#define phy_for_each_port(phydev, port) \
+ list_for_each_entry(port, &(phydev)->ports, head)
+
/**
* struct phy_tdr_config - Configuration of a TDR raw test
*
@@ -1507,6 +1519,49 @@ struct phy_driver {
* Returns the time in jiffies until the next update event.
*/
unsigned int (*get_next_update_time)(struct phy_device *dev);
+
+ /**
+ * @attach_mii_port: Attach the given MII port to the PHY device
+ * @dev: PHY device to notify
+ * @port: The port being added
+ *
+ * Called when an MII port that needs to be driven by the PHY is found.
+ *
+ * The port that is being passed may or may not be initialized. If it is
+ * already initialized, it was populated from the generic port
+ * representation in devicetree, which supersedes any strapping or
+ * vendor-specific properties.
+ *
+ * If the port isn't initialized, the port->mediums and port->lanes
+ * fields must be set, possibly according to strapping information.
+ *
+ * The PHY driver must set the port->interfaces field to indicate the
+ * possible MII modes that this PHY can output on the port.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*attach_mii_port)(struct phy_device *dev, struct phy_port *port);
+
+ /**
+ * @attach_mdi_port: Attach the given MDI port to the PHY device
+ * @dev: PHY device to notify
+ * @port: The port being added
+ *
+ * Called when a port that needs to be driven by the PHY is found. The
+ * number of times this will be called depends on phydev->max_n_ports,
+ * which the driver can change in .probe().
+ *
+ * The port that is being passed may or may not be initialized. If it is
+ * already initialized, it was populated from the generic port
+ * representation in devicetree, which supersedes any strapping or
+ * vendor-specific properties.
+ *
+ * If the port isn't initialized, the port->mediums and port->lanes
+ * fields must be set, possibly according to strapping information.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*attach_mdi_port)(struct phy_device *dev, struct phy_port *port);
};
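Editor's sketch (not part of the patch) of the .attach_mii_port() contract
described above; the driver name, strapping fallback and chosen interface
modes are hypothetical.

static int foo_attach_mii_port(struct phy_device *dev, struct phy_port *port)
{
	/* Uninitialized port: derive capabilities from strapping. */
	if (!port->mediums)
		port->mediums = BIT(ETHTOOL_LINK_MEDIUM_BASET);

	/* Advertise the MII modes this PHY can output on the port. */
	__set_bit(PHY_INTERFACE_MODE_SGMII, port->interfaces);
	__set_bit(PHY_INTERFACE_MODE_2500BASEX, port->interfaces);

	return 0;
}

static struct phy_driver foo_phy_driver = {
	/* ... */
	.attach_mii_port = foo_attach_mii_port,
};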
#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -2097,12 +2152,6 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable, int speed);
-int phy_sfp_connect_phy(void *upstream, struct phy_device *phy);
-void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy);
-void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
-void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
-int phy_sfp_probe(struct phy_device *phydev,
- const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos);
@@ -2310,6 +2359,7 @@ void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
+
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
@@ -2356,10 +2406,6 @@ int phy_register_fixup_for_id(const char *bus_id,
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
-int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
-int phy_unregister_fixup_for_id(const char *bus_id);
-int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
-
int phy_eee_tx_clock_stop_capable(struct phy_device *phydev);
int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
@@ -2400,6 +2446,8 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
+struct phy_port *phy_get_sfp_port(struct phy_device *phydev);
+
extern const struct bus_type mdio_bus_type;
extern const struct class mdio_bus_class;
diff --git a/include/linux/phy/phy-common-props.h b/include/linux/phy/phy-common-props.h
new file mode 100644
index 000000000000..680e13de4558
--- /dev/null
+++ b/include/linux/phy/phy-common-props.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phy-common-props.h -- Common properties for generic PHYs
+ *
+ * Copyright 2025 NXP
+ */
+
+#ifndef __PHY_COMMON_PROPS_H
+#define __PHY_COMMON_PROPS_H
+
+#include <dt-bindings/phy/phy.h>
+
+struct fwnode_handle;
+
+int __must_check phy_get_rx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int supported,
+ unsigned int default_val,
+ unsigned int *val);
+int __must_check phy_get_tx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int supported,
+ unsigned int default_val,
+ unsigned int *val);
+int __must_check phy_get_manual_rx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int *val);
+int __must_check phy_get_manual_tx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int *val);
+
+#endif /* __PHY_COMMON_PROPS_H */
diff --git a/include/linux/phy_port.h b/include/linux/phy_port.h
new file mode 100644
index 000000000000..0ef0f5ce4709
--- /dev/null
+++ b/include/linux/phy_port.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __PHY_PORT_H
+#define __PHY_PORT_H
+
+#include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/phy.h>
+
+struct phy_port;
+
+/**
+ * enum phy_port_parent - The device this port is attached to
+ *
+ * @PHY_PORT_PHY: Indicates that the port is driven by a PHY device
+ */
+enum phy_port_parent {
+ PHY_PORT_PHY,
+};
+
+struct phy_port_ops {
+ /* Sometimes, the link state can be retrieved from physical,
+ * out-of-band channels such as the LOS signal on SFP. These
+ * callbacks allow notifying the port about state changes.
+ */
+ void (*link_up)(struct phy_port *port);
+ void (*link_down)(struct phy_port *port);
+
+ /* If the port acts as a Media Independent Interface (Serdes port),
+ * configures the port with the relevant state and mode. When enable is
+ * not set, the interface argument should be ignored.
+ */
+ int (*configure_mii)(struct phy_port *port, bool enable, phy_interface_t interface);
+};
+
+/**
+ * struct phy_port - A representation of a network device physical interface
+ *
+ * @head: Used by the port's parent to list ports
+ * @parent_type: The type of device this port is directly connected to
+ * @phy: If the parent is PHY_PORT_PHY, the PHY controlling that port
+ * @ops: Callback ops implemented by the port controller
+ * @pairs: The number of pairs this port has, 0 if not applicable
+ * @mediums: Bitmask of the physical mediums this port provides access to
+ * @supported: The link modes this port can expose, if this port is MDI (not MII)
+ * @interfaces: The MII interfaces this port supports, if this port is MII
+ * @not_described: Indicates to the parent driver that this port isn't described,
+ * so it's up to the parent to filter its capabilities.
+ * @active: Indicates if the port is currently part of the active link.
+ * @is_mii: Indicates if this port is MII (Media Independent Interface),
+ * or MDI (Media Dependent Interface).
+ * @is_sfp: Indicates if this port drives an SFP cage.
+ */
+struct phy_port {
+ struct list_head head;
+ enum phy_port_parent parent_type;
+ union {
+ struct phy_device *phy;
+ };
+
+ const struct phy_port_ops *ops;
+
+ int pairs;
+ unsigned long mediums;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+
+ unsigned int not_described:1;
+ unsigned int active:1;
+ unsigned int is_mii:1;
+ unsigned int is_sfp:1;
+};
+
+struct phy_port *phy_port_alloc(void);
+void phy_port_destroy(struct phy_port *port);
+
+static inline struct phy_device *port_phydev(struct phy_port *port)
+{
+ return port->phy;
+}
+
+struct phy_port *phy_of_parse_port(struct device_node *dn);
+
+static inline bool phy_port_is_copper(struct phy_port *port)
+{
+ return port->mediums == BIT(ETHTOOL_LINK_MEDIUM_BASET);
+}
+
+static inline bool phy_port_is_fiber(struct phy_port *port)
+{
+ return !!(port->mediums & ETHTOOL_MEDIUM_FIBER_BITS);
+}
+
+void phy_port_update_supported(struct phy_port *port);
+int phy_port_restrict_mediums(struct phy_port *port, unsigned long mediums);
+
+int phy_port_get_type(struct phy_port *port);
+
+#endif
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 38363e566ac3..2bc0db3d52ac 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -90,9 +90,10 @@ enum {
MAC_40000FD = BIT(13),
MAC_50000FD = BIT(14),
MAC_56000FD = BIT(15),
- MAC_100000FD = BIT(16),
- MAC_200000FD = BIT(17),
- MAC_400000FD = BIT(18),
+ MAC_80000FD = BIT(16),
+ MAC_100000FD = BIT(17),
+ MAC_200000FD = BIT(18),
+ MAC_400000FD = BIT(19),
};
static inline bool phylink_autoneg_inband(unsigned int mode)
@@ -836,4 +837,9 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
void phylink_decode_usxgmii_word(struct phylink_link_state *state,
uint16_t lpa);
+
+void phylink_replay_link_begin(struct phylink *pl);
+
+void phylink_replay_link_end(struct phylink *pl);
+
#endif
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 69294f79cc88..8080a6fc6c8c 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -2005,6 +2005,14 @@ struct lightbar_params_v2_colors {
struct rgb_s color[8]; /* 0-3 are Google colors */
} __ec_todo_packed;
+struct lightbar_params_v3 {
+ /*
+ * Number of LEDs reported by the EC.
+ * May be less than the actual number of LEDs in the lightbar.
+ */
+ uint8_t reported_led_num;
+} __ec_todo_packed;
+
/* Lightbar program. */
#define EC_LB_PROG_LEN 192
struct lightbar_program {
@@ -2012,6 +2020,17 @@ struct lightbar_program {
uint8_t data[EC_LB_PROG_LEN];
} __ec_todo_unpacked;
+/*
+ * Lightbar program for large sequences. Sequences are sent in pieces, with
+ * increasing offset. The sequences are still limited by the amount reserved in
+ * EC RAM.
+ */
+struct lightbar_program_ex {
+ uint8_t size;
+ uint16_t offset;
+ uint8_t data[];
+} __ec_todo_packed;
+
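Editor's sketch (not part of the patch): host-side chunking of a large
program, per the comment above. The transport helper and chunk size are
hypothetical; each message would carry a struct lightbar_program_ex with
LIGHTBAR_CMD_SET_PROGRAM_EX.

#define LB_CHUNK 32	/* hypothetical per-message payload size */

static int upload_program(const uint8_t *prog, uint16_t len)
{
	uint16_t off;

	for (off = 0; off < len; off += LB_CHUNK) {
		uint8_t n = (len - off > LB_CHUNK) ? LB_CHUNK : (uint8_t)(len - off);
		int ret = send_lightbar_set_program_ex(n, off, prog + off);

		if (ret)
			return ret;
	}
	return 0;
}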
struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
@@ -2058,6 +2077,7 @@ struct ec_params_lightbar {
struct lightbar_params_v2_colors set_v2par_colors;
struct lightbar_program set_program;
+ struct lightbar_program_ex set_program_ex;
};
} __ec_todo_packed;
@@ -2086,6 +2106,8 @@ struct ec_response_lightbar {
struct lightbar_params_v2_thresholds get_params_v2_thlds;
struct lightbar_params_v2_colors get_params_v2_colors;
+ struct lightbar_params_v3 get_params_v3;
+
struct __ec_todo_unpacked {
uint32_t num;
uint32_t flags;
@@ -2143,6 +2165,8 @@ enum lightbar_command {
LIGHTBAR_CMD_SET_PARAMS_V2_THRESHOLDS = 31,
LIGHTBAR_CMD_GET_PARAMS_V2_COLORS = 32,
LIGHTBAR_CMD_SET_PARAMS_V2_COLORS = 33,
+ LIGHTBAR_CMD_GET_PARAMS_V3 = 34,
+ LIGHTBAR_CMD_SET_PROGRAM_EX = 35,
LIGHTBAR_NUM_CMDS
};
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index b9c8520b4bd3..509c5592aab0 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -59,7 +59,8 @@ struct davinci_mcasp_pdata {
bool i2s_accurate_sck;
/* McASP specific fields */
- int tdm_slots;
+ int tdm_slots_tx;
+ int tdm_slots_rx;
u8 op_mode;
u8 dismod;
u8 num_serializer;
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
deleted file mode 100644
index 7d21e0c41037..000000000000
--- a/include/linux/platform_data/hwmon-s3c.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C - HWMon interface for ADC
-*/
-
-#ifndef __HWMON_S3C_H__
-#define __HWMON_S3C_H__
-
-/**
- * s3c_hwmon_chcfg - channel configuration
- * @name: The name to give this channel.
- * @mult: Multiply the ADC value read by this.
- * @div: Divide the value from the ADC by this.
- *
- * The value read from the ADC is converted to a value that
- * hwmon expects (mV) by result = (value_read * @mult) / @div.
- */
-struct s3c_hwmon_chcfg {
- const char *name;
- unsigned int mult;
- unsigned int div;
-};
-
-/**
- * s3c_hwmon_pdata - HWMON platform data
- * @in: One configuration for each possible channel used.
- */
-struct s3c_hwmon_pdata {
- struct s3c_hwmon_chcfg *in[8];
-};
-
-#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/mipi-i3c-hci.h b/include/linux/platform_data/mipi-i3c-hci.h
new file mode 100644
index 000000000000..ab7395f455f9
--- /dev/null
+++ b/include/linux/platform_data/mipi-i3c-hci.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef INCLUDE_PLATFORM_DATA_MIPI_I3C_HCI_H
+#define INCLUDE_PLATFORM_DATA_MIPI_I3C_HCI_H
+
+#include <linux/compiler_types.h>
+
+/**
+ * struct mipi_i3c_hci_platform_data - Platform-dependent data for mipi_i3c_hci
+ * @base_regs: Register set base address (to support multi-bus instances)
+ */
+struct mipi_i3c_hci_platform_data {
+ void __iomem *base_regs;
+};
+
+#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 419491d4abca..516538b5a527 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -29,6 +29,7 @@
#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
+#define ASUS_WMI_METHODID_NOTIF 0x00100021 /* Notify method */
#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 98a899858ece..afcaaa37a812 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -681,10 +681,10 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+ bool work_in_progress; /* Owned by the PM core */
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
- bool work_in_progress:1; /* Owned by the PM core */
bool smart_suspend:1; /* Owned by the PM core */
bool must_resume:1; /* Owned by the PM core */
bool may_skip_resume:1; /* Set by subsystems */
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index e86f3b731da2..9e1892525eac 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -44,8 +44,9 @@ posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
}
#endif
-int posix_acl_to_xattr(struct user_namespace *user_ns,
- const struct posix_acl *acl, void *buffer, size_t size);
+extern void *posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
+ size_t *sizep, gfp_t gfp);
+
static inline const char *posix_acl_xattr_name(int type)
{
switch (type) {
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c5b30054cd01..7729fef249e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -31,6 +31,16 @@
#include <asm/processor.h>
#include <linux/context_tracking_irq.h>
+token_context_lock(RCU, __reentrant_ctx_lock);
+token_context_lock_instance(RCU, RCU_SCHED);
+token_context_lock_instance(RCU, RCU_BH);
+
+/*
+ * A convenience macro that can be used for RCU-protected globals or struct
+ * members; adds type qualifier __rcu, and also enforces __guarded_by(RCU).
+ */
+#define __rcu_guarded __rcu __guarded_by(RCU)
+
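Editor's sketch (not part of the patch): with the annotation, the context
checker requires RCU to be held around the dereference. "struct foo_cfg" and
its val member are hypothetical.

static struct foo_cfg __rcu_guarded *cfg;

static int foo_get_val(void)
{
	int val;

	rcu_read_lock();
	val = rcu_dereference(cfg)->val;	/* RCU held: accepted */
	rcu_read_unlock();
	return val;
}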
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
@@ -175,36 +185,7 @@ void rcu_tasks_torture_stats_print(char *tt, char *tf);
# define synchronize_rcu_tasks synchronize_rcu
# endif
-# ifdef CONFIG_TASKS_TRACE_RCU
-// Bits for ->trc_reader_special.b.need_qs field.
-#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
-#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
-
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
-void rcu_tasks_trace_qs_blkd(struct task_struct *t);
-
-# define rcu_tasks_trace_qs(t) \
- do { \
- int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
- \
- if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
- likely(!___rttq_nesting)) { \
- rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
- } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
- !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
- rcu_tasks_trace_qs_blkd(t); \
- } \
- } while (0)
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
-# else
-# define rcu_tasks_trace_qs(t) do { } while (0)
-# endif
-
-#define rcu_tasks_qs(t, preempt) \
-do { \
- rcu_tasks_classic_qs((t), (preempt)); \
- rcu_tasks_trace_qs(t); \
-} while (0)
+#define rcu_tasks_qs(t, preempt) rcu_tasks_classic_qs((t), (preempt))
# ifdef CONFIG_TASKS_RUDE_RCU
void synchronize_rcu_tasks_rude(void);
@@ -425,7 +406,8 @@ static inline void rcu_preempt_sleep_check(void) { }
// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
-static inline bool lockdep_assert_rcu_helper(bool c)
+static __always_inline bool lockdep_assert_rcu_helper(bool c, const struct __ctx_lock_RCU *ctx)
+ __assumes_shared_ctx_lock(RCU) __assumes_shared_ctx_lock(ctx)
{
return debug_lockdep_rcu_enabled() &&
(c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
@@ -438,7 +420,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
*/
#define lockdep_assert_in_rcu_read_lock() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map), RCU))
/**
* lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
@@ -448,7 +430,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* actual rcu_read_lock_bh() is required.
*/
#define lockdep_assert_in_rcu_read_lock_bh() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map), RCU_BH))
/**
* lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
@@ -458,7 +440,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* instead an actual rcu_read_lock_sched() is required.
*/
#define lockdep_assert_in_rcu_read_lock_sched() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map), RCU_SCHED))
/**
* lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
@@ -476,17 +458,17 @@ static inline bool lockdep_assert_rcu_helper(bool c)
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
!lock_is_held(&rcu_bh_lock_map) && \
!lock_is_held(&rcu_sched_lock_map) && \
- preemptible()))
+ preemptible(), RCU))
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
-#define lockdep_assert_in_rcu_reader() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() __assume_shared_ctx_lock(RCU)
+#define lockdep_assert_in_rcu_read_lock_bh() __assume_shared_ctx_lock(RCU_BH)
+#define lockdep_assert_in_rcu_read_lock_sched() __assume_shared_ctx_lock(RCU_SCHED)
+#define lockdep_assert_in_rcu_reader() __assume_shared_ctx_lock(RCU)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -506,11 +488,11 @@ static inline bool lockdep_assert_rcu_helper(bool c)
#endif /* #else #ifdef __CHECKER__ */
#define __unrcu_pointer(p, local) \
-({ \
+context_unsafe( \
typeof(*p) *local = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
- ((typeof(*p) __force __kernel *)(local)); \
-})
+ ((typeof(*p) __force __kernel *)(local)) \
+)
/**
* unrcu_pointer - mark a pointer as not being RCU protected
* @p: pointer needing to lose its __rcu property
@@ -586,7 +568,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
-do { \
+context_unsafe( \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
rcu_check_sparse(p, __rcu); \
\
@@ -594,7 +576,7 @@ do { \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-} while (0)
+)
/**
* rcu_replace_pointer() - replace an RCU pointer, returning its old value
@@ -861,9 +843,10 @@ do { \
* only when acquiring spinlocks that are subject to priority inheritance.
*/
static __always_inline void rcu_read_lock(void)
+ __acquires_shared(RCU)
{
__rcu_read_lock();
- __acquire(RCU);
+ __acquire_shared(RCU);
rcu_lock_acquire(&rcu_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
@@ -891,11 +874,12 @@ static __always_inline void rcu_read_lock(void)
* See rcu_read_lock() for more information.
*/
static inline void rcu_read_unlock(void)
+ __releases_shared(RCU)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
- __release(RCU);
+ __release_shared(RCU);
__rcu_read_unlock();
}
@@ -914,9 +898,11 @@ static inline void rcu_read_unlock(void)
* was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_BH)
{
local_bh_disable();
- __acquire(RCU_BH);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
@@ -928,11 +914,13 @@ static inline void rcu_read_lock_bh(void)
* See rcu_read_lock_bh() for more information.
*/
static inline void rcu_read_unlock_bh(void)
+ __releases_shared(RCU) __releases_shared(RCU_BH)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
- __release(RCU_BH);
+ __release_shared(RCU_BH);
+ __release_shared(RCU);
local_bh_enable();
}
@@ -952,9 +940,11 @@ static inline void rcu_read_unlock_bh(void)
* rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
@@ -962,9 +952,11 @@ static inline void rcu_read_lock_sched(void)
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable_notrace();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
}
/**
@@ -973,22 +965,27 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
* See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable();
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable_notrace();
}
static __always_inline void rcu_read_lock_dont_migrate(void)
+ __acquires_shared(RCU)
{
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
migrate_disable();
@@ -996,6 +993,7 @@ static __always_inline void rcu_read_lock_dont_migrate(void)
}
static inline void rcu_read_unlock_migrate(void)
+ __releases_shared(RCU)
{
rcu_read_unlock();
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
@@ -1041,10 +1039,10 @@ static inline void rcu_read_unlock_migrate(void)
* ordering guarantees for either the CPU or the compiler.
*/
#define RCU_INIT_POINTER(p, v) \
- do { \
+ context_unsafe( \
rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
- } while (0)
+ )
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
@@ -1192,18 +1190,7 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
-DEFINE_LOCK_GUARD_0(rcu,
- do {
- rcu_read_lock();
- /*
- * sparse doesn't call the cleanup function,
- * so just release immediately and don't track
- * the context. We don't need to anyway, since
- * the whole point of the guard is to not need
- * the explicit unlock.
- */
- __release(RCU);
- } while (0),
- rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+DECLARE_LOCK_GUARD_0_ATTRS(rcu, __acquires_shared(RCU), __releases_shared(RCU))
#endif /* __LINUX_RCUPDATE_H */
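
The simplified guard definition keeps call sites unchanged; only the acquire/release semantics move into the DECLARE_LOCK_GUARD_0_ATTRS annotation. A minimal caller sketch follows (the node type and list are illustrative, not part of this patch):

#include <linux/rcupdate.h>
#include <linux/cleanup.h>
#include <linux/rculist.h>

struct my_node {
	int val;
	struct list_head link;
};

static int sum_nodes(struct list_head *head)
{
	struct my_node *n;
	int sum = 0;

	guard(rcu)();			/* rcu_read_lock() for this scope */
	list_for_each_entry_rcu(n, head, link)
		sum += n->val;
	return sum;			/* rcu_read_unlock() on scope exit */
}
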
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index e6c44eb428ab..cee89e51e45c 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -12,27 +12,74 @@
#include <linux/rcupdate.h>
#include <linux/cleanup.h>
-extern struct lockdep_map rcu_trace_lock_map;
+#ifdef CONFIG_TASKS_TRACE_RCU
+extern struct srcu_struct rcu_tasks_trace_srcu_struct;
+#endif // #ifdef CONFIG_TASKS_TRACE_RCU
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
- return lock_is_held(&rcu_trace_lock_map);
+ return srcu_read_lock_held(&rcu_tasks_trace_srcu_struct);
}
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
return 1;
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif // #else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t);
+/**
+ * rcu_read_lock_tasks_trace - mark beginning of RCU-trace read-side critical section
+ *
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
+ * critical sections. Similarly, if call_rcu_tasks_trace() is invoked on
+ * one task while other tasks are within RCU read-side critical sections,
+ * invocation of the corresponding RCU callback is deferred until after
+ * all the other tasks exit their critical sections.
+ *
+ * For more details, please see the documentation for
+ * srcu_read_lock_fast(). For a description of how implicit RCU
+ * readers provide the needed ordering for architectures defining the
+ * ARCH_WANTS_NO_INSTR Kconfig option (and thus promising never to trace
+ * code where RCU is not watching), please see the __srcu_read_lock_fast()
+ * (non-kerneldoc) header comment. Otherwise, the smp_mb() below provides
+ * the needed ordering.
+ */
+static inline struct srcu_ctr __percpu *rcu_read_lock_tasks_trace(void)
+{
+ struct srcu_ctr __percpu *ret = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+
+ rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+ return ret;
+}
+
+/**
+ * rcu_read_unlock_tasks_trace - mark end of RCU-trace read-side critical section
+ * @scp: return value from corresponding rcu_read_lock_tasks_trace().
+ *
+ * Pairs with the preceding call to rcu_read_lock_tasks_trace() that
+ * returned the value passed in via scp.
+ *
+ * For more details, please see the documentation for rcu_read_unlock().
+ * For memory-ordering information, please see the header comment for the
+ * rcu_read_lock_tasks_trace() function.
+ */
+static inline void rcu_read_unlock_tasks_trace(struct srcu_ctr __percpu *scp)
+{
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+ __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
+ srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
+}
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,12 +97,15 @@ static inline void rcu_read_lock_trace(void)
{
struct task_struct *t = current;
- WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
- barrier();
- if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
- t->trc_reader_special.b.need_mb)
- smp_mb(); // Pairs with update-side barriers
- rcu_lock_acquire(&rcu_trace_lock_map);
+ rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+ if (t->trc_reader_nesting++) {
+ // In case we interrupted a Tasks Trace RCU reader.
+ return;
+ }
+ barrier(); // nesting before scp to protect against interrupt handler.
+ t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Placeholder for more selective ordering
}
/**
@@ -69,26 +119,88 @@ static inline void rcu_read_lock_trace(void)
*/
static inline void rcu_read_unlock_trace(void)
{
- int nesting;
+ struct srcu_ctr __percpu *scp;
struct task_struct *t = current;
- rcu_lock_release(&rcu_trace_lock_map);
- nesting = READ_ONCE(t->trc_reader_nesting) - 1;
- barrier(); // Critical section before disabling.
- // Disable IPI-based setting of .need_qs.
- WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
- if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
- WRITE_ONCE(t->trc_reader_nesting, nesting);
- return; // We assume shallow reader nesting.
+ scp = t->trc_reader_scp;
+ barrier(); // scp before nesting to protect against interrupt handler.
+ if (!--t->trc_reader_nesting) {
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Placeholder for more selective ordering
+ __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
}
- WARN_ON_ONCE(nesting != 0);
- rcu_read_unlock_trace_special(t);
+ srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}
-void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
-void synchronize_rcu_tasks_trace(void);
-void rcu_barrier_tasks_trace(void);
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
+/**
+ * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a trace rcu-tasks
+ * grace period elapses, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These
+ * read-side critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
+{
+ call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
+}
+
+/**
+ * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
+ *
+ * Control will return to the caller some time after a trace rcu-tasks
+ * grace period has elapsed, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These read-side
+ * critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function preambles
+ * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
+ * (yet) intended for heavy use from multiple CPUs.
+ *
+ * See the description of synchronize_rcu() for more detailed information
+ * on memory ordering guarantees.
+ */
+static inline void synchronize_rcu_tasks_trace(void)
+{
+ synchronize_srcu(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
+ *
+ * Note that rcu_barrier_tasks_trace() is not obligated to actually wait,
+ * for example, if there are no pending callbacks.
+ */
+static inline void rcu_barrier_tasks_trace(void)
+{
+ srcu_barrier(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_tasks_trace_expedite_current - Expedite the current Tasks Trace RCU grace period
+ *
+ * Cause the current Tasks Trace RCU grace period to become expedited.
+ * The grace period following the current one might also be expedited.
+ * If there is no current grace period, one might be created. If the
+ * current grace period is sleeping, that sleep will complete before
+ * expediting takes effect.
+ */
+static inline void rcu_tasks_trace_expedite_current(void)
+{
+ srcu_expedite_current(&rcu_tasks_trace_srcu_struct);
+}
+
+// Placeholders to enable stepwise transition.
+void __init rcu_tasks_trace_suppress_unused(void);
+
#else
/*
* The BPF JIT forms these addresses even when it doesn't call these
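
With Tasks Trace RCU reimplemented on top of SRCU-fast, the reader-side API stays source-compatible for existing callers. A minimal reader/updater sketch, assuming an illustrative RCU-protected global that is not part of this patch:

#include <linux/rcupdate_trace.h>
#include <linux/slab.h>

struct traced_obj {
	struct rcu_head rh;
	int val;
};

static struct traced_obj __rcu *cur_obj;	/* illustrative only */

static int read_cur_val(void)
{
	struct traced_obj *p;
	int val = -1;

	rcu_read_lock_trace();
	p = rcu_dereference_check(cur_obj, rcu_read_lock_trace_held());
	if (p)
		val = p->val;
	rcu_read_unlock_trace();
	return val;
}

static void free_traced_obj(struct rcu_head *rh)
{
	kfree(container_of(rh, struct traced_obj, rh));
}

static void retire_obj(struct traced_obj *old)
{
	/* Callback runs once all pre-existing trace readers complete. */
	call_rcu_tasks_trace(&old->rh, free_traced_obj);
}
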
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 80dc023ac2bf..3da377ffb0c2 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -478,9 +478,9 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(true, lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(true, lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags) __cond_acquires(lock);
+ unsigned long *flags) __cond_acquires(true, lock);
#endif /* _LINUX_REFCOUNT_H */
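
The two-argument __cond_acquires(true, lock) form states on which return value the lock is held, so the analysis can check the caller. A typical caller pattern, with an illustrative object layout:

#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
	struct list_head node;
};

static void obj_put(struct obj *o, struct mutex *list_lock)
{
	/*
	 * On the true path the mutex is held, matching
	 * __cond_acquires(true, lock); on the false path it is not.
	 */
	if (refcount_dec_and_mutex_lock(&o->ref, list_lock)) {
		list_del(&o->node);
		mutex_unlock(list_lock);
		kfree(o);
	}
}
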
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index b0b9be750d93..caff2240bdab 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -359,6 +359,10 @@ typedef void (*regmap_unlock)(void *);
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
+ * @reg_default_cb: Optional callback to return default values for registers
+ * not listed in reg_defaults. This is only used for
+ * REGCACHE_FLAT population; drivers must ensure the readable_reg/
+ * writeable_reg callbacks are defined to handle holes.
*
* @read_flag_mask: Mask to be set in the top bytes of the register when doing
* a read.
@@ -449,6 +453,8 @@ struct regmap_config {
const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
+ int (*reg_default_cb)(struct device *dev, unsigned int reg,
+ unsigned int *def);
enum regcache_type cache_type;
const void *reg_defaults_raw;
unsigned int num_reg_defaults_raw;
@@ -1349,6 +1355,14 @@ static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
}
+static inline int regmap_default_zero_cb(struct device *dev,
+ unsigned int reg,
+ unsigned int *def)
+{
+ *def = 0;
+ return 0;
+}
+
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
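
The new reg_default_cb lets a driver populate a REGCACHE_FLAT cache without an exhaustive reg_defaults table; regmap_default_zero_cb covers the common all-zero reset case. A sketch of wiring it up, with an illustrative register window:

#include <linux/regmap.h>

static bool my_accessible_reg(struct device *dev, unsigned int reg)
{
	return reg <= 0x3f;	/* illustrative register window */
}

static const struct regmap_config my_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x3f,
	.cache_type	= REGCACHE_FLAT,
	.readable_reg	= my_accessible_reg,
	.writeable_reg	= my_accessible_reg,
	/* Registers not listed in reg_defaults reset to zero. */
	.reg_default_cb	= regmap_default_zero_cb,
};
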
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 978cf593b662..cc6ce709ec86 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -53,6 +53,11 @@ enum regulator_detection_severity {
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
+/* Initialize struct linear_range using voltages, not selectors */
+#define REGULATOR_LINEAR_VRANGE(_offs_uV, _min_uV, _max_uV, _step_uV) \
+ LINEAR_RANGE(_min_uV, ((_min_uV) - (_offs_uV)) / (_step_uV), \
+ ((_max_uV) - (_offs_uV)) / (_step_uV), _step_uV)
+
/**
* struct regulator_ops - regulator operations.
*
@@ -635,6 +640,7 @@ struct regulator_dev {
int ref_cnt;
struct module *owner;
struct device dev;
+ struct device bdev;
struct regulation_constraints *constraints;
struct regulator *supply; /* for tree */
const char *supply_name;
@@ -649,6 +655,7 @@ struct regulator_dev {
struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
+ unsigned int constraints_pending:1;
unsigned int is_switch:1;
/* time when this regulator was disabled last time */
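
Worked expansion of the new macro: with a 600000 uV offset, an 800000..1500000 uV range at 50000 uV steps yields min_sel (800000 - 600000) / 50000 = 4 and max_sel (1500000 - 600000) / 50000 = 18. A sketch with illustrative values:

#include <linux/regulator/driver.h>

/* Expands to LINEAR_RANGE(800000, 4, 18, 50000): selector 4 -> 800 mV,
 * selector 18 -> 1500 mV, 50 mV per step. Values are illustrative.
 */
static const struct linear_range my_buck_range[] = {
	REGULATOR_LINEAR_VRANGE(600000, 800000, 1500000, 50000),
};
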
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 54701668b3df..006e57fd7ca5 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -53,6 +53,7 @@ enum resctrl_res_level {
RDT_RESOURCE_L2,
RDT_RESOURCE_MBA,
RDT_RESOURCE_SMBA,
+ RDT_RESOURCE_PERF_PKG,
/* Must be the last */
RDT_NUM_RESOURCES,
@@ -131,15 +132,24 @@ enum resctrl_domain_type {
* @list: all instances of this resource
* @id: unique id for this instance
* @type: type of this instance
+ * @rid: resource id for this instance
* @cpu_mask: which CPUs share this resource
*/
struct rdt_domain_hdr {
struct list_head list;
int id;
enum resctrl_domain_type type;
+ enum resctrl_res_level rid;
struct cpumask cpu_mask;
};
+static inline bool domain_header_is_valid(struct rdt_domain_hdr *hdr,
+ enum resctrl_domain_type type,
+ enum resctrl_res_level rid)
+{
+ return !WARN_ON_ONCE(hdr->type != type || hdr->rid != rid);
+}
+
/**
* struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
* @hdr: common header for different domain types
@@ -169,7 +179,7 @@ struct mbm_cntr_cfg {
};
/**
- * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
+ * struct rdt_l3_mon_domain - group of CPUs sharing RDT_RESOURCE_L3 monitoring
* @hdr: common header for different domain types
* @ci_id: cache info id for this domain
* @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
@@ -183,7 +193,7 @@ struct mbm_cntr_cfg {
* @cntr_cfg: array of assignable counters' configuration (indexed
* by counter ID)
*/
-struct rdt_mon_domain {
+struct rdt_l3_mon_domain {
struct rdt_domain_hdr hdr;
unsigned int ci_id;
unsigned long *rmid_busy_llc;
@@ -261,6 +271,7 @@ enum resctrl_scope {
RESCTRL_L2_CACHE = 2,
RESCTRL_L3_CACHE = 3,
RESCTRL_L3_NODE,
+ RESCTRL_PACKAGE,
};
/**
@@ -284,7 +295,7 @@ enum resctrl_schema_fmt {
* events of monitor groups created via mkdir.
*/
struct resctrl_mon {
- int num_rmid;
+ u32 num_rmid;
unsigned int mbm_cfg_mask;
int num_mbm_cntrs;
bool mbm_cntr_assignable;
@@ -358,10 +369,10 @@ struct resctrl_cpu_defaults {
};
struct resctrl_mon_config_info {
- struct rdt_resource *r;
- struct rdt_mon_domain *d;
- u32 evtid;
- u32 mon_config;
+ struct rdt_resource *r;
+ struct rdt_l3_mon_domain *d;
+ u32 evtid;
+ u32 mon_config;
};
/**
@@ -403,7 +414,8 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
-void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+bool resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
+ unsigned int binary_bits, void *arch_priv);
bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
@@ -498,22 +510,31 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type);
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
-int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
-void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);
+/*
+ * Architecture hook called at beginning of first file system mount attempt.
+ * No locks are held.
+ */
+void resctrl_arch_pre_mount(void);
+
/**
* resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
* for this resource and domain.
* @r: resource that the counter should be read from.
- * @d: domain that the counter should be read from.
+ * @hdr: Header of domain that the counter should be read from.
* @closid: closid that matches the rmid. Depending on the architecture, the
* counter may match traffic of both @closid and @rmid, or @rmid
* only.
* @rmid: rmid of the counter to read.
* @eventid: eventid to read, e.g. L3 occupancy.
+ * @arch_priv: Architecture private data for this event.
+ * This is the value provided by the architecture via
+ * resctrl_enable_mon_event().
* @val: result of the counter read in bytes.
* @arch_mon_ctx: An architecture specific value from
* resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
@@ -529,9 +550,9 @@ void resctrl_offline_cpu(unsigned int cpu);
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
- u64 *val, void *arch_mon_ctx);
+ void *arch_priv, u64 *val, void *arch_mon_ctx);
/**
* resctrl_arch_rmid_read_context_check() - warn about invalid contexts
@@ -576,7 +597,7 @@ struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid,
enum resctrl_event_id eventid);
@@ -589,7 +610,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d);
/**
* resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
@@ -615,7 +636,7 @@ void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
*
* This can be called from any CPU.
*/
-void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
enum resctrl_event_id evtid, u32 rmid, u32 closid,
u32 cntr_id, bool assign);
@@ -638,7 +659,7 @@ void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid, int cntr_id,
enum resctrl_event_id eventid, u64 *val);
@@ -653,7 +674,7 @@ int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid, int cntr_id,
enum resctrl_event_id eventid);
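
With @rid carried in the common header, code that is handed a struct rdt_domain_hdr can validate both the domain type and the owning resource before container_of(). A sketch, assuming the existing RESCTRL_MON_DOMAIN domain type:

#include <linux/resctrl.h>
#include <linux/container_of.h>

static struct rdt_l3_mon_domain *to_l3_mon_domain(struct rdt_domain_hdr *hdr)
{
	/* WARNs once and fails on a type/resource mismatch. */
	if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
		return NULL;
	return container_of(hdr, struct rdt_l3_mon_domain, hdr);
}
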
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index acfe07860b34..a5f56faa18d2 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -50,6 +50,17 @@ enum resctrl_event_id {
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+ /* Intel Telemetry Events */
+ PMT_EVENT_ENERGY,
+ PMT_EVENT_ACTIVITY,
+ PMT_EVENT_STALLS_LLC_HIT,
+ PMT_EVENT_C1_RES,
+ PMT_EVENT_UNHALTED_CORE_CYCLES,
+ PMT_EVENT_STALLS_LLC_MISS,
+ PMT_EVENT_AUTO_C6_RES,
+ PMT_EVENT_UNHALTED_REF_CYCLES,
+ PMT_EVENT_UOPS_RETIRED,
+
/* Must be the last */
QOS_NUM_EVENTS,
};
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 67d2bf579942..9b262109726d 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -6,6 +6,7 @@
#define __LINUX_RESTART_BLOCK_H
#include <linux/compiler.h>
+#include <linux/time64.h>
#include <linux/types.h>
struct __kernel_timespec;
@@ -50,8 +51,7 @@ struct restart_block {
struct pollfd __user *ufds;
int nfds;
int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
+ struct timespec64 end_time;
} poll;
};
};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 08e664b21f5a..133ccb39137a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -245,16 +245,17 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires_shared(RCU)
{
(void)rhashtable_walk_start_check(iter);
}
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
@@ -325,6 +326,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
+ __acquires(__bitlock(0, bkt))
{
unsigned long flags;
@@ -337,6 +339,7 @@ static inline unsigned long rht_lock(struct bucket_table *tbl,
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
+ __acquires(__bitlock(0, bucket))
{
unsigned long flags;
@@ -349,6 +352,7 @@ static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
@@ -424,13 +428,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(0, bkt));
local_irq_restore(flags);
}
@@ -612,6 +617,7 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -666,6 +672,7 @@ restart:
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -676,6 +683,7 @@ static __always_inline void *rhashtable_lookup(
static __always_inline void *rhashtable_lookup_likely(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_LIKELY);
@@ -727,6 +735,7 @@ static __always_inline void *rhashtable_lookup_fast(
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -737,6 +746,7 @@ static __always_inline struct rhlist_head *rhltable_lookup(
static __always_inline struct rhlist_head *rhltable_lookup_likely(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_LIKELY);
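
__must_hold_shared(RCU) encodes the long-standing requirement that lookups run inside an RCU read-side critical section. A caller sketch, with illustrative object layout and params:

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>

struct entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params entry_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct entry, key),
	.head_offset	= offsetof(struct entry, node),
};

static bool entry_exists(struct rhashtable *ht, u32 key)
{
	bool found;

	rcu_read_lock();	/* satisfies __must_hold_shared(RCU) */
	found = rhashtable_lookup(ht, &key, entry_params) != NULL;
	rcu_read_unlock();
	return found;
}
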
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..8dc0871e5f00 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -92,6 +92,7 @@ struct anon_vma_chain {
};
enum ttu_flags {
+ TTU_USE_SHARED_ZEROPAGE = 0x2, /* for unused pages of large folios */
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
@@ -104,75 +105,8 @@ enum ttu_flags {
};
#ifdef CONFIG_MMU
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
- atomic_inc(&anon_vma->refcount);
-}
-
-void __put_anon_vma(struct anon_vma *anon_vma);
-
-static inline void put_anon_vma(struct anon_vma *anon_vma)
-{
- if (atomic_dec_and_test(&anon_vma->refcount))
- __put_anon_vma(anon_vma);
-}
-
-static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
-{
- down_write(&anon_vma->root->rwsem);
-}
-
-static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
-{
- return down_write_trylock(&anon_vma->root->rwsem);
-}
-
-static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
-{
- up_write(&anon_vma->root->rwsem);
-}
-
-static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
-{
- down_read(&anon_vma->root->rwsem);
-}
-
-static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
-{
- return down_read_trylock(&anon_vma->root->rwsem);
-}
-
-static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
-{
- up_read(&anon_vma->root->rwsem);
-}
-
-/*
- * anon_vma helper functions.
- */
void anon_vma_init(void); /* create anon_vma_cachep */
-int __anon_vma_prepare(struct vm_area_struct *);
-void unlink_anon_vmas(struct vm_area_struct *);
-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
-
-static inline int anon_vma_prepare(struct vm_area_struct *vma)
-{
- if (likely(vma->anon_vma))
- return 0;
-
- return __anon_vma_prepare(vma);
-}
-
-static inline void anon_vma_merge(struct vm_area_struct *vma,
- struct vm_area_struct *next)
-{
- VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
- unlink_anon_vmas(next);
-}
-
-struct anon_vma *folio_get_anon_vma(const struct folio *folio);
#ifdef CONFIG_MM_ID
static __always_inline void folio_lock_large_mapcount(struct folio *folio)
@@ -1000,12 +934,8 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
-enum rmp_flags {
- RMP_LOCKED = 1 << 0,
- RMP_USE_SHARED_ZEROPAGE = 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+ enum ttu_flags flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index 2266f4dc77b6..7a01a0760405 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -163,4 +163,15 @@ void rseq_syscall(struct pt_regs *regs);
static inline void rseq_syscall(struct pt_regs *regs) { }
#endif /* !CONFIG_DEBUG_RSEQ */
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+void rseq_syscall_enter_work(long syscall);
+int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3);
+#else /* CONFIG_RSEQ_SLICE_EXTENSION */
+static inline void rseq_syscall_enter_work(long syscall) { }
+static inline int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3)
+{
+ return -ENOTSUPP;
+}
+#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
+
#endif /* _LINUX_RSEQ_H */
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index a36b472627de..cbc4a791618b 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -15,6 +15,11 @@ struct rseq_stats {
unsigned long cs;
unsigned long clear;
unsigned long fixup;
+ unsigned long s_granted;
+ unsigned long s_expired;
+ unsigned long s_revoked;
+ unsigned long s_yielded;
+ unsigned long s_aborted;
};
DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
@@ -37,6 +42,7 @@ DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
#ifdef CONFIG_RSEQ
#include <linux/jump_label.h>
#include <linux/rseq.h>
+#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/tracepoint-defs.h>
@@ -75,6 +81,147 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
#define rseq_inline __always_inline
#endif
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+DECLARE_STATIC_KEY_TRUE(rseq_slice_extension_key);
+
+static __always_inline bool rseq_slice_extension_enabled(void)
+{
+ return static_branch_likely(&rseq_slice_extension_key);
+}
+
+extern unsigned int rseq_slice_ext_nsecs;
+bool __rseq_arm_slice_extension_timer(void);
+
+static __always_inline bool rseq_arm_slice_extension_timer(void)
+{
+ if (!rseq_slice_extension_enabled())
+ return false;
+
+ if (likely(!current->rseq.slice.state.granted))
+ return false;
+
+ return __rseq_arm_slice_extension_timer();
+}
+
+static __always_inline void rseq_slice_clear_grant(struct task_struct *t)
+{
+ if (IS_ENABLED(CONFIG_RSEQ_STATS) && t->rseq.slice.state.granted)
+ rseq_stat_inc(rseq_stats.s_revoked);
+ t->rseq.slice.state.granted = false;
+}
+
+static __always_inline bool rseq_grant_slice_extension(bool work_pending)
+{
+ struct task_struct *curr = current;
+ struct rseq_slice_ctrl usr_ctrl;
+ union rseq_slice_state state;
+ struct rseq __user *rseq;
+
+ if (!rseq_slice_extension_enabled())
+ return false;
+
+ /* If not enabled or not a return from interrupt, nothing to do. */
+ state = curr->rseq.slice.state;
+ state.enabled &= curr->rseq.event.user_irq;
+ if (likely(!state.state))
+ return false;
+
+ rseq = curr->rseq.usrptr;
+ scoped_user_rw_access(rseq, efault) {
+
+ /*
+ * Quickly check for conditions where a grant is not possible or
+ * needs to be revoked.
+ *
+ * 1) Any TIF bit which needs to do extra work aside of
+ * rescheduling prevents a grant.
+ *
+ * 2) A previous rescheduling request resulted in a slice
+ * extension grant.
+ */
+ if (unlikely(work_pending || state.granted)) {
+ /* Clear user control unconditionally. No point in checking. */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ rseq_slice_clear_grant(curr);
+ return false;
+ }
+
+ unsafe_get_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
+ if (likely(!(usr_ctrl.request)))
+ return false;
+
+ /* Grant the slice extension */
+ usr_ctrl.request = 0;
+ usr_ctrl.granted = 1;
+ unsafe_put_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
+ }
+
+ rseq_stat_inc(rseq_stats.s_granted);
+
+ curr->rseq.slice.state.granted = true;
+ /* Store expiry time for arming the timer on the way out */
+ curr->rseq.slice.expires = data_race(rseq_slice_ext_nsecs) + ktime_get_mono_fast_ns();
+ /*
+ * This is racy against a remote CPU setting TIF_NEED_RESCHED in
+ * several ways:
+ *
+ * 1)
+ * CPU0 CPU1
+ * clear_tsk()
+ * set_tsk()
+ * clear_preempt()
+ * Raise scheduler IPI on CPU0
+ * --> IPI
+ * fold_need_resched() -> Folds correctly
+ * 2)
+ * CPU0 CPU1
+ * set_tsk()
+ * clear_tsk()
+ * clear_preempt()
+ * Raise scheduler IPI on CPU0
+ * --> IPI
+ * fold_need_resched() <- NOOP as TIF_NEED_RESCHED is false
+ *
+ * #1 is not any different from a regular remote reschedule as it
+ * sets the previously not set bit and then raises the IPI which
+ * folds it into the preempt counter
+ *
+ * #2 is obviously incorrect from a scheduler POV, but it is no more
+ * incorrect than the code below clearing the reschedule request,
+ * with the timer as a safety net.
+ *
+ * The important part is that the clearing is protected against the
+ * scheduler IPI and also against any other interrupt which might
+ * end up waking up a task and setting the bits in the middle of
+ * the operation:
+ *
+ * clear_tsk()
+ * ---> Interrupt
+ * wakeup_on_this_cpu()
+ * set_tsk()
+ * set_preempt()
+ * clear_preempt()
+ *
+ * which would leave the state inconsistent.
+ */
+ scoped_guard(irq) {
+ clear_tsk_need_resched(curr);
+ clear_preempt_need_resched();
+ }
+ return true;
+
+efault:
+ force_sig(SIGSEGV);
+ return false;
+}
+
+#else /* CONFIG_RSEQ_SLICE_EXTENSION */
+static inline bool rseq_slice_extension_enabled(void) { return false; }
+static inline bool rseq_arm_slice_extension_timer(void) { return false; }
+static inline void rseq_slice_clear_grant(struct task_struct *t) { }
+static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
+
bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
bool rseq_debug_validate_ids(struct task_struct *t);
@@ -359,8 +506,15 @@ bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
if (csaddr)
unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
+
+ /* Open coded, so it's in the same user access region */
+ if (rseq_slice_extension_enabled()) {
+ /* Unconditionally clear it, no point in conditionals */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ }
}
+ rseq_slice_clear_grant(t);
/* Cache the new values */
t->rseq.ids.cpu_cid = ids->cpu_cid;
rseq_stat_inc(rseq_stats.ids);
@@ -456,8 +610,17 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
*/
u64 csaddr;
- if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
- return false;
+ scoped_user_rw_access(rseq, efault) {
+ unsafe_get_user(csaddr, &rseq->rseq_cs, efault);
+
+ /* Open coded, so it's in the same user access region */
+ if (rseq_slice_extension_enabled()) {
+ /* Unconditionally clear it, no point in conditionals */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ }
+ }
+
+ rseq_slice_clear_grant(t);
if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
@@ -473,6 +636,8 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
u32 node_id = cpu_to_node(ids.cpu_id);
return rseq_update_usr(t, regs, &ids, node_id);
+efault:
+ return false;
}
static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)
@@ -527,17 +692,19 @@ static __always_inline void clear_tif_rseq(void) { }
static __always_inline bool
rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
{
- if (likely(!test_tif_rseq(ti_work)))
- return false;
-
- if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
- current->rseq.event.slowpath = true;
- set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
- return true;
+ if (unlikely(test_tif_rseq(ti_work))) {
+ if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
+ current->rseq.event.slowpath = true;
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ return true;
+ }
+ clear_tif_rseq();
}
-
- clear_tif_rseq();
- return false;
+ /*
+ * Arm the slice extension timer if there is nothing left to do and
+ * the task really goes out to user space.
+ */
+ return rseq_arm_slice_extension_timer();
}
#else /* CONFIG_GENERIC_ENTRY */
@@ -611,6 +778,7 @@ static inline void rseq_syscall_exit_to_user_mode(void) { }
static inline void rseq_irqentry_exit_to_user_mode(void) { }
static inline void rseq_exit_to_user_mode_legacy(void) { }
static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
+static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
#endif /* !CONFIG_RSEQ */
#endif /* _LINUX_RSEQ_ENTRY_H */
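
For reference, a heavily hedged userspace view of the request/grant handshake that the kernel side above implements. The slice_ctrl field names follow this series, but the yield wrapper and the overall ABI details are hypothetical:

/* Userspace sketch of the slice-extension handshake, assuming a struct
 * rseq exposing the slice_ctrl word added in this series. The
 * rseq_slice_yield() wrapper is hypothetical; the real ABI may differ.
 */
static inline void short_critical_enter(volatile struct rseq *rs)
{
	rs->slice_ctrl.request = 1;	/* ask to defer preemption briefly */
}

static inline void short_critical_exit(volatile struct rseq *rs)
{
	rs->slice_ctrl.request = 0;
	if (rs->slice_ctrl.granted)
		rseq_slice_yield();	/* hand the granted slice back */
}
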
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index ef0811379c54..da5fa6f40294 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -73,12 +73,39 @@ struct rseq_ids {
};
/**
+ * union rseq_slice_state - Status information for rseq time slice extension
+ * @state: Compound field to access the overall state
+ * @enabled: Time slice extension is enabled for the task
+ * @granted: Time slice extension was granted to the task
+ */
+union rseq_slice_state {
+ u16 state;
+ struct {
+ u8 enabled;
+ u8 granted;
+ };
+};
+
+/**
+ * struct rseq_slice - Status information for rseq time slice extension
+ * @state: Time slice extension state
+ * @expires: The time when a grant expires
+ * @yielded: Indicator for rseq_slice_yield()
+ */
+struct rseq_slice {
+ union rseq_slice_state state;
+ u64 expires;
+ u8 yielded;
+};
+
+/**
* struct rseq_data - Storage for all rseq related data
* @usrptr: Pointer to the registered user space RSEQ memory
* @len: Length of the RSEQ region
- * @sig: Signature of critial section abort IPs
+ * @sig: Signature of critical section abort IPs
* @event: Storage for event management
* @ids: Storage for cached CPU ID and MM CID
+ * @slice: Storage for time slice extension data
*/
struct rseq_data {
struct rseq __user *usrptr;
@@ -86,6 +113,9 @@ struct rseq_data {
u32 sig;
struct rseq_event event;
struct rseq_ids ids;
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+ struct rseq_slice slice;
+#endif
};
#else /* CONFIG_RSEQ */
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 92fd467547e7..58774eb3aecf 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -10,6 +10,10 @@
#define MAX_DA_NAME_LEN 32
#define MAX_DA_RETRY_RACING_EVENTS 3
+#define RV_MON_GLOBAL 0
+#define RV_MON_PER_CPU 1
+#define RV_MON_PER_TASK 2
+
#ifdef CONFIG_RV
#include <linux/array_size.h>
#include <linux/bitops.h>
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 5b87c6f4a243..3390d21c95dd 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,16 +29,16 @@ do { \
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_read_lock(rwlock) do {__acquire_shared(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release_shared(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
@@ -49,8 +49,8 @@ do { \
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+#define read_trylock(lock) _raw_read_trylock(lock)
+#define write_trylock(lock) _raw_write_trylock(lock)
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
@@ -112,12 +112,7 @@ do { \
} while (0)
#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define write_trylock_irqsave(lock, flags) _raw_write_trylock_irqsave(lock, &(flags))
#ifdef arch_rwlock_is_contended
#define rwlock_is_contended(lock) \
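
write_trylock_irqsave() now forwards to a real function so the conditional acquire is visible to the analysis; caller behavior is unchanged. A caller sketch, with illustrative lock and data:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(my_lock);

static bool try_update(int *shared, int val)
{
	unsigned long flags;

	if (!write_trylock_irqsave(&my_lock, flags))
		return false;
	*shared = val;
	write_unlock_irqrestore(&my_lock, flags);
	return true;
}
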
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 31d3d1116323..61a852609eab 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -15,24 +15,24 @@
* Released under the General Public License (GPL).
*/
-void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
-void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _raw_read_trylock(rwlock_t *lock);
-int __lockfunc _raw_write_trylock(rwlock_t *lock);
-void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock) __cond_acquires_shared(true, lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock) __cond_acquires(true, lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
@@ -137,6 +137,16 @@ static inline int __raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ local_irq_save(*flags);
+ if (_raw_write_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -145,6 +155,7 @@ static inline int __raw_write_trylock(rwlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline void __raw_read_lock(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -152,6 +163,7 @@ static inline void __raw_read_lock(rwlock_t *lock)
}
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags;
@@ -163,6 +175,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_read_lock_irq(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -171,6 +184,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
}
static inline void __raw_read_lock_bh(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -178,6 +192,7 @@ static inline void __raw_read_lock_bh(rwlock_t *lock)
}
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -189,6 +204,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_write_lock_irq(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -197,6 +213,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
}
static inline void __raw_write_lock_bh(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -204,6 +221,7 @@ static inline void __raw_write_lock_bh(rwlock_t *lock)
}
static inline void __raw_write_lock(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -211,6 +229,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
}
static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -220,6 +239,7 @@ static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -227,6 +247,7 @@ static inline void __raw_write_unlock(rwlock_t *lock)
}
static inline void __raw_read_unlock(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -235,6 +256,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -243,6 +265,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
}
static inline void __raw_read_unlock_irq(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -251,6 +274,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
}
static inline void __raw_read_unlock_bh(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -259,6 +283,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -267,6 +292,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
}
static inline void __raw_write_unlock_irq(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -275,6 +301,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
}
static inline void __raw_write_unlock_bh(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 7d81fc6918ee..5353abbfdc0b 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -24,26 +24,29 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
-extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
-extern int rt_read_trylock(rwlock_t *rwlock);
-extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock) __cond_acquires_shared(true, rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases_shared(rwlock);
extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
-extern int rt_write_trylock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock) __cond_acquires(true, rwlock);
extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
static __always_inline void read_lock(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
static __always_inline void read_lock_bh(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
local_bh_disable();
rt_read_lock(rwlock);
}
static __always_inline void read_lock_irq(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
@@ -55,37 +58,43 @@ static __always_inline void read_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define read_trylock(lock) rt_read_trylock(lock)
static __always_inline void read_unlock(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
local_bh_enable();
}
static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void write_lock(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+ __acquires(rwlock)
{
rt_write_lock_nested(rwlock, subclass);
}
@@ -94,12 +103,14 @@ static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
#endif
static __always_inline void write_lock_bh(rwlock_t *rwlock)
+ __acquires(rwlock)
{
local_bh_disable();
rt_write_lock(rwlock);
}
static __always_inline void write_lock_irq(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
@@ -111,36 +122,38 @@ static __always_inline void write_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+#define write_trylock(lock) rt_write_trylock(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = write_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+ __cond_acquires(true, rwlock)
+{
+ *flags = 0;
+ return rt_write_trylock(rwlock);
+}
+#define write_trylock_irqsave(lock, flags) _write_trylock_irqsave(lock, &(flags))
static __always_inline void write_unlock(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
local_bh_enable();
}
static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 1948442e7750..d5e7316401e7 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -22,7 +22,7 @@
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
-typedef struct {
+context_lock_struct(rwlock) {
arch_rwlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -31,7 +31,8 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
@@ -54,13 +55,14 @@ typedef struct {
#include <linux/rwbase_rt.h>
-typedef struct {
+context_lock_struct(rwlock) {
struct rwbase_rt rwbase;
atomic_t readers;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874..9bf1d93d3d7b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,7 +45,7 @@
* reduce the chance that they will share the same cacheline causing
* cacheline bouncing problem.
*/
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
atomic_long_t count;
/*
* Write owner or one of the read owners as well flags regarding
@@ -76,11 +76,13 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}
static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
@@ -148,7 +150,7 @@ extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#include <linux/rwbase_rt.h>
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -180,11 +182,13 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
}
static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
@@ -202,6 +206,7 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
*/
static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held(sem);
@@ -210,6 +215,7 @@ static inline void rwsem_assert_held(const struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held_write(sem);
@@ -220,48 +226,66 @@ static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
/*
* lock for reading
*/
-extern void down_read(struct rw_semaphore *sem);
-extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
-extern int __must_check down_read_killable(struct rw_semaphore *sem);
+extern void down_read(struct rw_semaphore *sem) __acquires_shared(sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-extern int down_read_trylock(struct rw_semaphore *sem);
+extern int down_read_trylock(struct rw_semaphore *sem) __cond_acquires_shared(true, sem);
/*
* lock for writing
*/
-extern void down_write(struct rw_semaphore *sem);
-extern int __must_check down_write_killable(struct rw_semaphore *sem);
+extern void down_write(struct rw_semaphore *sem) __acquires(sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem) __cond_acquires(0, sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-extern int down_write_trylock(struct rw_semaphore *sem);
+extern int down_write_trylock(struct rw_semaphore *sem) __cond_acquires(true, sem);
/*
* release a read lock
*/
-extern void up_read(struct rw_semaphore *sem);
+extern void up_read(struct rw_semaphore *sem) __releases_shared(sem);
/*
* release a write lock
*/
-extern void up_write(struct rw_semaphore *sem);
-
-DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
-DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
-
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
-DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
+extern void up_write(struct rw_semaphore *sem) __releases(sem);
+
+DEFINE_LOCK_GUARD_1(rwsem_read, struct rw_semaphore, down_read(_T->lock), up_read(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _try, down_read_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _intr, down_read_interruptible(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_try, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_intr, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_intr, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_write, struct rw_semaphore, down_write(_T->lock), up_write(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _try, down_write_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _kill, down_write_killable(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
/*
* downgrade write lock to read lock
*/
-extern void downgrade_write(struct rw_semaphore *sem);
+extern void downgrade_write(struct rw_semaphore *sem) __releases(sem) __acquires_shared(sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -277,11 +301,11 @@ extern void downgrade_write(struct rw_semaphore *sem);
* lockdep_set_class() at lock initialization time.
* See Documentation/locking/lockdep-design.rst for more details.)
*/
-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
-extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
-extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+extern void down_read_nested(struct rw_semaphore *sem, int subclass) __acquires_shared(sem);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires_shared(0, sem);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass) __acquires(sem);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires(0, sem);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock) __acquires(sem);
# define down_write_nest_lock(sem, nest_lock) \
do { \
@@ -295,8 +319,8 @@ do { \
* [ This API should be avoided as much as possible - the
* proper abstraction for this case is completions. ]
*/
-extern void down_read_non_owner(struct rw_semaphore *sem);
-extern void up_read_non_owner(struct rw_semaphore *sem);
+extern void down_read_non_owner(struct rw_semaphore *sem) __acquires_shared(sem);
+extern void up_read_non_owner(struct rw_semaphore *sem) __releases_shared(sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
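[Editor's note] A minimal usage sketch of the guard classes defined above (not part of the patch; `my_sem` and `my_data` are illustrative names). The LOCK_GUARD_1 classes let callers hold the rwsem for a scope while keeping the acquire/release attributes visible to context analysis:

static DECLARE_RWSEM(my_sem);
static int my_data;

static int read_my_data(void)
{
        guard(rwsem_read)(&my_sem);     /* down_read(); up_read() at scope exit */
        return my_data;
}

static void write_my_data(int v)
{
        scoped_guard(rwsem_write, &my_sem)      /* down_write()/up_write() pair */
                my_data = v;
}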
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5f00b5ed0f3b..074ad4ef3d81 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -49,6 +49,7 @@
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
+#include <linux/time64.h>
#ifndef COMPILE_OFFSETS
#include <generated/rq-offsets.h>
#endif
@@ -86,6 +87,7 @@ struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
+struct timespec64;
struct user_event_mm;
#include <linux/sched/ext.h>
@@ -435,6 +437,9 @@ struct sched_info {
/* When were we last queued to run? */
unsigned long long last_queued;
+ /* Timestamp of max time spent waiting on a runqueue: */
+ struct timespec64 max_run_delay_ts;
+
#endif /* CONFIG_SCHED_INFO */
};
@@ -586,15 +591,10 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
- union {
- /*
- * When !@on_rq this field is vlag.
- * When cfs_rq->curr == se (which implies @on_rq)
- * this field is vprot. See protect_slice().
- */
- s64 vlag;
- u64 vprot;
- };
+ /* Approximated virtual lag: */
+ s64 vlag;
+ /* 'Protected' deadline, to give out minimum quantums: */
+ u64 vprot;
u64 slice;
u64 nr_migrations;
@@ -945,11 +945,7 @@ struct task_struct {
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
- int trc_ipi_to_cpu;
- union rcu_special trc_reader_special;
- struct list_head trc_holdout_list;
- struct list_head trc_blkd_node;
- int trc_blkd_cpu;
+ struct srcu_ctr __percpu *trc_reader_scp;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
struct sched_info sched_info;
@@ -960,7 +956,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
@@ -1190,6 +1185,7 @@ struct task_struct {
#ifdef CONFIG_IO_URING
struct io_uring_task *io_uring;
+ struct io_restriction *io_uring_restrict;
#endif
/* Namespaces: */
@@ -1419,6 +1415,10 @@ struct task_struct {
struct page_frag task_frag;
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+ struct lazy_mmu_state lazy_mmu_state;
+#endif
+
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
@@ -1702,6 +1702,47 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+/**
+ * __task_lazy_mmu_mode_active() - Test the lazy MMU mode state for a task.
+ * @tsk: The task to check.
+ *
+ * Test whether @tsk has its lazy MMU mode state set to active (i.e. enabled
+ * and not paused).
+ *
+ * This function only considers the state saved in task_struct; to test whether
+ * current actually is in lazy MMU mode, is_lazy_mmu_mode_active() should be
+ * used instead.
+ *
+ * This function is intended for architectures that implement the lazy MMU
+ * mode; it must not be called from generic code.
+ */
+static inline bool __task_lazy_mmu_mode_active(struct task_struct *tsk)
+{
+ struct lazy_mmu_state *state = &tsk->lazy_mmu_state;
+
+ return state->enable_count > 0 && state->pause_count == 0;
+}
+
+/**
+ * is_lazy_mmu_mode_active() - Test whether we are currently in lazy MMU mode.
+ *
+ * Test whether the current context is in lazy MMU mode. This is true if both:
+ * 1. We are not in interrupt context
+ * 2. Lazy MMU mode is active for the current task
+ *
+ * This function is intended for architectures that implement the lazy MMU
+ * mode; it must not be called from generic code.
+ */
+static inline bool is_lazy_mmu_mode_active(void)
+{
+ if (in_interrupt())
+ return false;
+
+ return __task_lazy_mmu_mode_active(current);
+}
+#endif
+
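[Editor's note] An arch-side sketch of how the new helper might be consumed (illustrative only; queue_pte_update() and pte_write_direct() are hypothetical names, not part of this patch). An architecture implementing lazy MMU batching can gate its PTE-update queueing on the saved task state:

static void arch_set_pte(pte_t *ptep, pte_t pte)
{
        if (is_lazy_mmu_mode_active())
                queue_pte_update(ptep, pte);    /* batched, flushed on mode exit */
        else
                pte_write_direct(ptep, pte);    /* immediate hardware update */
}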
extern struct pid *cad_pid;
/*
@@ -2098,9 +2139,9 @@ static inline int _cond_resched(void)
_cond_resched(); \
})
-extern int __cond_resched_lock(spinlock_t *lock);
-extern int __cond_resched_rwlock_read(rwlock_t *lock);
-extern int __cond_resched_rwlock_write(rwlock_t *lock);
+extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 5f8fd5b24a2e..e90efaf6d26e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H
+#include <linux/static_call_types.h>
#include <linux/sched/signal.h>
/*
@@ -180,4 +181,21 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
extern unsigned long long
task_sched_runtime(struct task_struct *task);
+#ifdef CONFIG_PARAVIRT
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+#endif
+#endif
+
#endif /* _LINUX_SCHED_CPUTIME_H */
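[Editor's note] A scheduler-side sketch of the relocated paravirt steal-clock interface (illustrative; assumes CONFIG_HAVE_PV_STEAL_CLOCK_GEN so paravirt_steal_clock() is defined). Steal time is only sampled once the hypervisor backend enables the static key:

static u64 sample_steal_time(int cpu)
{
        if (static_key_false(&paravirt_steal_rq_enabled))
                return paravirt_steal_clock(cpu);
        return 0;
}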
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index d8501f4709b5..dc3975ff1b2e 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,13 +2,21 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
-#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
enum hk_type {
+ /* Inverse of boot-time isolcpus= argument */
+ HK_TYPE_DOMAIN_BOOT,
+ /*
+	 * Same as HK_TYPE_DOMAIN_BOOT but additionally excludes
+	 * CPUs in cpuset isolated partitions. As such it
+ * is always a subset of HK_TYPE_DOMAIN_BOOT.
+ */
HK_TYPE_DOMAIN,
+ /* Inverse of boot-time isolcpus=managed_irq argument */
HK_TYPE_MANAGED_IRQ,
+ /* Inverse of boot-time nohz_full= or isolcpus=nohz arguments */
HK_TYPE_KERNEL_NOISE,
HK_TYPE_MAX,
@@ -31,6 +39,7 @@ extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
extern bool housekeeping_enabled(enum hk_type type);
extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
+extern int housekeeping_update(struct cpumask *isol_mask);
extern void __init housekeeping_init(void);
#else
@@ -58,6 +67,7 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
return true;
}
+static inline int housekeeping_update(struct cpumask *isol_mask) { return 0; }
static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */
@@ -72,9 +82,7 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
- return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
- cpuset_cpu_is_isolated(cpu);
+ return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
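[Editor's note] A consumer sketch for the split boot-time/runtime domain masks (pick_worker_cpu() is a hypothetical name). Unbound work should target the runtime HK_TYPE_DOMAIN mask, which is always a subset of HK_TYPE_DOMAIN_BOOT:

static int pick_worker_cpu(void)
{
        const struct cpumask *hk = housekeeping_cpumask(HK_TYPE_DOMAIN);

        return cpumask_any_and(hk, cpu_online_mask);
}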
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 7d6449982822..a22248aebcf9 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -737,21 +737,13 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
- unsigned long *flags);
-
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
- unsigned long *flags)
-{
- struct sighand_struct *ret;
-
- ret = __lock_task_sighand(task, flags);
- (void)__cond_lock(&task->sighand->siglock, ret);
- return ret;
-}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
+ unsigned long *flags)
+ __acquires(&task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
+ __releases(&task->sighand->siglock)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
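[Editor's note] The call pattern is unchanged; a caller sketch showing that the annotated prototype now carries the conditional-acquire semantics directly, with no __cond_lock() wrapper at the call site:

static void poke_sighand(struct task_struct *task)
{
        unsigned long flags;
        struct sighand_struct *sighand = lock_task_sighand(task, &flags);

        if (sighand) {
                /* task->sighand->siglock is held here */
                unlock_task_sighand(task, &flags);
        }
}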
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 525aa2a632b2..41ed884cffc9 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -214,15 +214,19 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
+ __acquires(&p->alloc_lock)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
+ __releases(&p->alloc_lock)
{
spin_unlock(&p->alloc_lock);
}
-DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+DEFINE_LOCK_GUARD_1(task_lock, struct task_struct, task_lock(_T->lock), task_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(task_lock, __acquires(&_T->alloc_lock), __releases(&(*(struct task_struct **)_T)->alloc_lock))
+#define class_task_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_lock, _T)
#endif /* _LINUX_SCHED_TASK_H */
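[Editor's note] A usage sketch (illustrative): guard-based task locking now goes through the LOCK_GUARD_1 class, so p->alloc_lock is visible to context analysis:

static void inspect_task(struct task_struct *p)
{
        scoped_guard(task_lock, p) {
                /* p->alloc_lock held; p->fs, p->files etc. are stable */
        }
}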
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 0f28b4623ad4..765bbc3d54be 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -66,6 +66,7 @@ extern void wake_up_q(struct wake_q_head *head);
/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
static inline
void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock(lock);
@@ -77,6 +78,7 @@ void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irq(lock);
@@ -89,6 +91,7 @@ void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irqrestore(lock, flags);
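[Editor's note] A caller sketch for the annotated unlock-and-wake helpers (`struct my_queue` is an illustrative type): wakeups are queued under the lock, then issued after the unlock with preemption held off:

static void wake_one(struct my_queue *q, struct task_struct *t)
{
        DEFINE_WAKE_Q(wake_q);
        unsigned long flags;

        raw_spin_lock_irqsave(&q->lock, flags);
        wake_q_add(&wake_q, t);
        raw_spin_unlock_irqrestore_wake(&q->lock, flags, &wake_q);
}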
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
index 27bd372cbfb1..2407d7693b6b 100644
--- a/include/linux/scmi_imx_protocol.h
+++ b/include/linux/scmi_imx_protocol.h
@@ -59,6 +59,8 @@ struct scmi_imx_misc_proto_ops {
u32 *num, u32 *val);
int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
u32 ctrl_id, u32 evt_id, u32 flags);
+ int (*misc_syslog)(const struct scmi_protocol_handle *ph, u16 *size,
+ void *array);
};
/* See LMM_ATTRIBUTES in imx95.rst */
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 1690706206e8..c022403c599a 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -151,6 +151,4 @@ static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
}
#endif
-extern struct screen_info screen_info;
-
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index d6ebf0596510..2fb266ea69fa 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -181,7 +181,6 @@ int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
-__printf(2, 0)
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 221123660e71..5a40252b8334 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -14,6 +14,7 @@
*/
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
* Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
return read_seqcount_begin(&sl->seqcount);
}
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
* Return: true if a read section retry is required, else false
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ __releases_shared(sl) __no_context_analysis
{
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
* _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
* critical section of given seqlock_t.
*/
static inline void write_sequnlock(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
* other write side sections, can be invoked from softirq contexts.
*/
static inline void write_seqlock_bh(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
* write_seqlock_bh().
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
* other write sections, can be invoked from hardirq contexts.
*/
static inline void write_seqlock_irq(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
* seqlock_t write side section opened with write_seqlock_irq().
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
unsigned long flags;
@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
*/
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
* The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock(&sl->lock);
}
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock(&sl->lock);
}
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
* from softirq contexts.
*/
static inline void read_seqlock_excl_bh(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
}
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_bh(&sl->lock);
}
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
* hardirq context.
*/
static inline void read_seqlock_excl_irq(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
}
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
unsigned long flags;
@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
*/
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irqrestore(&sl->lock, flags);
}
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
* parameter of the next read_seqbegin_or_lock() iteration.
*/
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
* Return: true if a read section retry is required, false otherwise
*/
static inline int need_seqretry(seqlock_t *lock, int seq)
+ __releases_shared(lock) __no_context_analysis
{
return !(seq & 1) && read_seqretry(lock, seq);
}
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
* with read_seqbegin_or_lock() and validated by need_seqretry().
*/
static inline void done_seqretry(seqlock_t *lock, int seq)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
*/
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags = 0;
@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
*/
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
@@ -1225,6 +1249,7 @@ struct ss_tmp {
};
static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+ __no_context_analysis
{
if (sst->lock)
spin_unlock(sst->lock);
@@ -1254,6 +1279,7 @@ extern void __scoped_seqlock_bug(void);
static __always_inline void
__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+ __no_context_analysis
{
switch (sst->state) {
case ss_done:
@@ -1296,22 +1322,31 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
}
}
+/*
+ * Context analysis no-op helper to release seqlock at the end of the for-scope;
+ * the alias analysis of the compiler will recognize that the pointer @s is an
+ * alias to @_seqlock passed to read_seqbegin(_seqlock) below.
+ */
+static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
+ __releases_shared(*((seqlock_t **)s)) __no_context_analysis {}
+
#define __scoped_seqlock_read(_seqlock, _target, _s) \
for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) = \
- { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+ { .state = ss_lockless, .data = read_seqbegin(_seqlock) }, \
+ *__UNIQUE_ID(ctx) __cleanup(__scoped_seqlock_cleanup_ctx) =\
+ (struct ss_tmp *)_seqlock; \
_s.state != ss_done; \
__scoped_seqlock_next(&_s, _seqlock, _target))
/**
- * scoped_seqlock_read (lock, ss_state) - execute the read side critical
- * section without manual sequence
- * counter handling or calls to other
- * helpers
- * @lock: pointer to seqlock_t protecting the data
- * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
- * the type of critical read section
+ * scoped_seqlock_read() - execute the read-side critical section
+ * without manual sequence counter handling
+ * or calls to other helpers
+ * @_seqlock: pointer to seqlock_t protecting the data
+ * @_target: an enum ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless}
+ * indicating the type of critical read section
*
- * Example:
+ * Example::
*
* scoped_seqlock_read (&lock, ss_lock) {
* // read-side critical section
@@ -1323,4 +1358,8 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
#define scoped_seqlock_read(_seqlock, _target) \
__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
#endif /* __LINUX_SEQLOCK_H */
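[Editor's note] A reader sketch (`state_lock` and `state` are placeholder names) showing the classic retry loop that the new shared-acquire/release annotations wrap:

static DEFINE_SEQLOCK(state_lock);
static u64 state;

static u64 read_state(void)
{
        unsigned int seq;
        u64 v;

        do {
                seq = read_seqbegin(&state_lock);       /* enter read section */
                v = state;
        } while (read_seqretry(&state_lock, seq));      /* retry if a writer ran */

        return v;
}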
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
index dfdf43e3fa3d..2d5d793ef660 100644
--- a/include/linux/seqlock_types.h
+++ b/include/linux/seqlock_types.h
@@ -81,13 +81,14 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
-typedef struct {
+context_lock_struct(seqlock) {
/*
* Make sure that readers don't starve writers on PREEMPT_RT: use
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
*/
seqcount_spinlock_t seqcount;
spinlock_t lock;
-} seqlock_t;
+};
+typedef struct seqlock seqlock_t;
#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 112e48970338..daa4e4944ce3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2813,6 +2813,7 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
skb->data -= len;
+ DEBUG_NET_WARN_ON_ONCE(skb->data < skb->head);
skb->len += len;
return skb->data;
}
@@ -4775,7 +4776,7 @@ static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
}
}
-static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+static __always_inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
@@ -5001,6 +5002,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_INET_PSP)
SKB_EXT_PSP,
#endif
+#if IS_ENABLED(CONFIG_CAN)
+ SKB_EXT_CAN,
+#endif
SKB_EXT_NUM, /* must be last */
};
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 9e49372ef1a0..05c8486bafac 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -15,7 +15,7 @@
*
* Takes an additional reference on the paged fragment @frag.
*/
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static __always_inline void __skb_frag_ref(skb_frag_t *frag)
{
get_netmem(skb_frag_netmem(frag));
}
@@ -27,14 +27,14 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
*
* Takes an additional reference on the @f'th paged fragment of @skb.
*/
-static inline void skb_frag_ref(struct sk_buff *skb, int f)
+static __always_inline void skb_frag_ref(struct sk_buff *skb, int f)
{
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
bool napi_pp_put_page(netmem_ref netmem);
-static inline void skb_page_unref(netmem_ref netmem, bool recycle)
+static __always_inline void skb_page_unref(netmem_ref netmem, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
if (recycle && napi_pp_put_page(netmem))
@@ -51,7 +51,7 @@ static inline void skb_page_unref(netmem_ref netmem, bool recycle)
* Releases a reference on the paged fragment @frag
* or recycles the page via the page_pool API.
*/
-static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
+static __always_inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
skb_page_unref(skb_frag_netmem(frag), recycle);
}
@@ -63,7 +63,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
*
* Releases a reference on the @f'th paged fragment of @skb.
*/
-static inline void skb_frag_unref(struct sk_buff *skb, int f)
+static __always_inline void skb_frag_unref(struct sk_buff *skb, int f)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 49847888c287..829b281d6c9c 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -97,6 +97,8 @@ struct sk_psock {
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
spinlock_t ingress_lock;
+ /** @msg_tot_len: Total bytes queued in ingress_msg list. */
+ u32 msg_tot_len;
unsigned long state;
struct list_head link;
spinlock_t link_lock;
@@ -141,6 +143,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
+int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags, int *copied_from_self);
bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
@@ -319,6 +323,27 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline u32 sk_psock_get_msg_len_nolock(struct sk_psock *psock)
+{
+ /* Used by ioctl to read msg_tot_len only; lock-free for performance */
+ return READ_ONCE(psock->msg_tot_len);
+}
+
+static inline void sk_psock_msg_len_add_locked(struct sk_psock *psock, int diff)
+{
+ /* Use WRITE_ONCE to ensure correct read in sk_psock_get_msg_len_nolock().
+ * ingress_lock should be held to prevent concurrent updates to msg_tot_len
+ */
+ WRITE_ONCE(psock->msg_tot_len, psock->msg_tot_len + diff);
+}
+
+static inline void sk_psock_msg_len_add(struct sk_psock *psock, int diff)
+{
+ spin_lock_bh(&psock->ingress_lock);
+ sk_psock_msg_len_add_locked(psock, diff);
+ spin_unlock_bh(&psock->ingress_lock);
+}
+
static inline bool sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
@@ -327,6 +352,7 @@ static inline bool sk_psock_queue_msg(struct sk_psock *psock,
spin_lock_bh(&psock->ingress_lock);
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
list_add_tail(&msg->list, &psock->ingress_msg);
+ sk_psock_msg_len_add_locked(psock, msg->sg.size);
ret = true;
} else {
sk_msg_free(psock->sk, msg);
@@ -343,18 +369,25 @@ static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
spin_lock_bh(&psock->ingress_lock);
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
- if (msg)
+ if (msg) {
list_del(&msg->list);
+ sk_psock_msg_len_add_locked(psock, -msg->sg.size);
+ }
spin_unlock_bh(&psock->ingress_lock);
return msg;
}
+static inline struct sk_msg *sk_psock_peek_msg_locked(struct sk_psock *psock)
+{
+ return list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+}
+
static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
struct sk_msg *msg;
spin_lock_bh(&psock->ingress_lock);
- msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ msg = sk_psock_peek_msg_locked(psock);
spin_unlock_bh(&psock->ingress_lock);
return msg;
}
@@ -521,6 +554,39 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
+/* for tcp only, sk is locked */
+static inline ssize_t sk_psock_msg_inq(struct sock *sk)
+{
+ struct sk_psock *psock;
+ ssize_t inq = 0;
+
+ psock = sk_psock_get(sk);
+ if (likely(psock)) {
+ inq = sk_psock_get_msg_len_nolock(psock);
+ sk_psock_put(sk, psock);
+ }
+ return inq;
+}
+
+/* for udp only, sk is not locked */
+static inline ssize_t sk_msg_first_len(struct sock *sk)
+{
+ struct sk_psock *psock;
+ struct sk_msg *msg;
+ ssize_t inq = 0;
+
+ psock = sk_psock_get(sk);
+ if (likely(psock)) {
+ spin_lock_bh(&psock->ingress_lock);
+ msg = sk_psock_peek_msg_locked(psock);
+ if (msg)
+ inq = msg->sg.size;
+ spin_unlock_bh(&psock->ingress_lock);
+ sk_psock_put(sk, psock);
+ }
+ return inq;
+}
+
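[Editor's note] A consumer sketch for the new byte accounting (sockmap_inq_ioctl() is a hypothetical name, not part of this patch): a protocol's SIOCINQ handler can report queued sockmap bytes without taking ingress_lock:

static int sockmap_inq_ioctl(struct sock *sk, int cmd, int *karg)
{
        if (cmd != SIOCINQ)
                return -ENOIOCTLCMD;

        lock_sock(sk);
        *karg = sk_psock_msg_inq(sk);   /* lock-free read of msg_tot_len */
        release_sock(sk);
        return 0;
}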
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2482992248dc..c5fde8740281 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
+#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
@@ -57,8 +58,9 @@ enum _slab_flag_bits {
#endif
_SLAB_OBJECT_POISON,
_SLAB_CMPXCHG_DOUBLE,
-#ifdef CONFIG_SLAB_OBJ_EXT
_SLAB_NO_OBJ_EXT,
+#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
+ _SLAB_OBJ_EXT_IN_OBJ,
#endif
_SLAB_FLAGS_LAST_BIT
};
@@ -238,10 +240,12 @@ enum _slab_flag_bits {
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Slab created using create_boot_cache */
-#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
+
+#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
+#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ)
#else
-#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
+#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_UNUSED
#endif
/*
@@ -299,24 +303,26 @@ struct kmem_cache_args {
unsigned int usersize;
/**
* @freeptr_offset: Custom offset for the free pointer
- * in &SLAB_TYPESAFE_BY_RCU caches
+ * in caches with &SLAB_TYPESAFE_BY_RCU or @ctor
*
- * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
- * outside of the object. This might cause the object to grow in size.
- * Cache creators that have a reason to avoid this can specify a custom
- * free pointer offset in their struct where the free pointer will be
- * placed.
+ * By default, &SLAB_TYPESAFE_BY_RCU and @ctor caches place the free
+ * pointer outside of the object. This might cause the object to grow
+ * in size. Cache creators that have a reason to avoid this can specify
+ * a custom free pointer offset in their data structure where the free
+ * pointer will be placed.
*
- * Note that placing the free pointer inside the object requires the
- * caller to ensure that no fields are invalidated that are required to
- * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
- * details).
+ * For caches with &SLAB_TYPESAFE_BY_RCU, the caller must ensure that
+ * the free pointer does not overlay fields required to guard against
+ * object recycling (See &SLAB_TYPESAFE_BY_RCU for details).
*
- * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
- * is specified, %use_freeptr_offset must be set %true.
+ * For caches with @ctor, the caller must ensure that the free pointer
+ * does not overlay fields initialized by the constructor.
+ *
+ * Currently, only caches with &SLAB_TYPESAFE_BY_RCU or @ctor
+ * may specify @freeptr_offset.
*
- * Note that @ctor currently isn't supported with custom free pointers
- * as a @ctor requires an external free pointer.
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, @use_freeptr_offset must be set %true.
*/
unsigned int freeptr_offset;
/**
@@ -507,7 +513,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
void kfree(const void *objp);
void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
@@ -965,6 +970,111 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+/**
+ * __alloc_objs - Allocate objects of a given type using a given kmalloc wrapper
+ * @KMALLOC: which size-based kmalloc wrapper to allocate with.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type to allocate space for.
+ * @COUNT: how many @TYPE objects to allocate.
+ *
+ * Returns: Newly allocated pointer to the first of @COUNT-many
+ * @TYPE objects, or NULL on failure.
+ */
+#define __alloc_objs(KMALLOC, GFP, TYPE, COUNT) \
+({ \
+ const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \
+ (TYPE *)KMALLOC(__obj_size, GFP); \
+})
+
+/**
+ * __alloc_flex - Allocate an object that has a trailing flexible array
+ * @KMALLOC: kmalloc wrapper function to use for allocation.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type of structure to allocate space for.
+ * @FAM: The name of the flexible array member of @TYPE structure.
+ * @COUNT: how many @FAM elements to allocate space for.
+ *
+ * Returns: Newly allocated pointer to @TYPE with @COUNT-many trailing
+ * @FAM elements, or NULL on failure or if @COUNT cannot be represented
+ * by the member of @TYPE that counts the @FAM elements (annotated via
+ * __counted_by()).
+ */
+#define __alloc_flex(KMALLOC, GFP, TYPE, FAM, COUNT) \
+({ \
+ const size_t __count = (COUNT); \
+ const size_t __obj_size = struct_size_t(TYPE, FAM, __count); \
+ TYPE *__obj_ptr; \
+ if (WARN_ON_ONCE(overflows_flex_counter_type(TYPE, FAM, __count))) \
+ __obj_ptr = NULL; \
+ else \
+ __obj_ptr = KMALLOC(__obj_size, GFP); \
+ if (__obj_ptr) \
+ __set_flex_counter(__obj_ptr->FAM, __count); \
+ __obj_ptr; \
+})
+
+/**
+ * kmalloc_obj - Allocate a single instance of the given type
+ * @VAR_OR_TYPE: Variable or type to allocate.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to a @VAR_OR_TYPE on success, or NULL
+ * on failure.
+ */
+#define kmalloc_obj(VAR_OR_TYPE, GFP) \
+ __alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), 1)
+
+/**
+ * kmalloc_objs - Allocate an array of the given type
+ * @VAR_OR_TYPE: Variable or type to allocate an array of.
+ * @COUNT: How many elements in the array.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to array of @VAR_OR_TYPE on success,
+ * or NULL on failure.
+ */
+#define kmalloc_objs(VAR_OR_TYPE, COUNT, GFP) \
+ __alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), COUNT)
+
+/**
+ * kmalloc_flex - Allocate a single instance of the given flexible structure
+ * @VAR_OR_TYPE: Variable or type to allocate (with its flex array).
+ * @FAM: The name of the flexible array member of the structure.
+ * @COUNT: How many flexible array member elements are desired.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to @VAR_OR_TYPE on success, NULL on
+ * failure. If @FAM has been annotated with __counted_by(), the allocation
+ * will immediately fail if @COUNT is larger than what the type of the
+ * struct's counter variable can represent.
+ */
+#define kmalloc_flex(VAR_OR_TYPE, FAM, COUNT, GFP) \
+ __alloc_flex(kmalloc, GFP, typeof(VAR_OR_TYPE), FAM, COUNT)
+
+/* All kzalloc aliases for kmalloc_(obj|objs|flex). */
+#define kzalloc_obj(P, GFP) \
+ __alloc_objs(kzalloc, GFP, typeof(P), 1)
+#define kzalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kzalloc, GFP, typeof(P), COUNT)
+#define kzalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kzalloc, GFP, typeof(P), FAM, COUNT)
+
+/* All kvmalloc aliases for kmalloc_(obj|objs|flex). */
+#define kvmalloc_obj(P, GFP) \
+ __alloc_objs(kvmalloc, GFP, typeof(P), 1)
+#define kvmalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kvmalloc, GFP, typeof(P), COUNT)
+#define kvmalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kvmalloc, GFP, typeof(P), FAM, COUNT)
+
+/* All kvzalloc aliases for kmalloc_(obj|objs|flex). */
+#define kvzalloc_obj(P, GFP) \
+ __alloc_objs(kvzalloc, GFP, typeof(P), 1)
+#define kvzalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kvzalloc, GFP, typeof(P), COUNT)
+#define kvzalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kvzalloc, GFP, typeof(P), FAM, COUNT)
+
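[Editor's note] A usage sketch for the flex-array allocator family (`struct pkt` is an illustrative type): kmalloc_flex() computes the size with struct_size() semantics, rejects counts that overflow the __counted_by() counter, and seeds the counter on success:

struct pkt {
        u32 len;
        u8 data[] __counted_by(len);
};

static struct pkt *alloc_pkt(void)
{
        struct pkt *p = kmalloc_flex(struct pkt, data, 64, GFP_KERNEL);

        if (!p)
                return NULL;
        /* p->len is already 64 here, seeded via __set_flex_counter() */
        return p;
}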
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 91d0ecf3b8d3..1ebd88026119 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -62,6 +62,7 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd);
void __noreturn panic_smp_self_stop(void);
void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);
+int panic_smp_redirect_cpu(int target_cpu, void *msg);
/*
* Call a function on all processors
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index 736f53018017..bda3c528b515 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -126,6 +126,13 @@ int apple_rtkit_wake(struct apple_rtkit *rtk);
int apple_rtkit_shutdown(struct apple_rtkit *rtk);
/*
+ * Put the co-processor into the lowest power state. Note that it usually
+ * is not possible to recover from this state without a full SoC reset.
+ */
+
+int apple_rtkit_poweroff(struct apple_rtkit *rtk);
+
+/*
* Put the co-processor into idle mode
*/
int apple_rtkit_idle(struct apple_rtkit *rtk);
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 0c3906e8ad19..a06b5a61f337 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -23,6 +23,8 @@
#define CMDQ_THR_SPR_IDX2 (2)
#define CMDQ_THR_SPR_IDX3 (3)
+#define CMDQ_SUBSYS_INVALID (U8_MAX)
+
struct cmdq_pkt;
enum cmdq_logic_op {
@@ -52,8 +54,20 @@ struct cmdq_operand {
struct cmdq_client_reg {
u8 subsys;
+ phys_addr_t pa_base;
u16 offset;
u16 size;
+
+ /*
+	 * Clients only use these functions for MMIO access,
+	 * so they don't need to handle the mminfra_offset.
+ * The mminfra_offset is used for DRAM access and
+ * is handled internally by CMDQ APIs.
+ */
+ int (*pkt_write)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base,
+ u16 offset, u32 value);
+ int (*pkt_write_mask)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base,
+ u16 offset, u32 value, u32 mask);
};
struct cmdq_client {
@@ -122,6 +136,32 @@ void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
/**
+ * cmdq_pkt_write_pa() - append write command to the CMDQ packet with pa_base
+ * @pkt: the CMDQ packet
+ * @subsys: unused parameter
+ * @pa_base: the physical address base of the hardware register
+ * @offset: register offset from @pa_base
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_write_subsys() - append write command to the CMDQ packet with subsys
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @pa_base: unused parameter
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value);
+
+/**
* cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
@@ -134,6 +174,34 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+/**
+ * cmdq_pkt_write_mask_pa() - append write command with mask to the CMDQ packet with pa
+ * @pkt: the CMDQ packet
+ * @subsys: unused parameter
+ * @pa_base: the physical address base of the hardware register
+ * @offset: register offset from @pa_base
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_write_mask_subsys() - append write command with mask to the CMDQ packet with subsys
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @pa_base: unused parameter
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value, u32 mask);
+
/*
* cmdq_pkt_read_s() - append read_s command to the CMDQ packet
* @pkt: the CMDQ packet
@@ -418,12 +486,37 @@ static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u3
return -ENOENT;
}
+static inline int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
return -ENOENT;
}
+static inline int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset,
+ u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
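[Editor's note] A caller sketch for the new addressing-mode variants (offset/value are placeholder constants; `subsys` and `pa_base` would come from the client's cmdq_client_reg). The _subsys/_pa pair makes the addressing mode explicit instead of overloading cmdq_pkt_write():

static int write_reg(struct cmdq_client *cl, u8 subsys, u32 pa_base)
{
        struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);

        if (IS_ERR(pkt))
                return PTR_ERR(pkt);

        if (subsys != CMDQ_SUBSYS_INVALID)
                return cmdq_pkt_write_subsys(pkt, subsys, 0, 0x40, 0x1);

        return cmdq_pkt_write_pa(pkt, 0, pa_base, 0x40, 0x1);
}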
static inline int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 reg_idx)
{
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index a532d1e4b1f4..6e1b1202e818 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -122,7 +122,7 @@ struct gpr_ibasic_rsp_accepted_t {
#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
-typedef int (*gpr_port_cb) (struct gpr_resp_pkt *d, void *priv, int op);
+typedef int (*gpr_port_cb) (const struct gpr_resp_pkt *d, void *priv, int op);
struct packet_router;
struct pkt_router_svc {
struct device *dev;
@@ -155,8 +155,8 @@ struct apr_driver {
int (*probe)(struct apr_device *sl);
void (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
- struct apr_resp_pkt *d);
- int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
+ const struct apr_resp_pkt *d);
+ gpr_port_cb gpr_callback;
struct device_driver driver;
const struct apr_device_id *id_table;
};
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 0287f9182c4d..8243ab3a12a8 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -74,13 +74,17 @@
#define LLCC_CAMSRTIP 73
#define LLCC_CAMRTRF 74
#define LLCC_CAMSRTRF 75
+#define LLCC_OOBM_NS 81
+#define LLCC_OOBM_S 82
#define LLCC_VIDEO_APV 83
#define LLCC_COMPUTE1 87
#define LLCC_CPUSS_OPP 88
#define LLCC_CPUSSMPAM 89
+#define LLCC_VIDSC_VSP1 91
#define LLCC_CAM_IPE_STROV 92
#define LLCC_CAM_OFE_STROV 93
#define LLCC_CPUSS_HEU 94
+#define LLCC_PCIE_TCU 97
#define LLCC_MDM_PNG_FIXED 100
/**
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 8ea8230579a2..82372e0db0a1 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -10,19 +10,19 @@
struct device;
struct firmware;
-struct qcom_scm_pas_metadata;
+struct qcom_scm_pas_context;
#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
ssize_t qcom_mdt_get_size(const struct firmware *fw);
-int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, phys_addr_t mem_phys,
- struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
+int qcom_mdt_pas_load(struct qcom_scm_pas_context *ctx, const struct firmware *fw,
+ const char *firmware, void *mem_region, phys_addr_t *reloc_base);
+
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
@@ -37,13 +37,6 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
return -ENODEV;
}
-static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, phys_addr_t mem_phys,
- struct qcom_scm_pas_metadata *pas_metadata_ctx)
-{
- return -ENODEV;
-}
-
static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id,
void *mem_region, phys_addr_t mem_phys,
@@ -52,6 +45,13 @@ static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
return -ENODEV;
}
+static inline int qcom_mdt_pas_load(struct qcom_scm_pas_context *ctx,
+ const struct firmware *fw, const char *firmware,
+ void *mem_region, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
static inline int qcom_mdt_load_no_init(struct device *dev,
const struct firmware *fw,
const char *fw_name, void *mem_region,
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
index 0a4edfe3d96d..f052e241736c 100644
--- a/include/linux/soc/qcom/ubwc.h
+++ b/include/linux/soc/qcom/ubwc.h
@@ -8,6 +8,7 @@
#define __QCOM_UBWC_H__
#include <linux/bits.h>
+#include <linux/printk.h>
#include <linux/types.h>
struct qcom_ubwc_cfg_data {
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 82390712794c..e4db0924898c 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -20,10 +20,10 @@
.opcode = __opcode, \
}
-#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \
+#define SPI_MEM_DTR_OP_RPT_CMD(__opcode, __buswidth) \
{ \
- .nbytes = 1, \
- .opcode = __opcode, \
+ .nbytes = 2, \
+ .opcode = __opcode | __opcode << 8, \
.buswidth = __buswidth, \
.dtr = true, \
}
@@ -43,6 +43,14 @@
.dtr = true, \
}
+#define SPI_MEM_DTR_OP_RPT_ADDR(__val, __buswidth) \
+ { \
+ .nbytes = 2, \
+ .val = __val | __val << 8, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
+ }
+
#define SPI_MEM_OP_NO_ADDR { }
#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index cb2c2df31089..fd8dce4169f7 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,9 @@
/* Max no. of CS supported per spi device */
#define SPI_DEVICE_CS_CNT_MAX 4
+/* Max no. of data lanes supported per spi device */
+#define SPI_DEVICE_DATA_LANE_CNT_MAX 8
+
struct dma_chan;
struct software_node;
struct ptp_system_timestamp;
@@ -174,6 +177,10 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
* @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
* (optional, NULL when not using a GPIO line)
+ * @tx_lane_map: Map of peripheral lanes (index) to controller lanes (value).
+ * @num_tx_lanes: Number of transmit lanes wired up.
+ * @rx_lane_map: Map of peripheral lanes (index) to controller lanes (value).
+ * @num_rx_lanes: Number of receive lanes wired up.
*
* A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
@@ -242,6 +249,12 @@ struct spi_device {
struct gpio_desc *cs_gpiod[SPI_DEVICE_CS_CNT_MAX]; /* Chip select gpio desc */
+ /* Multi-lane SPI controller support. */
+ u8 tx_lane_map[SPI_DEVICE_DATA_LANE_CNT_MAX];
+ u8 num_tx_lanes;
+ u8 rx_lane_map[SPI_DEVICE_DATA_LANE_CNT_MAX];
+ u8 num_rx_lanes;
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -401,6 +414,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* SPI targets, and are numbered from zero to num_chipselects.
 * Each target has a chipselect signal, but it's common that not
* every chipselect is connected to a target.
+ * @num_data_lanes: Number of data lanes supported by this controller. Default is 1.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
* @buswidth_override_bits: flags to override for this controller driver
@@ -576,6 +590,14 @@ struct spi_controller {
*/
u16 num_chipselect;
+ /*
+ * Some specialized SPI controllers can have more than one physical
+	 * data lane interface per controller (each having its own serializer).
+ * This specifies the number of data lanes in that case. Other
+ * controllers do not need to set this (defaults to 1).
+ */
+ u16 num_data_lanes;
+
/* Some SPI controllers pose alignment requirements on DMAable
* buffers; let protocol drivers know about these requirements.
*/
@@ -882,6 +904,15 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
+#if IS_ENABLED(CONFIG_OF)
+extern struct spi_controller *of_find_spi_controller_by_node(struct device_node *node);
+#else
+static inline struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif
+
#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER)
extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
@@ -959,6 +990,8 @@ struct spi_res {
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
* (SPI_NBITS_SINGLE) is used.
+ * @multi_lane_mode: How to serialize data on multiple lanes. One of the
+ * SPI_MULTI_LANE_MODE_* values.
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
@@ -1095,6 +1128,12 @@ struct spi_transfer {
unsigned cs_change:1;
unsigned tx_nbits:4;
unsigned rx_nbits:4;
+
+#define SPI_MULTI_LANE_MODE_SINGLE 0 /* only use single lane */
+#define SPI_MULTI_LANE_MODE_STRIPE 1 /* one data word per lane */
+#define SPI_MULTI_LANE_MODE_MIRROR 2 /* same word sent on all lanes */
+	unsigned	multi_lane_mode:2;
+
unsigned timestamped:1;
bool dtr_mode;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
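[Editor's note] A peripheral-side sketch (illustrative) selecting the new mirror mode on a transfer; the controller must advertise num_data_lanes > 1 for this to take effect:

static int send_mirrored(struct spi_device *spi, const void *buf, size_t len)
{
        struct spi_transfer xfer = {
                .tx_buf = buf,
                .len = len,
                .multi_lane_mode = SPI_MULTI_LANE_MODE_MIRROR,
        };

        return spi_sync_transfer(spi, &xfer, 1);
}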
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d3561c4a080e..e1e2f144af9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -212,7 +212,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* various methods are defined as nops in the case they are not
* required.
*/
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) _raw_spin_trylock(lock)
#define raw_spin_lock(lock) _raw_spin_lock(lock)
@@ -283,22 +283,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define raw_spin_trylock_bh(lock) \
- __cond_lock(lock, _raw_spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) _raw_spin_trylock_bh(lock)
-#define raw_spin_trylock_irq(lock) \
-({ \
- local_irq_disable(); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_enable(); 0; }); \
-})
+#define raw_spin_trylock_irq(lock) _raw_spin_trylock_irq(lock)
-#define raw_spin_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define raw_spin_trylock_irqsave(lock, flags) _raw_spin_trylock_irqsave(lock, &(flags))
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
@@ -347,16 +336,19 @@ do { \
#endif
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock(&lock->rlock);
}
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_bh(&lock->rlock);
}
static __always_inline int spin_trylock(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock(&lock->rlock);
}
@@ -364,14 +356,17 @@ static __always_inline int spin_trylock(spinlock_t *lock)
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -379,47 +374,57 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock(&lock->rlock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_bh(&lock->rlock);
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irq(&lock->rlock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
static __always_inline int spin_trylock_bh(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_bh(&lock->rlock);
}
static __always_inline int spin_trylock_irq(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_irq(&lock->rlock);
}
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
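[Editor's note] Caller-side semantics are unchanged (`struct my_dev` is illustrative): the macro still takes flags by name, now forwarding &flags to the annotated inline helper:

static bool try_quick_work(struct my_dev *dev)
{
        unsigned long flags;

        if (!spin_trylock_irqsave(&dev->lock, flags))
                return false;
        /* dev->lock held, interrupts disabled */
        spin_unlock_irqrestore(&dev->lock, flags);
        return true;
}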
/**
* spin_is_locked() - Check whether a spinlock is locked.
@@ -497,23 +502,17 @@ static inline int rwlock_needbreak(rwlock_t *lock)
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))
-extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
-#define atomic_dec_and_raw_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
@@ -535,86 +534,144 @@ void free_bucket_spinlocks(spinlock_t *locks);
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_nested_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
raw_spin_lock_bh(_T->lock),
raw_spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
spin_lock_bh(_T->lock),
spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock, _T)
DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T)
DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T)
DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock, _T)
DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T)
DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)
+
+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
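
The DECLARE_LOCK_GUARD_1_ATTRS()/constructor pairs above attach acquire and
release attributes to the scope-based guard classes, so the context analysis
can follow guard(), scoped_guard() and friends. Guard usage itself is
unchanged; a minimal sketch (hypothetical names):

	static DEFINE_SPINLOCK(demo_lock);
	static int demo_count;

	static void demo_inc(void)
	{
		guard(spinlock)(&demo_lock);	/* released when scope ends */
		demo_count++;
	}

	static void demo_inc_irqsave(void)
	{
		scoped_guard(spinlock_irqsave, &demo_lock)
			demo_count++;
	}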
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 9ecb0ab504e3..bda5e7a390cd 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -34,8 +34,8 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
-int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(true, lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) __cond_acquires(true, lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
@@ -84,6 +84,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
preempt_disable();
if (do_raw_spin_trylock(lock)) {
@@ -94,6 +95,26 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return 0;
}
+static __always_inline bool _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ local_irq_disable();
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_enable();
+ return false;
+}
+
+static __always_inline bool _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ local_irq_save(*flags);
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
@@ -102,6 +123,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -113,6 +135,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -121,6 +144,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -128,6 +152,7 @@ static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -137,6 +162,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -145,6 +171,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -153,6 +180,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
}
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -161,6 +189,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -168,6 +197,7 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 819aeba1c87e..a9d5c7c66e03 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,68 +24,120 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
-#define ___LOCK(lock) \
+#define ___LOCK_(lock) \
do { __acquire(lock); (void)(lock); } while (0)
-#define __LOCK(lock) \
- do { preempt_disable(); ___LOCK(lock); } while (0)
+#define ___LOCK_shared(lock) \
+ do { __acquire_shared(lock); (void)(lock); } while (0)
-#define __LOCK_BH(lock) \
- do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
+#define __LOCK(lock, ...) \
+ do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQ(lock) \
- do { local_irq_disable(); __LOCK(lock); } while (0)
+#define __LOCK_BH(lock, ...) \
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQSAVE(lock, flags) \
- do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define __LOCK_IRQ(lock, ...) \
+ do { local_irq_disable(); __LOCK(lock, ##__VA_ARGS__); } while (0)
-#define ___UNLOCK(lock) \
+#define __LOCK_IRQSAVE(lock, flags, ...) \
+ do { local_irq_save(flags); __LOCK(lock, ##__VA_ARGS__); } while (0)
+
+#define ___UNLOCK_(lock) \
do { __release(lock); (void)(lock); } while (0)
-#define __UNLOCK(lock) \
- do { preempt_enable(); ___UNLOCK(lock); } while (0)
+#define ___UNLOCK_shared(lock) \
+ do { __release_shared(lock); (void)(lock); } while (0)
+
+#define __UNLOCK(lock, ...) \
+ do { preempt_enable(); ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_BH(lock) \
+#define __UNLOCK_BH(lock, ...) \
do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
- ___UNLOCK(lock); } while (0)
+ ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_IRQ(lock) \
- do { local_irq_enable(); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQ(lock, ...) \
+ do { local_irq_enable(); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
-#define __UNLOCK_IRQRESTORE(lock, flags) \
- do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQRESTORE(lock, flags, ...) \
+ do { local_irq_restore(flags); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
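
The optional trailing argument token-pastes onto the ___LOCK_/___UNLOCK_
helpers, so a single macro family emits either the exclusive or the shared
annotation. For illustration, the two expansions of __LOCK() are:

	__LOCK(lock)         -> do { preempt_disable(); ___LOCK_(lock); } while (0)
	__LOCK(lock, shared) -> do { preempt_disable(); ___LOCK_shared(lock); } while (0)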
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock, shared)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
-#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock, shared)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock, shared)
#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags, shared)
#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+
+static __always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_BH(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQ(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
+static __always_inline int _raw_read_trylock(rwlock_t *lock)
+ __cond_acquires_shared(true, lock)
+{
+ __LOCK(lock, shared);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock(rwlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
#define _raw_spin_unlock(lock) __UNLOCK(lock)
-#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock, shared)
#define _raw_write_unlock(lock) __UNLOCK(lock)
#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock, shared)
#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock, shared)
#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _raw_spin_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
#define _raw_read_unlock_irqrestore(lock, flags) \
- __UNLOCK_IRQRESTORE(lock, flags)
+ __UNLOCK_IRQRESTORE(lock, flags, shared)
#define _raw_write_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index f6499c37157d..373618a4243c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -36,10 +36,11 @@ extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock)
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
-extern int rt_spin_trylock_bh(spinlock_t *lock);
-extern int rt_spin_trylock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
+extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -82,6 +83,7 @@ static __always_inline void spin_lock(spinlock_t *lock)
__spin_lock_irqsave_nested(lock, flags, subclass)
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock)
{
/* Investigate: Drop bh when blocking ? */
local_bh_disable();
@@ -89,6 +91,7 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
}
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -101,45 +104,44 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
local_bh_enable();
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
-#define spin_trylock(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock(lock) rt_spin_trylock(lock)
-#define spin_trylock_bh(lock) \
- __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock) rt_spin_trylock_bh(lock)
-#define spin_trylock_irq(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock_irq(lock) rt_spin_trylock(lock)
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = spin_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ *flags = 0;
+ return rt_spin_trylock(lock);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
#define spin_is_contended(lock) (((void)(lock), 0))
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 2dfa35ffec76..b65bb6e4451c 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -14,7 +14,7 @@
#ifndef CONFIG_PREEMPT_RT
/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
-typedef struct spinlock {
+context_lock_struct(spinlock) {
union {
struct raw_spinlock rlock;
@@ -26,7 +26,8 @@ typedef struct spinlock {
};
#endif
};
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define ___SPIN_LOCK_INITIALIZER(lockname) \
{ \
@@ -47,12 +48,13 @@ typedef struct spinlock {
/* PREEMPT_RT kernels map spinlock to rt_mutex */
#include <linux/rtmutex.h>
-typedef struct spinlock {
+context_lock_struct(spinlock) {
struct rt_mutex_base lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define __SPIN_LOCK_UNLOCKED(name) \
{ \
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index 91cb36b65a17..e5644ab2161f 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -11,7 +11,7 @@
#include <linux/lockdep_types.h>
-typedef struct raw_spinlock {
+context_lock_struct(raw_spinlock) {
arch_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -20,7 +20,8 @@ typedef struct raw_spinlock {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} raw_spinlock_t;
+};
+typedef struct raw_spinlock raw_spinlock_t;
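
context_lock_struct() replaces the plain struct definition so the type itself
can carry the lock-capability marking used by the context analysis; the
separate typedef preserves the raw_spinlock_t name. As a rough userspace
analogue (an assumption for illustration, not the kernel macro's actual
expansion), Clang's thread-safety analysis marks a type the same way:

	/* Analogue only: tag a type as a trackable lock "capability". */
	struct __attribute__((capability("mutex"))) demo_lock {
		int raw;
	};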
#define SPINLOCK_MAGIC 0xdead4ead
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 344ad51c8f6c..bb44a0bd7696 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -21,7 +21,7 @@
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>
-struct srcu_struct;
+context_lock_struct(srcu_struct, __reentrant_ctx_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -77,7 +77,7 @@ int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
@@ -131,14 +131,16 @@ static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned
}
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#else
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, idx);
}
@@ -210,6 +212,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/*
+ * No-op helper to denote that ssp must be held. Because SRCU-protected pointers
+ * should still be marked with __rcu_guarded, and we do not want to mark them
+ * with __guarded_by(ssp) as it would complicate annotations for writers, we
+ * choose the following strategy: srcu_dereference_check() calls this helper
+ * that checks that the passed ssp is held, and then fake-acquires 'RCU'.
+ */
+static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { }
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -223,9 +233,15 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
- (c) || srcu_read_lock_held(ssp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+({ \
+ __srcu_read_lock_must_hold(ssp); \
+ __acquire_shared_ctx_lock(RCU); \
+ __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu); \
+ __release_shared_ctx_lock(RCU); \
+ __v; \
+})
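
A minimal reader sketch (hypothetical structure and field names) showing what
the reworked macro now enforces: the caller must be able to prove it holds
ssp, and the dereference itself is still checked as an RCU access:

	struct demo {
		struct demo_cfg __rcu *cfg;	/* hypothetical SRCU-protected pointer */
	};

	static struct demo_cfg *demo_get_cfg(struct demo *d, struct srcu_struct *ssp)
		__must_hold_shared(ssp)
	{
		return srcu_dereference_check(d->cfg, ssp, 0);
	}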
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -268,7 +284,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
* from another.
*/
-static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -304,7 +321,8 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
* contexts where RCU is watching, that is, from contexts where it would
* be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
*/
-static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -344,7 +362,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *
* complain.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
-__acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -360,7 +378,7 @@ __acquires(ssp)
* See srcu_read_lock_fast() for more information.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
- __acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -381,7 +399,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
* and srcu_read_lock_fast(). However, the same definition/initialization
* requirements called out for srcu_read_lock_safe() apply.
*/
-static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
@@ -400,7 +418,8 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *
* then none of the other flavors may be used, whether before, during,
* or after.
*/
-static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -412,7 +431,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
+srcu_read_lock_notrace(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -443,7 +463,8 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
* which calls to down_read() may be nested. The same srcu_struct may be
* used concurrently by srcu_down_read() and srcu_read_lock().
*/
-static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_down_read(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
WARN_ON_ONCE(in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -458,7 +479,7 @@ static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
* Exit an SRCU read-side critical section.
*/
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -474,7 +495,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
* Exit a light-weight SRCU read-side critical section.
*/
static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
srcu_lock_release(&ssp->dep_map);
@@ -490,7 +511,7 @@ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ct
* Exit an SRCU-fast-updown read-side critical section.
*/
static inline void
-srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
srcu_lock_release(&ssp->dep_map);
@@ -504,7 +525,7 @@ srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *
* See srcu_read_unlock_fast() for more information.
*/
static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
- struct srcu_ctr __percpu *scp) __releases(ssp)
+ struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
__srcu_read_unlock_fast(ssp, scp);
@@ -519,7 +540,7 @@ static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
 * the same context as the matching srcu_down_read_fast().
*/
static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
@@ -535,7 +556,7 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
* Exit an SRCU read-side critical section, but in an NMI-safe manner.
*/
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
@@ -545,7 +566,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
@@ -560,7 +581,7 @@ srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 * the same context as the matching srcu_down_read().
*/
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
@@ -600,15 +621,21 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
_T->idx = srcu_read_lock(_T->lock),
srcu_read_unlock(_T->lock, _T->idx),
int idx)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T)
DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
_T->scp = srcu_read_lock_fast(_T->lock),
srcu_read_unlock_fast(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T)
DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
_T->scp = srcu_read_lock_fast_notrace(_T->lock),
srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T)
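
As with the spinlock guards, these declarations let the analysis treat the
SRCU guard classes as shared acquisitions. A minimal reader sketch
(hypothetical names):

	DEFINE_STATIC_SRCU(demo_srcu);

	static void demo_reader(void)
	{
		guard(srcu)(&demo_srcu);
		/* SRCU read-side critical section; the guard stores and
		 * replays the index for srcu_read_unlock(). */
	}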
#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index e0698024667a..dec7cbe015aa 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -73,6 +73,7 @@ void synchronize_srcu(struct srcu_struct *ssp);
* index that must be passed to the matching srcu_read_unlock().
*/
static inline int __srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int idx;
@@ -80,6 +81,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
preempt_enable();
+ __acquire_shared(ssp);
return idx;
}
@@ -96,22 +98,26 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline
void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index d6f978b50472..958cb7ef41cb 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -233,7 +233,7 @@ struct srcu_struct {
#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
__DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires_shared(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_expedite_current(struct srcu_struct *ssp);
@@ -286,6 +286,7 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
* implementations of this_cpu_inc().
*/
static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -294,6 +295,7 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -308,7 +310,9 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
*/
static inline void notrace
__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
@@ -326,6 +330,7 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
*/
static inline
struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -334,6 +339,7 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -348,7 +354,9 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
*/
static inline void notrace
__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index f1054b9c2d8a..32352a216567 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,14 +28,14 @@
* This could also be configured at run time using CPU freq framework. */
/* MDC Clock Selection defines */
-#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_scr_i/42 */
-#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_scr_i/62 */
-#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
-#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
-#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
-#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/124 */
-#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_scr_i/204 */
-#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_scr_i/324 */
+#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_csr_i/42 */
+#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_csr_i/62 */
+#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_csr_i/16 */
+#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_csr_i/26 */
+#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_csr_i/102 */
+#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_csr_i/124 */
+#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_csr_i/204 */
+#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_csr_i/324 */
/* MTL algorithms identifiers */
#define MTL_TX_ALGORITHM_WRR 0x0
@@ -191,6 +191,7 @@ enum dwmac_core_type {
#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP BIT(12)
#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(13)
+#define STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD BIT(14)
struct mac_device_info;
@@ -256,7 +257,7 @@ struct plat_stmmacenet_data {
int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
- int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
int (*mac_finish)(struct net_device *ndev,
@@ -299,7 +300,6 @@ struct plat_stmmacenet_data {
int int_snapshot_num;
int msi_mac_vec;
int msi_wol_vec;
- int msi_lpi_vec;
int msi_sfty_ce_vec;
int msi_sfty_ue_vec;
int msi_rx_base_vec;
diff --git a/include/linux/string.h b/include/linux/string.h
index 1b564c36d721..b850bd91b3d8 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -336,8 +336,8 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s);
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
-__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
-__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 891f6173c951..eb4bd62df319 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -14,12 +14,10 @@
/*
* Debugging macros etc
*/
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
extern unsigned int rpc_debug;
extern unsigned int nfs_debug;
extern unsigned int nfsd_debug;
extern unsigned int nlm_debug;
-#endif
#define dprintk(fmt, ...) \
dfprintk(FACILITY, fmt, ##__VA_ARGS__)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5506d20857c3..4dc14c7a711b 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -35,8 +35,10 @@
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
+ unsigned int sp_nrthreads; /* # of threads currently running in pool */
+ unsigned int sp_nrthrmin; /* Min number of threads to run per pool */
+ unsigned int sp_nrthrmax; /* Max requested number of threads in pool */
struct lwq sp_xprts; /* pending transports */
- unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
@@ -53,6 +55,7 @@ enum {
SP_TASK_PENDING, /* still work to do even if no xprt is queued */
SP_NEED_VICTIM, /* One thread needs to agree to exit */
SP_VICTIM_REMAINS, /* One thread needs to actually exit */
+ SP_TASK_STARTING, /* Task has started but not added to idle yet */
};
@@ -71,7 +74,7 @@ struct svc_serv {
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
unsigned int sv_nprogs; /* Number of sv_programs */
- unsigned int sv_nrthreads; /* # of server threads */
+ unsigned int sv_nrthreads; /* # of running server threads */
unsigned int sv_max_payload; /* datagram payload size */
unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */
unsigned int sv_xdrsize; /* XDR buffer size */
@@ -440,13 +443,17 @@ struct svc_serv *svc_create(struct svc_program *, unsigned int,
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
+int svc_new_thread(struct svc_serv *serv, struct svc_pool *pool);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
-int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int svc_set_pool_threads(struct svc_serv *serv, struct svc_pool *pool,
+ unsigned int min_threads, unsigned int max_threads);
+int svc_set_num_threads(struct svc_serv *serv, unsigned int min_threads,
+ unsigned int nrservs);
int svc_pool_stats_open(struct svc_info *si, struct file *file);
void svc_process(struct svc_rqst *rqstp);
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index de37069aba90..372a00882ca6 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -61,7 +61,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_recv(struct svc_rqst *rqstp);
+int svc_recv(struct svc_rqst *rqstp, long timeo);
void svc_send(struct svc_rqst *rqstp);
int svc_addsock(struct svc_serv *serv, struct net *net,
const int fd, char *name_return, const size_t len,
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
index 66ca3ece951a..a723fb1da9c8 100644
--- a/include/linux/sunrpc/xdrgen/_builtins.h
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -46,6 +46,66 @@ xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
return true;
}
+/*
+ * De facto (non-standard but commonly implemented) signed short type:
+ * - Wire sends sign-extended 32-bit value (e.g., 0xFFFFFFFF)
+ * - be32_to_cpup() returns u32 (0xFFFFFFFF)
+ * - Explicit (s16) cast truncates to 16 bits (0xFFFF = -1)
+ */
+static inline bool
+xdrgen_decode_short(struct xdr_stream *xdr, s16 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (s16)be32_to_cpup(p);
+ return true;
+}
+
+/*
+ * De facto (non-standard but commonly implemented) signed short type:
+ * - C integer promotion sign-extends s16 val to int before passing to
+ * cpu_to_be32()
+ * - This is well-defined: -1 as s16 -> -1 as int -> 0xFFFFFFFF on wire
+ */
+static inline bool
+xdrgen_encode_short(struct xdr_stream *xdr, s16 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+/*
+ * De facto (non-standard but commonly implemented) unsigned short type:
+ * 16-bit integer zero-extended to fill one XDR_UNIT.
+ */
+static inline bool
+xdrgen_decode_unsigned_short(struct xdr_stream *xdr, u16 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (u16)be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_short(struct xdr_stream *xdr, u16 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
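+/*
+ * A worked round trip of the sign-extension behaviour described above
+ * (a sketch assuming a positioned struct xdr_stream *xdr on each side):
+ *
+ *	s16 in = -1, out;			// in = 0xFFFF
+ *
+ *	xdrgen_encode_short(xdr, in);		// wire word: 0xFFFFFFFF
+ *	...
+ *	xdrgen_decode_short(xdr, &out);		// be32_to_cpup() -> 0xFFFFFFFF,
+ *						// (s16) cast -> 0xFFFF = -1
+ */
+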
static inline bool
xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
{
@@ -188,12 +248,10 @@ xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
return false;
if (unlikely(maxlen && len > maxlen))
return false;
- if (len != 0) {
- p = xdr_inline_decode(xdr, len);
- if (unlikely(!p))
- return false;
- ptr->data = (unsigned char *)p;
- }
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
ptr->len = len;
return true;
}
@@ -219,12 +277,10 @@ xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
return false;
if (unlikely(maxlen && len > maxlen))
return false;
- if (len != 0) {
- p = xdr_inline_decode(xdr, len);
- if (unlikely(!p))
- return false;
- ptr->data = (u8 *)p;
- }
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
ptr->len = len;
return true;
}
diff --git a/include/linux/sunrpc/xdrgen/nfs4_1.h b/include/linux/sunrpc/xdrgen/nfs4_1.h
index cf21a14aa885..4ac54bdbd335 100644
--- a/include/linux/sunrpc/xdrgen/nfs4_1.h
+++ b/include/linux/sunrpc/xdrgen/nfs4_1.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Generated by xdrgen. Manual edits will be lost. */
/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
-/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+/* XDR specification modification time: Thu Jan 8 23:12:07 2026 */
#ifndef _LINUX_XDRGEN_NFS4_1_DEF_H
#define _LINUX_XDRGEN_NFS4_1_DEF_H
@@ -18,6 +18,14 @@ typedef struct {
uint32_t *element;
} bitmap4;
+typedef opaque utf8string;
+
+typedef utf8string utf8str_cis;
+
+typedef utf8string utf8str_cs;
+
+typedef utf8string utf8str_mixed;
+
struct nfstime4 {
int64_t seconds;
uint32_t nseconds;
@@ -40,6 +48,7 @@ enum open_args_share_access4 {
OPEN_ARGS_SHARE_ACCESS_WRITE = 2,
OPEN_ARGS_SHARE_ACCESS_BOTH = 3,
};
+
typedef enum open_args_share_access4 open_args_share_access4;
enum open_args_share_deny4 {
@@ -48,6 +57,7 @@ enum open_args_share_deny4 {
OPEN_ARGS_SHARE_DENY_WRITE = 2,
OPEN_ARGS_SHARE_DENY_BOTH = 3,
};
+
typedef enum open_args_share_deny4 open_args_share_deny4;
enum open_args_share_access_want4 {
@@ -59,6 +69,7 @@ enum open_args_share_access_want4 {
OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20,
OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21,
};
+
typedef enum open_args_share_access_want4 open_args_share_access_want4;
enum open_args_open_claim4 {
@@ -70,6 +81,7 @@ enum open_args_open_claim4 {
OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5,
OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6,
};
+
typedef enum open_args_open_claim4 open_args_open_claim4;
enum open_args_createmode4 {
@@ -78,10 +90,15 @@ enum open_args_createmode4 {
OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2,
OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3,
};
+
typedef enum open_args_createmode4 open_args_createmode4;
typedef struct open_arguments4 fattr4_open_arguments;
+/*
+ * Determine what OPEN supports.
+ */
+
enum { FATTR4_OPEN_ARGUMENTS = 86 };
enum { OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010 };
@@ -90,6 +107,11 @@ typedef struct nfstime4 fattr4_time_deleg_access;
typedef struct nfstime4 fattr4_time_deleg_modify;
+/*
+ * New RECOMMENDED Attribute for
+ * delegation caching of times
+ */
+
enum { FATTR4_TIME_DELEG_ACCESS = 84 };
enum { FATTR4_TIME_DELEG_MODIFY = 85 };
@@ -124,13 +146,88 @@ enum open_delegation_type4 {
OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
};
+
typedef enum open_delegation_type4 open_delegation_type4;
+enum aclmodel4 {
+ ACL_MODEL_NFS4 = 1,
+ ACL_MODEL_POSIX_DRAFT = 2,
+ ACL_MODEL_NONE = 3,
+};
+
+typedef enum aclmodel4 aclmodel4;
+
+enum aclscope4 {
+ ACL_SCOPE_FILE_OBJECT = 1,
+ ACL_SCOPE_FILE_SYSTEM = 2,
+ ACL_SCOPE_SERVER = 3,
+};
+
+typedef enum aclscope4 aclscope4;
+
+enum posixacetag4 {
+ POSIXACE4_TAG_USER_OBJ = 1,
+ POSIXACE4_TAG_USER = 2,
+ POSIXACE4_TAG_GROUP_OBJ = 3,
+ POSIXACE4_TAG_GROUP = 4,
+ POSIXACE4_TAG_MASK = 5,
+ POSIXACE4_TAG_OTHER = 6,
+};
+
+typedef enum posixacetag4 posixacetag4;
+
+typedef uint32_t posixaceperm4;
+
+enum { POSIXACE4_PERM_EXECUTE = 0x00000001 };
+
+enum { POSIXACE4_PERM_WRITE = 0x00000002 };
+
+enum { POSIXACE4_PERM_READ = 0x00000004 };
+
+struct posixace4 {
+ posixacetag4 tag;
+ posixaceperm4 perm;
+ utf8str_mixed who;
+};
+
+typedef aclmodel4 fattr4_acl_trueform;
+
+typedef aclscope4 fattr4_acl_trueform_scope;
+
+typedef struct {
+ u32 count;
+ struct posixace4 *element;
+} fattr4_posix_default_acl;
+
+typedef struct {
+ u32 count;
+ struct posixace4 *element;
+} fattr4_posix_access_acl;
+
+/*
+ * New for POSIX ACL extension
+ */
+
+enum { FATTR4_ACL_TRUEFORM = 89 };
+
+enum { FATTR4_ACL_TRUEFORM_SCOPE = 90 };
+
+enum { FATTR4_POSIX_DEFAULT_ACL = 91 };
+
+enum { FATTR4_POSIX_ACCESS_ACL = 92 };
+
#define NFS4_int64_t_sz \
(XDR_hyper)
#define NFS4_uint32_t_sz \
(XDR_unsigned_int)
#define NFS4_bitmap4_sz (XDR_unsigned_int)
+#define NFS4_utf8string_sz (XDR_unsigned_int)
+#define NFS4_utf8str_cis_sz \
+ (NFS4_utf8string_sz)
+#define NFS4_utf8str_cs_sz \
+ (NFS4_utf8string_sz)
+#define NFS4_utf8str_mixed_sz \
+ (NFS4_utf8string_sz)
#define NFS4_nfstime4_sz \
(NFS4_int64_t_sz + NFS4_uint32_t_sz)
#define NFS4_fattr4_offline_sz \
@@ -149,5 +246,18 @@ typedef enum open_delegation_type4 open_delegation_type4;
#define NFS4_fattr4_time_deleg_modify_sz \
(NFS4_nfstime4_sz)
#define NFS4_open_delegation_type4_sz (XDR_int)
+#define NFS4_aclmodel4_sz (XDR_int)
+#define NFS4_aclscope4_sz (XDR_int)
+#define NFS4_posixacetag4_sz (XDR_int)
+#define NFS4_posixaceperm4_sz \
+ (NFS4_uint32_t_sz)
+#define NFS4_posixace4_sz \
+ (NFS4_posixacetag4_sz + NFS4_posixaceperm4_sz + NFS4_utf8str_mixed_sz)
+#define NFS4_fattr4_acl_trueform_sz \
+ (NFS4_aclmodel4_sz)
+#define NFS4_fattr4_acl_trueform_scope_sz \
+ (NFS4_aclscope4_sz)
+#define NFS4_fattr4_posix_default_acl_sz (XDR_unsigned_int)
+#define NFS4_fattr4_posix_access_acl_sz (XDR_unsigned_int)
#endif /* _LINUX_XDRGEN_NFS4_1_DEF_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 38ca3df68716..62fc7499b408 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -224,13 +224,11 @@ enum {
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* Bit flag in swap_map */
-#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */
/* Special value in first swap_map */
#define SWAP_MAP_MAX 0x3e /* Max count */
#define SWAP_MAP_BAD 0x3f /* Note page is bad */
-#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */
/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX 0x7f /* Max count */
@@ -453,16 +451,7 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-int folio_alloc_swap(struct folio *folio);
-bool folio_free_swap(struct folio *folio);
-void put_swap_folio(struct folio *folio, swp_entry_t entry);
-extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t, int);
-extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t entry, int nr);
-extern void swap_free_nr(swp_entry_t entry, int nr_pages);
-extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
@@ -474,6 +463,29 @@ struct backing_dev_info;
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
+/*
+ * If there is an existing swap slot reference (swap entry) and the caller
+ * guarantees that there is no racing modification of it (e.g., the PTL
+ * protecting the swap entry in a page table; shmem's cmpxchg protects
+ * the swap entry in the shmem mapping), these two helpers below can be used
+ * to put/dup the entries directly.
+ *
+ * All entries must be allocated by folio_alloc_swap() and must have a
+ * swap count > 1. See the comments of the folio_*_swap helpers for details.
+ */
+int swap_dup_entry_direct(swp_entry_t entry);
+void swap_put_entries_direct(swp_entry_t entry, int nr);
+
+/*
+ * folio_free_swap() tries to free the swap entries pinned by a swap cache
+ * folio; it is declared here so that other components can call it.
+ */
+bool folio_free_swap(struct folio *folio);
+
+/* Allocate / free (hibernation) exclusive entries */
+swp_entry_t swap_alloc_hibernation_slot(int type);
+void swap_free_hibernation_slot(swp_entry_t entry);
+
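A sketch of the intended calling pattern (hypothetical caller): the
page-table lock pins the swap entry, which is what makes the direct dup safe:

	/* Caller holds the PTL covering this PTE. */
	static int demo_dup_pte_swap(pte_t pte)
	{
		swp_entry_t entry = pte_to_swp_entry(pte);

		return swap_dup_entry_direct(entry);
	}
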
static inline void put_swap_device(struct swap_info_struct *si)
{
percpu_ref_put(&si->users);
@@ -501,10 +513,6 @@ static inline void put_swap_device(struct swap_info_struct *si)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
-static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
-{
-}
-
static inline void free_swap_cache(struct folio *folio)
{
}
@@ -514,25 +522,12 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
-{
-}
-
-static inline int swap_duplicate(swp_entry_t swp)
-{
- return 0;
-}
-
-static inline int swapcache_prepare(swp_entry_t swp, int nr)
+static inline int swap_dup_entry_direct(swp_entry_t ent)
{
return 0;
}
-static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
-{
-}
-
-static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
+static inline void swap_put_entries_direct(swp_entry_t ent, int nr)
{
}
@@ -551,11 +546,6 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline int folio_alloc_swap(struct folio *folio)
-{
- return -EINVAL;
-}
-
static inline bool folio_free_swap(struct folio *folio)
{
return false;
@@ -568,17 +558,6 @@ static inline int add_swap_extent(struct swap_info_struct *sis,
return -EINVAL;
}
#endif /* CONFIG_SWAP */
-
-static inline void free_swap_and_cache(swp_entry_t entry)
-{
- free_swap_and_cache_nr(entry, 1);
-}
-
-static inline void swap_free(swp_entry_t entry)
-{
- swap_free_nr(entry, 1);
-}
-
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index cf84d98964b2..02bd6ddb6278 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -59,7 +59,6 @@ struct compat_stat;
struct old_timeval32;
struct robust_list_head;
struct futex_waitv;
-struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
@@ -718,7 +717,7 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
asmlinkage long sys_umask(int mask);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
+asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, void __user *cache);
asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
@@ -961,6 +960,7 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
+asmlinkage long sys_rseq_slice_yield(void);
asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
asmlinkage long sys_open_tree_attr(int dfd, const char __user *path,
unsigned flags,
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index b449665c686a..5226efde9ad4 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -8,9 +8,11 @@
*/
#include <linux/err.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
#include <linux/types.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/edid.h>
struct device;
struct platform_device;
@@ -60,6 +62,16 @@ struct efifb_dmi_info {
int flags;
};
+struct sysfb_display_info {
+ struct screen_info screen;
+
+#if defined(CONFIG_FIRMWARE_EDID)
+ struct edid_info edid;
+#endif
+};
+
+extern struct sysfb_display_info sysfb_primary_display;
+
#ifdef CONFIG_SYSFB
void sysfb_disable(struct device *dev);
@@ -82,16 +94,17 @@ static inline bool sysfb_handles_screen_info(void)
#ifdef CONFIG_EFI
extern struct efifb_dmi_info efifb_dmi_list[];
-void sysfb_apply_efi_quirks(void);
-void sysfb_set_efifb_fwnode(struct platform_device *pd);
+void sysfb_apply_efi_quirks(struct screen_info *si);
+void sysfb_set_efifb_fwnode(const struct screen_info *si, struct platform_device *pd);
#else /* CONFIG_EFI */
-static inline void sysfb_apply_efi_quirks(void)
+static inline void sysfb_apply_efi_quirks(struct screen_info *si)
{
}
-static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
+static inline void sysfb_set_efifb_fwnode(const struct screen_info *si,
+ struct platform_device *pd)
{
}
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c33a96b7391a..99b775f3ff46 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -808,4 +808,17 @@ static inline void sysfs_put(struct kernfs_node *kn)
kernfs_put(kn);
}
+/* Permissions on a sysfs file: you didn't miss the 0 prefix, did you? */
+#define VERIFY_OCTAL_PERMISSIONS(perms) \
+ (BUILD_BUG_ON_ZERO((perms) < 0) + \
+ BUILD_BUG_ON_ZERO((perms) > 0777) + \
+ /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
+ BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
+ /* USER_WRITABLE >= GROUP_WRITABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
+ /* OTHER_WRITABLE? Generally considered a bad idea. */ \
+ BUILD_BUG_ON_ZERO((perms) & 2) + \
+ (perms))
+
#endif /* _SYSFS_H_ */
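To illustrate what these checks catch (editorial sketch, not part of the patch):

	/* Octal 0644 passes every check and evaluates to itself. */
	static const umode_t ok = VERIFY_OCTAL_PERMISSIONS(0644);

	/* Decimal 644 (== octal 01204) exceeds 0777, so
	 * BUILD_BUG_ON_ZERO((perms) > 0777) breaks the build: */
	/* static const umode_t bad = VERIFY_OCTAL_PERMISSIONS(644); */

	/* World-writable 0666 trips BUILD_BUG_ON_ZERO((perms) & 2): */
	/* static const umode_t ww = VERIFY_OCTAL_PERMISSIONS(0666); */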
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 20b8c6e21fef..f72eef31fa23 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -181,10 +181,7 @@ struct tcp_request_sock {
#endif
};
-static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
-{
- return (struct tcp_request_sock *)req;
-}
+#define tcp_rsk(ptr) container_of_const(ptr, struct tcp_request_sock, req.req)
static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
@@ -294,7 +291,8 @@ struct tcp_sock {
u8 nonagle : 4,/* Disable Nagle algorithm? */
rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
u8 received_ce_pending:4, /* Not yet transmit cnt of received_ce */
- unused2:4;
+ accecn_opt_sent_w_dsack:1,/* Sent ACCECN opt in previous ACK w/ D-SACK */
+ unused2:3;
u8 accecn_minlen:2,/* Minimum length of AccECN option sent */
est_ecnfield:2,/* ECN field for AccECN delivered estimates */
accecn_opt_demand:2,/* Demand AccECN option for n next ACKs */
@@ -345,6 +343,7 @@ struct tcp_sock {
u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_rtt_last_tsecr;
u32 delivered_ecn_bytes[3];
+ u16 pkts_acked_ewma;/* Pkts acked EWMA for AccECN cep heuristic */
u64 first_tx_mstamp; /* start of window send phase */
u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index 1f3e5dad6d0d..ee5f0bd41f43 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -76,6 +76,9 @@ struct tee_device {
/**
* struct tee_driver_ops - driver operations vtable
* @get_version: returns version of driver
+ * @get_tee_revision: returns revision string (diagnostic only);
+ * do not infer feature support from this, use
+ * TEE_IOC_VERSION instead
* @open: called for a context when the device file is opened
* @close_context: called when the device file is closed
* @release: called to release the context
@@ -95,9 +98,12 @@ struct tee_device {
* client closes the device file, even if there are existing references to the
* context. The TEE driver can use @close_context to start cleaning up.
*/
struct tee_driver_ops {
void (*get_version)(struct tee_device *teedev,
struct tee_ioctl_version_data *vers);
+ int (*get_tee_revision)(struct tee_device *teedev,
+ char *buf, size_t len);
int (*open)(struct tee_context *ctx);
void (*close_context)(struct tee_context *ctx);
void (*release)(struct tee_context *ctx);
@@ -123,6 +129,9 @@ struct tee_driver_ops {
int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
};
+/* Size for TEE revision string buffer used by get_tee_revision(). */
+#define TEE_REVISION_STR_SIZE 128
+
/**
* struct tee_desc - Describes the TEE driver to the subsystem
* @name: name of driver
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 88a6f9697c89..e561a26f537a 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -315,6 +315,9 @@ struct tee_client_device {
* @driver: driver structure
*/
struct tee_client_driver {
+ int (*probe)(struct tee_client_device *);
+ void (*remove)(struct tee_client_device *);
+ void (*shutdown)(struct tee_client_device *);
const struct tee_client_device_id *id_table;
struct device_driver driver;
};
@@ -322,4 +325,13 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of_const(d, struct tee_client_driver, driver)
+#define tee_client_driver_register(drv) \
+ __tee_client_driver_register(drv, THIS_MODULE)
+int __tee_client_driver_register(struct tee_client_driver *, struct module *);
+void tee_client_driver_unregister(struct tee_client_driver *);
+
+#define module_tee_client_driver(__tee_client_driver) \
+ module_driver(__tee_client_driver, tee_client_driver_register, \
+ tee_client_driver_unregister)
+
#endif /*__TEE_DRV_H*/
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index b40de9bab4b7..051e42902690 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -46,15 +46,17 @@ enum syscall_work_bit {
SYSCALL_WORK_BIT_SYSCALL_AUDIT,
SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
+ SYSCALL_WORK_BIT_SYSCALL_RSEQ_SLICE,
};
-#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
-#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
-#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
-#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
-#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
-#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
-#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
+#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
+#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
+#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
+#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
+#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
+#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#define SYSCALL_WORK_SYSCALL_RSEQ_SLICE BIT(SYSCALL_WORK_BIT_SYSCALL_RSEQ_SLICE)
#endif
#include <asm/thread_info.h>
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ac76ae9fa36d..738007d6f577 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -126,6 +126,7 @@ enum tick_dep_bits {
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
+extern bool tick_nohz_is_active(void);
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
@@ -142,6 +143,7 @@ extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
+static inline bool tick_nohz_is_active(void) { return false; }
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index dce03a5cafb7..7de6b350e559 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -115,6 +115,15 @@ extern void timecounter_init(struct timecounter *tc,
*/
extern u64 timecounter_read(struct timecounter *tc);
+/*
+ * This is like cyclecounter_cyc2ns(), but it is used for computing a
+ * time previous to the time stored in the cycle counter.
+ */
+static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, u64 cycles, u64 frac)
+{
+ return ((cycles * cc->mult) - frac) >> cc->shift;
+}
+
/**
* timecounter_cyc2time - convert a cycle counter to same
* time base as values returned by
@@ -131,7 +140,25 @@ extern u64 timecounter_read(struct timecounter *tc);
*
* Returns: cycle counter converted to nanoseconds since the initial time stamp
*/
-extern u64 timecounter_cyc2time(const struct timecounter *tc,
- u64 cycle_tstamp);
+static inline u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp)
+{
+ const struct cyclecounter *cc = tc->cc;
+ u64 delta = (cycle_tstamp - tc->cycle_last) & cc->mask;
+ u64 nsec = tc->nsec, frac = tc->frac;
+
+ /*
+ * Instead of always treating cycle_tstamp as more recent than
+ * tc->cycle_last, detect when it is too far in the future and
+	 * treat it as an old time stamp instead.
+ */
+ if (unlikely(delta > cc->mask / 2)) {
+ delta = (tc->cycle_last - cycle_tstamp) & cc->mask;
+ nsec -= cc_cyc2ns_backwards(cc, delta, frac);
+ } else {
+ nsec += cyclecounter_cyc2ns(cc, delta, tc->mask, &frac);
+ }
+
+ return nsec;
+}
#endif
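A worked example of the wraparound heuristic above (editorial, with made-up numbers):

	/* With an 8-bit mask (0xff), cycle_last == 10 and a hardware stamp
	 * of 250, the forward delta is (250 - 10) & 0xff == 240, which
	 * exceeds mask / 2 == 127. The stamp is therefore treated as
	 * (10 - 250) & 0xff == 16 cycles in the past and converted with
	 * cc_cyc2ns_backwards(), instead of overshooting 240 cycles into
	 * the future. */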
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index c52b862dad45..fa4654ffb621 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -63,6 +63,11 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2);
/* Return @a with all but the lowest @size bytes cleared */
struct tnum tnum_cast(struct tnum a, u8 size);
+/* Swap the bytes of a tnum */
+struct tnum tnum_bswap16(struct tnum a);
+struct tnum tnum_bswap32(struct tnum a);
+struct tnum tnum_bswap64(struct tnum a);
+
/* Returns true if @a is a known constant */
static inline bool tnum_is_const(struct tnum a)
{
diff --git a/include/linux/trace_printk.h b/include/linux/trace_printk.h
new file mode 100644
index 000000000000..bb5874097f24
--- /dev/null
+++ b/include/linux/trace_printk.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TRACE_PRINTK_H
+#define _LINUX_TRACE_PRINTK_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/instruction_pointer.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+/*
+ * General tracing-related utility functions - trace_printk(),
+ * tracing_on()/tracing_off() and tracing_start()/tracing_stop().
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop()/tracing_start() have slightly more overhead. They are
+ * used by things like suspend to ram where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
+
+enum ftrace_dump_mode {
+ DUMP_NONE,
+ DUMP_ALL,
+ DUMP_ORIG,
+ DUMP_PARAM,
+};
+
+#ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+void tracing_snapshot(void);
+void tracing_snapshot_alloc(void);
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+
+static inline __printf(1, 2)
+void ____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...) \
+do { \
+ if (0) \
+ ____trace_printk_check_format(fmt, ##args); \
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering printk-like
+ * tracing calls throughout the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_printk() is used.)
+ *
+ * A little optimization trick is done here. If there's only one
+ * argument, there's no need to scan the string for printf formats.
+ * The trace_puts() will suffice. But how can we take advantage of
+ * using trace_puts() when trace_printk() has only one argument?
+ * By stringifying the args and checking the size we can tell
+ * whether or not there are args. __stringify((__VA_ARGS__)) will
+ * turn into "()\0" with a size of 3 when there are no args, anything
+ * else will be bigger. All we need to do is define a string from this,
+ * take its size, and compare it to 3. If it's bigger, use
+ * do_trace_printk(); otherwise, optimize it to trace_puts(). Then just
+ * let gcc optimize the rest.
+ */
+
+#define trace_printk(fmt, ...) \
+do { \
+ char _______STR[] = __stringify((__VA_ARGS__)); \
+ if (sizeof(_______STR) > 3) \
+ do_trace_printk(fmt, ##__VA_ARGS__); \
+ else \
+ trace_puts(fmt); \
+} while (0)
+
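For example (editorial illustration of how the macro resolves):

	static void foo_debug(int x)
	{
		trace_printk("entered fast path\n");	/* no args: becomes trace_puts() */
		trace_printk("x = %d\n", x);		/* has args: uses do_trace_printk() */
	}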
+#define do_trace_printk(fmt, args...) \
+do { \
+ static const char *trace_printk_fmt __used \
+ __section("__trace_printk_fmt") = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_printk_check_format(fmt, ##args); \
+ \
+ if (__builtin_constant_p(fmt)) \
+ __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
+ else \
+ __trace_printk(_THIS_IP_, fmt, ##args); \
+} while (0)
+
+extern __printf(2, 3)
+int __trace_bprintk(unsigned long ip, const char *fmt, ...);
+
+extern __printf(2, 3)
+int __trace_printk(unsigned long ip, const char *fmt, ...);
+
+/**
+ * trace_puts - write a string into the ftrace buffer
+ * @str: the string to record
+ *
+ * Note: __trace_bputs is an internal function for trace_puts and
+ * the @ip is passed in via the trace_puts macro.
+ *
+ * This is similar to trace_printk() but is made for those really fast
+ * paths where a developer wants the least amount of "Heisenbug" effects
+ * and even the processing of the print format is too much.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering printk-like
+ * tracing calls throughout the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_puts scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_puts() is used.)
+ *
+ * Returns: 0 if nothing was written, positive # if string was.
+ * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
+ */
+
+#define trace_puts(str) ({ \
+ static const char *trace_printk_fmt __used \
+ __section("__trace_printk_fmt") = \
+ __builtin_constant_p(str) ? str : NULL; \
+ \
+ if (__builtin_constant_p(str)) \
+ __trace_bputs(_THIS_IP_, trace_printk_fmt); \
+ else \
+ __trace_puts(_THIS_IP_, str); \
+})
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str);
+
+extern void trace_dump_stack(int skip);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to initialize the static variable from fmt when fmt is not
+ * a constant, even with the outer if statement.
+ */
+#define ftrace_vprintk(fmt, vargs) \
+do { \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt __used \
+ __section("__trace_printk_fmt") = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
+ } else \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+} while (0)
+
+extern __printf(2, 0) int
+__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern __printf(2, 0) int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
+#else
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void trace_dump_stack(int skip) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+static inline void tracing_snapshot(void) { }
+static inline void tracing_snapshot_alloc(void) { }
+
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static __printf(1, 0) inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
+#endif /* CONFIG_TRACING */
+
+#endif
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index 2efc271a96fa..b1f53fd5cfa3 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -56,6 +56,7 @@ struct anon_transport_class cls = { \
struct transport_container {
struct attribute_container ac;
const struct attribute_group *statistics;
+ const struct attribute_group *encryption;
};
#define attribute_container_to_transport_container(x) \
@@ -87,9 +88,9 @@ transport_unregister_device(struct device *dev)
transport_destroy_device(dev);
}
-static inline int transport_container_register(struct transport_container *tc)
+static inline void transport_container_register(struct transport_container *tc)
{
- return attribute_container_register(&tc->ac);
+ attribute_container_register(&tc->ac);
}
static inline void transport_container_unregister(struct transport_container *tc)
@@ -99,7 +100,7 @@ static inline void transport_container_unregister(struct transport_container *tc
}
int transport_class_register(struct transport_class *);
-int anon_transport_class_register(struct anon_transport_class *);
+void anon_transport_class_register(struct anon_transport_class *);
void transport_class_unregister(struct transport_class *);
void anon_transport_class_unregister(struct anon_transport_class *);
diff --git a/include/linux/types.h b/include/linux/types.h
index d4437e9c452c..7e71d260763c 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
-#define __EXPORTED_HEADERS__
#include <uapi/linux/types.h>
#ifndef __ASSEMBLY__
@@ -171,6 +170,11 @@ typedef u64 phys_addr_t;
typedef u32 phys_addr_t;
#endif
+struct phys_vec {
+ phys_addr_t paddr;
+ size_t len;
+};
+
typedef phys_addr_t resource_size_t;
/*
@@ -180,7 +184,7 @@ typedef phys_addr_t resource_size_t;
typedef unsigned long irq_hw_number_t;
typedef struct {
- int counter;
+ int __aligned(sizeof(int)) counter;
} atomic_t;
#define ATOMIC_INIT(i) { (i) }
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 457879938fc1..f47cf4d78ad7 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -79,6 +79,14 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return local64_read(&p->v);
}
+static inline void *u64_stats_copy(void *dst, const void *src, size_t len)
+{
+ BUILD_BUG_ON(len % sizeof(u64_stats_t));
+ for (size_t i = 0; i < len / sizeof(u64_stats_t); i++)
+ ((u64 *)dst)[i] = local64_read(&((local64_t *)src)[i]);
+ return dst;
+}
+
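A typical caller pairs u64_stats_copy() with the usual seqcount retry loop; a hypothetical sketch (struct and field names invented for illustration):

	struct foo_pcpu {
		struct u64_stats_sync syncp;
		struct {
			u64_stats_t packets;
			u64_stats_t bytes;
		} stats;
	};

	static void foo_stats_snapshot(struct foo_pcpu *p, u64 *snap)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			u64_stats_copy(snap, &p->stats, sizeof(p->stats));
		} while (u64_stats_fetch_retry(&p->syncp, start));
	}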
static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
local64_set(&p->v, val);
@@ -89,6 +97,11 @@ static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
local64_add(val, &p->v);
}
+static inline void u64_stats_sub(u64_stats_t *p, s64 val)
+{
+ local64_sub(val, &p->v);
+}
+
static inline void u64_stats_inc(u64_stats_t *p)
{
local64_inc(&p->v);
@@ -110,6 +123,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
}
#else /* 64 bit */
+#include <linux/string.h>
typedef struct {
u64 v;
@@ -120,6 +134,12 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return p->v;
}
+static inline void *u64_stats_copy(void *dst, const void *src, size_t len)
+{
+ BUILD_BUG_ON(len % sizeof(u64_stats_t));
+ return memcpy(dst, src, len);
+}
+
static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
p->v = val;
@@ -130,6 +150,11 @@ static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
p->v += val;
}
+static inline void u64_stats_sub(u64_stats_t *p, s64 val)
+{
+ p->v -= val;
+}
+
static inline void u64_stats_inc(u64_stats_t *p)
{
p->v++;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 58795688a186..1cbf6b4d3aab 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -236,7 +236,7 @@ static inline void udp_allow_gso(struct sock *sk)
hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif
-#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
+#define IS_UDPLITE(__sk) (unlikely(__sk->sk_protocol == IPPROTO_UDPLITE))
static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
{
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5b127043a151..a9bc5b3067e3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -389,6 +389,9 @@ ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
size_t maxsize, unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0);
+ssize_t iov_iter_extract_bvecs(struct iov_iter *iter, struct bio_vec *bv,
+ size_t max_size, unsigned short *nr_vecs,
+ unsigned short max_vecs, iov_iter_extraction_t extraction_flags);
/**
* iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
diff --git a/include/linux/unwind_user.h b/include/linux/unwind_user.h
index 7f7282516bf5..64618618febd 100644
--- a/include/linux/unwind_user.h
+++ b/include/linux/unwind_user.h
@@ -5,8 +5,22 @@
#include <linux/unwind_user_types.h>
#include <asm/unwind_user.h>
-#ifndef ARCH_INIT_USER_FP_FRAME
- #define ARCH_INIT_USER_FP_FRAME
+#ifndef CONFIG_HAVE_UNWIND_USER_FP
+
+#define ARCH_INIT_USER_FP_FRAME(ws)
+
+#endif
+
+#ifndef ARCH_INIT_USER_FP_ENTRY_FRAME
+#define ARCH_INIT_USER_FP_ENTRY_FRAME(ws)
+#endif
+
+#ifndef unwind_user_at_function_start
+static inline bool unwind_user_at_function_start(struct pt_regs *regs)
+{
+ return false;
+}
+#define unwind_user_at_function_start unwind_user_at_function_start
#endif
int unwind_user(struct unwind_stacktrace *trace, unsigned int max_entries);
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index ee3d36eda45d..f548fea2adec 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -242,6 +242,7 @@ extern void arch_uprobe_clear_state(struct mm_struct *mm);
extern void arch_uprobe_init_state(struct mm_struct *mm);
extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
+extern unsigned long arch_uprobe_get_xol_area(void);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 2945923a8a95..b0e84896e6ac 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -290,6 +290,7 @@ extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
extern void usbnet_set_rx_mode(struct net_device *net);
extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+extern int usbnet_mii_ioctl(struct net_device *net, struct ifreq *rq, int cmd);
extern int usbnet_nway_reset(struct net_device *net);
extern int usbnet_manage_power(struct usbnet *, int);
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index 22e0dab0809e..ea92ac623a45 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -10,6 +10,14 @@
/* ------------------------------------------------------------------------
* GUIDs
+ *
+ * The GUID returned by lsusb can be converted to this format with the
+ * following python snippet:
+ *
+ * import uuid
+ * id = "{01234567-89ab-cdef-0123-456789abcdef}"
+ * le = uuid.UUID(id).bytes_le
+ * print("{" + ", ".join([f"0x{b:02x}" for b in le]) + "}")
*/
#define UVC_GUID_UVC_CAMERA \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 2eb528058d0d..71564868b8f6 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -119,7 +119,7 @@
* a fuss about it. This makes the programmer responsible for tagging
* the functions that can be garbage-collected.
*
- * With the macro it is possible to write the following:
+ * With the macro it is possible to write the following::
*
* static int foo_suspend(struct device *dev)
* {
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 1ac86896875c..2ebba746c18f 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -28,7 +28,6 @@
struct vfio_pci_core_device;
struct vfio_pci_region;
struct p2pdma_provider;
-struct dma_buf_phys_vec;
struct dma_buf_attachment;
struct vfio_pci_eventfd {
@@ -62,25 +61,25 @@ struct vfio_pci_device_ops {
int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
};
#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
-int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len);
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
#else
static inline int
-vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len)
@@ -89,7 +88,7 @@ vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
}
static inline int vfio_pci_core_get_dmabuf_phys(
struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
- unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
+ unsigned int region_index, struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
{
return -EOPNOTSUPP;
@@ -236,6 +235,6 @@ static inline bool is_aligned_for_order(struct vm_area_struct *vma,
}
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys);
+ struct phys_vec *phys);
#endif /* VFIO_PCI_CORE_H */
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 0c67543a45c8..f91704731057 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -173,6 +173,7 @@ struct virtio_vsock_pkt_info {
u32 remote_cid, remote_port;
struct vsock_sock *vsk;
struct msghdr *msg;
+ struct net *net;
u32 pkt_len;
u16 type;
u16 op;
@@ -185,7 +186,7 @@ struct virtio_transport {
struct vsock_transport transport;
/* Takes ownership of the packet */
- int (*send_pkt)(struct sk_buff *skb);
+ int (*send_pkt)(struct sk_buff *skb, struct net *net);
/* Used in MSG_ZEROCOPY mode. Checks, that provided data
* (number of buffers) could be transmitted with zerocopy
@@ -256,10 +257,10 @@ void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
-bool virtio_transport_stream_allow(u32 cid, u32 port);
+bool virtio_transport_stream_allow(struct vsock_sock *vsk, u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
struct sockaddr_vm *addr);
-bool virtio_transport_dgram_allow(u32 cid, u32 port);
+bool virtio_transport_dgram_allow(struct vsock_sock *vsk, u32 cid, u32 port);
int virtio_transport_connect(struct vsock_sock *vsk);
@@ -280,7 +281,7 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
void virtio_transport_recv_pkt(struct virtio_transport *t,
- struct sk_buff *skb);
+ struct sk_buff *skb, struct net *net);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 92f80b4d69a6..22a139f82d75 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -122,13 +122,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SWPOUT,
THP_SWPOUT_FALLBACK,
#endif
-#ifdef CONFIG_MEMORY_BALLOON
+#ifdef CONFIG_BALLOON
BALLOON_INFLATE,
BALLOON_DEFLATE,
-#ifdef CONFIG_BALLOON_COMPACTION
+#ifdef CONFIG_BALLOON_MIGRATION
BALLOON_MIGRATE,
-#endif
-#endif
+#endif /* CONFIG_BALLOON_MIGRATION */
+#endif /* CONFIG_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3398a345bda8..3c9c266cf782 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -286,10 +286,8 @@ void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);
-extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
-extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
@@ -303,6 +301,7 @@ int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
int (*calculate_pressure)(struct zone *));
+void vmstat_flush_workqueue(void);
#else /* CONFIG_SMP */
/*
@@ -394,15 +393,12 @@ static inline void __dec_node_page_state(struct page *page,
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state
-#define inc_zone_state __inc_zone_state
-#define inc_node_state __inc_node_state
-#define dec_zone_state __dec_zone_state
-
#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
+static inline void vmstat_flush_workqueue(void) { }
static inline void drain_zonestat(struct zone *zone,
struct per_cpu_zonestat *pzstats) { }
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index dabc351cc127..a4749f56398f 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -588,7 +588,7 @@ struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_housekeeping_update(const struct cpumask *hk);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f48e8ccffe81..e530112c4b3a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -328,9 +328,6 @@ struct dirty_throttle_control {
bool dirty_exceeded;
};
-void laptop_io_completion(struct backing_dev_info *info);
-void laptop_sync_completion(void);
-void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -342,7 +339,6 @@ extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
-extern int laptop_mode;
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 45ff6f7a872b..85b1fff02fde 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -17,6 +17,7 @@
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H
+#include <linux/instruction_pointer.h>
#include <linux/mutex.h>
#include <linux/rtmutex.h>
@@ -44,7 +45,7 @@ struct ww_class {
unsigned int is_wait_die;
};
-struct ww_mutex {
+context_lock_struct(ww_mutex) {
struct WW_MUTEX_BASE base;
struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
@@ -52,7 +53,7 @@ struct ww_mutex {
#endif
};
-struct ww_acquire_ctx {
+context_lock_struct(ww_acquire_ctx) {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
@@ -141,6 +142,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
+ __acquires(ctx) __no_context_analysis
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
@@ -179,6 +181,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+ __releases(ctx) __acquires_shared(ctx) __no_context_analysis
{
#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
@@ -196,6 +199,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ __releases_shared(ctx) __no_context_analysis
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
@@ -245,7 +249,8 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -278,7 +283,8 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
@@ -305,6 +311,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __acquires(lock) __must_hold(ctx) __no_context_analysis
{
int ret;
#ifdef DEBUG_WW_MUTEXES
@@ -342,6 +349,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx)
{
#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
@@ -349,10 +357,11 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
return ww_mutex_lock_interruptible(lock, ctx);
}
-extern void ww_mutex_unlock(struct ww_mutex *lock);
+extern void ww_mutex_unlock(struct ww_mutex *lock) __releases(lock);
extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(true, lock) __must_hold(ctx);
/***
* ww_mutex_destroy - mark a w/w mutex unusable
@@ -363,6 +372,7 @@ extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
+ __must_not_hold(lock)
{
#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index a4d6cc0c9f68..1e0e2cb53579 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -19,6 +19,7 @@
* @WWAN_PORT_FASTBOOT: Fastboot protocol control
* @WWAN_PORT_ADB: ADB protocol control
* @WWAN_PORT_MIPC: MTK MIPC diagnostic interface
+ * @WWAN_PORT_NMEA: embedded GNSS receiver with NMEA output
*
* @WWAN_PORT_MAX: Highest supported port types
* @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -34,6 +35,7 @@ enum wwan_port_type {
WWAN_PORT_FASTBOOT,
WWAN_PORT_ADB,
WWAN_PORT_MIPC,
+ WWAN_PORT_NMEA,
/* Add new port types above this line */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 64e9afe7d647..296b5ee5c979 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -114,7 +114,7 @@ struct simple_xattr {
struct rb_node rb_node;
char *name;
size_t size;
- char value[];
+ char value[] __counted_by(size);
};
void simple_xattrs_init(struct simple_xattrs *xattrs);
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index f3ccff2d966c..478410c880b1 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -22,6 +22,7 @@ struct zs_pool_stats {
};
struct zs_pool;
+struct scatterlist;
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
@@ -40,9 +41,12 @@ unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
- void *local_copy);
+ size_t mem_len, void *local_copy);
void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
- void *handle_mem);
+ size_t mem_len, void *handle_mem);
+void zs_obj_read_sg_begin(struct zs_pool *pool, unsigned long handle,
+ struct scatterlist *sg, size_t mem_len);
+void zs_obj_read_sg_end(struct zs_pool *pool, unsigned long handle);
void zs_obj_write(struct zs_pool *pool, unsigned long handle,
void *handle_mem, size_t mem_len);
diff --git a/include/media/dvb_vb2.h b/include/media/dvb_vb2.h
index 8cb88452cd6c..8932396d2c99 100644
--- a/include/media/dvb_vb2.h
+++ b/include/media/dvb_vb2.h
@@ -72,8 +72,6 @@ struct dvb_buffer {
/**
* struct dvb_vb2_ctx - control struct for VB2 handler
* @vb_q: pointer to &struct vb2_queue with videobuf2 queue.
- * @mutex: mutex to serialize vb2 operations. Used by
- * vb2 core %wait_prepare and %wait_finish operations.
* @slock: spin lock used to protect buffer filling at dvb_vb2.c.
* @dvb_q: List of buffers that are not filled yet.
 * @buf: Pointer to the buffer that is currently being filled.
@@ -96,7 +94,6 @@ struct dvb_buffer {
*/
struct dvb_vb2_ctx {
struct vb2_queue vb_q;
- struct mutex mutex;
spinlock_t slock;
struct list_head dvb_q;
struct dvb_buffer *buf;
@@ -114,8 +111,8 @@ struct dvb_vb2_ctx {
};
#ifndef CONFIG_DVB_MMAP
-static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx,
- const char *name, int non_blocking)
+static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name,
+ struct mutex *mutex, int non_blocking)
{
return 0;
};
@@ -124,7 +121,7 @@ static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
return 0;
};
#define dvb_vb2_is_streaming(ctx) (0)
-#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0)
+#define dvb_vb2_fill_buffer(ctx, file, wait, flags, flush) (0)
static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx,
struct file *file,
@@ -138,10 +135,12 @@ static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx,
*
* @ctx: control struct for VB2 handler
* @name: name for the VB2 handler
+ * @mutex: pointer to the mutex that serializes vb2 ioctls
* @non_blocking:
 *	if not zero, it means that the device is in non-blocking mode
*/
-int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking);
+int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name,
+ struct mutex *mutex, int non_blocking);
/**
* dvb_vb2_release - Releases the VB2 handler allocated resources and
@@ -166,10 +165,12 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx);
* @buffer_flags:
* pointer to buffer flags as defined by &enum dmx_buffer_flags.
* can be NULL.
+ * @flush: flush the buffer, even if it isn't full.
*/
int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
const unsigned char *src, int len,
- enum dmx_buffer_flags *buffer_flags);
+ enum dmx_buffer_flags *buffer_flags,
+ bool flush);
/**
* dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV
diff --git a/include/media/media-device.h b/include/media/media-device.h
index 53d2a16a70b0..749c327e3c58 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -11,6 +11,7 @@
#ifndef _MEDIA_DEVICE_H
#define _MEDIA_DEVICE_H
+#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -106,6 +107,9 @@ struct media_device_ops {
* @ops: Operation handler callbacks
* @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t.
* other operations that stop or start streaming.
+ * @num_requests: number of associated requests
+ * @num_request_objects: number of associated request objects
+ * @media_dir: DebugFS media directory
* @request_id: Used to generate unique request IDs
*
* This structure represents an abstract high-level media device. It allows easy
@@ -179,6 +183,11 @@ struct media_device {
const struct media_device_ops *ops;
struct mutex req_queue_mutex;
+ atomic_t num_requests;
+ atomic_t num_request_objects;
+
+ /* debugfs */
+ struct dentry *media_dir;
atomic_t request_id;
};
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index d27c1c646c28..dbcabeffcb57 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -20,9 +20,13 @@
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/debugfs.h>
struct media_device;
+/* debugfs top-level media directory */
+extern struct dentry *media_debugfs_root;
+
/*
* Flag to mark the media_devnode struct as registered. Drivers must not touch
* this flag directly, it will be set and cleared by media_devnode_register and
diff --git a/include/media/media-request.h b/include/media/media-request.h
index bb500b2f9da4..43ed18c11b51 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -56,6 +56,9 @@ struct media_request_object;
* @access_count: count the number of request accesses that are in progress
* @objects: List of @struct media_request_object request objects
* @num_incomplete_objects: The number of incomplete objects in the request
+ * @manual_completion: if true, then the request won't be marked as completed
+ * when @num_incomplete_objects reaches 0. Call media_request_manual_complete()
+ * to complete the request once @num_incomplete_objects reaches 0.
* @poll_wait: Wait queue for poll
* @lock: Serializes access to this struct
*/
@@ -68,6 +71,7 @@ struct media_request {
unsigned int access_count;
struct list_head objects;
unsigned int num_incomplete_objects;
+ bool manual_completion;
wait_queue_head_t poll_wait;
spinlock_t lock;
};
@@ -218,6 +222,38 @@ media_request_get_by_fd(struct media_device *mdev, int request_fd);
int media_request_alloc(struct media_device *mdev,
int *alloc_fd);
+/**
+ * media_request_mark_manual_completion - Enable manual completion
+ *
+ * @req: The request
+ *
+ * Mark that the request has to be manually completed by calling
+ * media_request_manual_complete().
+ *
+ * This function shall be called in the req_queue callback.
+ */
+static inline void
+media_request_mark_manual_completion(struct media_request *req)
+{
+ req->manual_completion = true;
+}
+
+/**
+ * media_request_manual_complete - Mark the request as completed
+ *
+ * @req: The request
+ *
+ * This function completes a request that was marked for manual completion by an
+ * earlier call to media_request_mark_manual_completion(). The request's
+ * @manual_completion field is reset to false.
+ *
+ * All objects contained in the request must have been completed previously. It
+ * is an error to call this function otherwise. If such an error occurs, the
+ * function will WARN and the request completion will be delayed until
+ * @num_incomplete_objects is 0.
+ */
+void media_request_manual_complete(struct media_request *req);
+
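A sketch of the intended calling sequence (editorial; driver names hypothetical):

	/* In the req_queue callback: defer completion to the done path. */
	static void foo_req_queue(struct media_request *req)
	{
		media_request_mark_manual_completion(req);
		/* ... queue the request's objects to the hardware ... */
	}

	/* Later, once the hardware is fully finished with the request: */
	static void foo_job_done(struct foo_ctx *ctx)
	{
		media_request_manual_complete(ctx->cur_req);
	}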
#else
static inline void media_request_get(struct media_request *req)
@@ -256,6 +292,7 @@ struct media_request_object_ops {
* struct media_request_object - An opaque object that belongs to a media
* request
*
+ * @mdev: Media device this object belongs to
* @ops: object's operations
* @priv: object's priv pointer
* @req: the request this object belongs to (can be NULL)
@@ -267,6 +304,7 @@ struct media_request_object_ops {
* another struct that contains the actual data for this request object.
*/
struct media_request_object {
+ struct media_device *mdev;
const struct media_request_object_ops *ops;
void *priv;
struct media_request *req;
@@ -336,7 +374,7 @@ void media_request_object_init(struct media_request_object *obj);
* @req: The media request
* @ops: The object ops for this object
* @priv: A driver-specific priv pointer associated with this object
- * @is_buffer: Set to true if the object a buffer object.
+ * @is_buffer: Set to true if the object is a buffer object.
* @obj: The object
*
* Bind this object to the request and set the ops and priv values of
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 31fc1bee3797..327976b14d50 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -1581,6 +1581,9 @@ int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd);
* not overwritten. Callers should register the controls they want to handle
* themselves before calling this function.
*
+ * This function will set the control handler's error field on failure, just as
+ * other functions adding controls to the handler.
+ *
* Return: 0 on success, a negative error code on failure.
*/
int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index f7c57c776589..cd82e70ccbaa 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -182,7 +182,7 @@ enum v4l2_fwnode_bus_type {
/**
* v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
+ * @fwnode: pointer to the endpoint's fwnode handle (may be NULL)
* @vep: pointer to the V4L2 fwnode data structure
*
* This function parses the V4L2 fwnode endpoint specific parameters from the
@@ -218,7 +218,7 @@ enum v4l2_fwnode_bus_type {
*
* Return: %0 on success or a negative error code on failure:
* %-ENOMEM on memory allocation failure
- * %-EINVAL on parsing failure
+ * %-EINVAL on parsing failure, including @fwnode == NULL
* %-ENXIO on mismatching bus types
*/
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
@@ -236,7 +236,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
/**
* v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
+ * @fwnode: pointer to the endpoint's fwnode handle (may be NULL)
* @vep: pointer to the V4L2 fwnode data structure
*
* This function parses the V4L2 fwnode endpoint specific parameters from the
@@ -276,7 +276,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
*
* Return: %0 on success or a negative error code on failure:
* %-ENOMEM on memory allocation failure
- * %-EINVAL on parsing failure
+ * %-EINVAL on parsing failure, including @fwnode == NULL
* %-ENXIO on mismatching bus types
*/
int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 6f7a58350441..54c83b18d555 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -663,7 +663,22 @@ void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
struct video_device;
/* names for fancy debug output */
+
+/**
+ * var v4l2_field_names - Helper array mapping ``V4L2_FIELD_*`` to strings.
+ *
+ * Especially when printing debug messages, it is useful to output
+ * the field order of the V4L2 buffers. This array associates each
+ * possible field order value from the V4L2 API with a string.
+ */
extern const char *v4l2_field_names[];
+
+/**
+ * var v4l2_type_names - Helper array mapping ``V4L2_BUF_TYPE_*`` to strings.
+ *
+ * When printing debug messages, it is useful to output the V4L2 buffer
+ * type as a readable name instead of a raw number.
+ */
extern const char *v4l2_type_names[];
#ifdef CONFIG_COMPAT
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index bf6a09a04dcf..31de25d792b9 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -548,6 +548,27 @@ v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
/**
+ * v4l2_m2m_get() - take a reference to the m2m_dev structure
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * This is used to share the M2M device across multiple devices, for
+ * example to avoid scheduling two hardware nodes concurrently.
+ */
+void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev);
+
+/**
+ * v4l2_m2m_put() - remove a reference to the m2m_dev structure
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Once the M2M device has no more references, v4l2_m2m_release() will be
+ * called automatically. Users of this method should never call
+ * v4l2_m2m_release() directly. See v4l2_m2m_get() for more details.
+ */
+void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev);
+
+/**
* v4l2_m2m_ctx_init() - allocate and initialize a m2m context
*
* @m2m_dev: opaque pointer to the internal data to handle M2M context
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 9b02aeba4108..4424d481d7f7 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -351,13 +351,6 @@ struct vb2_buffer {
* \*num_buffers are being allocated additionally to
* the buffers already allocated. If either \*num_planes
* or the requested sizes are invalid callback must return %-EINVAL.
- * @wait_prepare: release any locks taken while calling vb2 functions;
- * it is called before an ioctl needs to wait for a new
- * buffer to arrive; required to avoid a deadlock in
- * blocking access type.
- * @wait_finish: reacquire all locks released in the previous callback;
- * required to continue operation after sleeping while
- * waiting for a new buffer to arrive.
* @buf_out_validate: called when the output buffer is prepared or queued
* to a request; drivers can use this to validate
* userspace-provided information; this is required only
@@ -436,9 +429,6 @@ struct vb2_ops {
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[]);
- void (*wait_prepare)(struct vb2_queue *q);
- void (*wait_finish)(struct vb2_queue *q);
-
int (*buf_out_validate)(struct vb2_buffer *vb);
int (*buf_init)(struct vb2_buffer *vb);
int (*buf_prepare)(struct vb2_buffer *vb);
@@ -521,10 +511,10 @@ struct vb2_buf_ops {
* @non_coherent_mem: when set queue will attempt to allocate buffers using
* non-coherent memory.
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
- * driver can set this to a mutex to let the v4l2 core serialize
- * the queuing ioctls. If the driver wants to handle locking
- * itself, then this should be set to NULL. This lock is not used
- * by the videobuf2 core API.
+ * driver must set this to a mutex to let the v4l2 core serialize
+ * the queuing ioctls. This lock is used when waiting for a new
+ * buffer to arrive: it is released while waiting and retaken once
+ * the new buffer has arrived.
* @owner: The filehandle that 'owns' the buffers, i.e. the filehandle
* that called reqbufs, create_buffers or started fileio.
* This field is not used by the videobuf2 core API, but it allows
@@ -680,8 +670,6 @@ struct vb2_queue {
* called. Used to check for unbalanced ops.
*/
u32 cnt_queue_setup;
- u32 cnt_wait_prepare;
- u32 cnt_wait_finish;
u32 cnt_prepare_streaming;
u32 cnt_start_streaming;
u32 cnt_stop_streaming;
@@ -766,8 +754,7 @@ void vb2_discard_done(struct vb2_queue *q);
* @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This function will wait until all buffers that have been given to the driver
- * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It
- * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair.
+ * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done().
* It is intended to be called with all locks taken, for example from
* &vb2_ops->stop_streaming callback.
*/
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 77ce8238ab30..71d2864fb235 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -367,24 +367,6 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
*/
void vb2_video_unregister_device(struct video_device *vdev);
-/**
- * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
- *
- * @vq: pointer to &struct vb2_queue
- *
- * ..note:: only use if vq->lock is non-NULL.
- */
-void vb2_ops_wait_prepare(struct vb2_queue *vq);
-
-/**
- * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue
- *
- * @vq: pointer to &struct vb2_queue
- *
- * ..note:: only use if vq->lock is non-NULL.
- */
-void vb2_ops_wait_finish(struct vb2_queue *vq);
-
struct media_request;
int vb2_request_validate(struct media_request *req);
void vb2_request_queue(struct media_request *req);
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index d40e978126e3..d3ff48a2fbe0 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
+#include <net/netns/vsock.h>
#include <net/sock.h>
#include <uapi/linux/vm_sockets.h>
@@ -124,7 +125,7 @@ struct vsock_transport {
size_t len, int flags);
int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
struct msghdr *, size_t len);
- bool (*dgram_allow)(u32 cid, u32 port);
+ bool (*dgram_allow)(struct vsock_sock *vsk, u32 cid, u32 port);
/* STREAM. */
/* TODO: stream_bind() */
@@ -136,14 +137,14 @@ struct vsock_transport {
s64 (*stream_has_space)(struct vsock_sock *);
u64 (*stream_rcvhiwat)(struct vsock_sock *);
bool (*stream_is_active)(struct vsock_sock *);
- bool (*stream_allow)(u32 cid, u32 port);
+ bool (*stream_allow)(struct vsock_sock *vsk, u32 cid, u32 port);
/* SEQ_PACKET. */
ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
int flags);
int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg,
size_t len);
- bool (*seqpacket_allow)(u32 remote_cid);
+ bool (*seqpacket_allow)(struct vsock_sock *vsk, u32 remote_cid);
u32 (*seqpacket_has_data)(struct vsock_sock *vsk);
/* Notification. */
@@ -216,6 +217,11 @@ void vsock_remove_connected(struct vsock_sock *vsk);
struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
+struct sock *vsock_find_bound_socket_net(struct sockaddr_vm *addr,
+ struct net *net);
+struct sock *vsock_find_connected_socket_net(struct sockaddr_vm *src,
+ struct sockaddr_vm *dst,
+ struct net *net);
void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(struct vsock_transport *transport,
void (*fn)(struct sock *sk));
@@ -256,4 +262,53 @@ static inline bool vsock_msgzerocopy_allow(const struct vsock_transport *t)
{
return t->msgzerocopy_allow && t->msgzerocopy_allow();
}
+
+static inline enum vsock_net_mode vsock_net_mode(struct net *net)
+{
+ if (!net)
+ return VSOCK_NET_MODE_GLOBAL;
+
+ return READ_ONCE(net->vsock.mode);
+}
+
+static inline bool vsock_net_mode_global(struct vsock_sock *vsk)
+{
+ return vsock_net_mode(sock_net(sk_vsock(vsk))) == VSOCK_NET_MODE_GLOBAL;
+}
+
+static inline void vsock_net_set_child_mode(struct net *net,
+ enum vsock_net_mode mode)
+{
+ WRITE_ONCE(net->vsock.child_ns_mode, mode);
+}
+
+static inline enum vsock_net_mode vsock_net_child_mode(struct net *net)
+{
+ return READ_ONCE(net->vsock.child_ns_mode);
+}
+
+/* Return true if two namespaces pass the mode rules. Otherwise, return false.
+ *
+ * A NULL namespace is treated as VSOCK_NET_MODE_GLOBAL.
+ *
+ * Read more about modes in the comment header of net/vmw_vsock/af_vsock.c.
+ */
+static inline bool vsock_net_check_mode(struct net *ns0, struct net *ns1)
+{
+ enum vsock_net_mode mode0, mode1;
+
+ /* Any vsocks within the same network namespace are always reachable,
+ * regardless of the mode.
+ */
+ if (net_eq(ns0, ns1))
+ return true;
+
+ mode0 = vsock_net_mode(ns0);
+ mode1 = vsock_net_mode(ns1);
+
+ /* Different namespaces are only reachable if they are both
+ * global mode.
+ */
+ return mode0 == VSOCK_NET_MODE_GLOBAL && mode0 == mode1;
+}
#endif /* __AF_VSOCK_H__ */
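The mode check is meant to gate cross-namespace delivery together with the new
*_net lookup helpers. A hedged sketch of how a receive path might combine
them; only the helpers come from this header, the surrounding function is
illustrative:

	/* Sketch: accept a bound socket only if namespace modes allow it. */
	static struct sock *example_vsock_rcv_lookup(struct sockaddr_vm *addr,
						     struct net *src_net)
	{
		struct sock *sk = vsock_find_bound_socket_net(addr, src_net);

		if (sk && !vsock_net_check_mode(sock_net(sk), src_net)) {
			sock_put(sk);	/* assuming the lookup held a ref */
			return NULL;
		}

		return sk;
	}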
diff --git a/include/net/ax25.h b/include/net/ax25.h
index a7bba42dde15..ad3d7626130e 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -116,10 +116,6 @@ enum {
AX25_PROTO_STD_DUPLEX,
#ifdef CONFIG_AX25_DAMA_SLAVE
AX25_PROTO_DAMA_SLAVE,
-#ifdef CONFIG_AX25_DAMA_MASTER
- AX25_PROTO_DAMA_MASTER,
-#define AX25_PROTO_MAX AX25_PROTO_DAMA_MASTER
-#endif
#endif
__AX25_PROTO_MAX,
AX25_PROTO_MAX = __AX25_PROTO_MAX -1
@@ -138,7 +134,7 @@ enum {
AX25_VALUES_IDLE, /* Connected mode idle timer */
AX25_VALUES_N2, /* Default N2 value */
AX25_VALUES_PACLEN, /* AX.25 MTU */
- AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave, DAMA Master */
+ AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave */
#ifdef CONFIG_AX25_DAMA_SLAVE
AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */
#endif
@@ -226,7 +222,7 @@ typedef struct ax25_dev {
struct net_device *forward;
struct ctl_table_header *sysheader;
int values[AX25_MAX_VALUES];
-#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
+#ifdef CONFIG_AX25_DAMA_SLAVE
ax25_dama_info dama;
#endif
refcount_t refcount;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index d46ed9011ee5..69eed69f7f26 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -130,21 +130,30 @@ struct bt_voice {
#define BT_RCVMTU 13
#define BT_PHY 14
-#define BT_PHY_BR_1M_1SLOT 0x00000001
-#define BT_PHY_BR_1M_3SLOT 0x00000002
-#define BT_PHY_BR_1M_5SLOT 0x00000004
-#define BT_PHY_EDR_2M_1SLOT 0x00000008
-#define BT_PHY_EDR_2M_3SLOT 0x00000010
-#define BT_PHY_EDR_2M_5SLOT 0x00000020
-#define BT_PHY_EDR_3M_1SLOT 0x00000040
-#define BT_PHY_EDR_3M_3SLOT 0x00000080
-#define BT_PHY_EDR_3M_5SLOT 0x00000100
-#define BT_PHY_LE_1M_TX 0x00000200
-#define BT_PHY_LE_1M_RX 0x00000400
-#define BT_PHY_LE_2M_TX 0x00000800
-#define BT_PHY_LE_2M_RX 0x00001000
-#define BT_PHY_LE_CODED_TX 0x00002000
-#define BT_PHY_LE_CODED_RX 0x00004000
+#define BT_PHY_BR_1M_1SLOT BIT(0)
+#define BT_PHY_BR_1M_3SLOT BIT(1)
+#define BT_PHY_BR_1M_5SLOT BIT(2)
+#define BT_PHY_EDR_2M_1SLOT BIT(3)
+#define BT_PHY_EDR_2M_3SLOT BIT(4)
+#define BT_PHY_EDR_2M_5SLOT BIT(5)
+#define BT_PHY_EDR_3M_1SLOT BIT(6)
+#define BT_PHY_EDR_3M_3SLOT BIT(7)
+#define BT_PHY_EDR_3M_5SLOT BIT(8)
+#define BT_PHY_LE_1M_TX BIT(9)
+#define BT_PHY_LE_1M_RX BIT(10)
+#define BT_PHY_LE_2M_TX BIT(11)
+#define BT_PHY_LE_2M_RX BIT(12)
+#define BT_PHY_LE_CODED_TX BIT(13)
+#define BT_PHY_LE_CODED_RX BIT(14)
+
+#define BT_PHY_BREDR_MASK (BT_PHY_BR_1M_1SLOT | BT_PHY_BR_1M_3SLOT | \
+ BT_PHY_BR_1M_5SLOT | BT_PHY_EDR_2M_1SLOT | \
+ BT_PHY_EDR_2M_3SLOT | BT_PHY_EDR_2M_5SLOT | \
+ BT_PHY_EDR_3M_1SLOT | BT_PHY_EDR_3M_3SLOT | \
+ BT_PHY_EDR_3M_5SLOT)
+#define BT_PHY_LE_MASK (BT_PHY_LE_1M_TX | BT_PHY_LE_1M_RX | \
+ BT_PHY_LE_2M_TX | BT_PHY_LE_2M_RX | \
+ BT_PHY_LE_CODED_TX | BT_PHY_LE_CODED_RX)
#define BT_MODE 15
@@ -173,7 +182,7 @@ struct bt_iso_io_qos {
__u32 interval;
__u16 latency;
__u16 sdu;
- __u8 phy;
+ __u8 phys;
__u8 rtn;
};
@@ -212,9 +221,9 @@ struct bt_iso_qos {
};
};
-#define BT_ISO_PHY_1M 0x01
-#define BT_ISO_PHY_2M 0x02
-#define BT_ISO_PHY_CODED 0x04
+#define BT_ISO_PHY_1M BIT(0)
+#define BT_ISO_PHY_2M BIT(1)
+#define BT_ISO_PHY_CODED BIT(2)
#define BT_ISO_PHY_ANY (BT_ISO_PHY_1M | BT_ISO_PHY_2M | \
BT_ISO_PHY_CODED)
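Expressing the PHY bits with BIT() also makes checks against the new group
masks cheap. A small sketch of validating a user-supplied PHY set; the helper
is invented for illustration:

	/* Sketch: reject empty, unknown or out-of-scope PHY selections. */
	static bool example_phys_valid(u32 phys, bool le_only)
	{
		if (!phys || (phys & ~(BT_PHY_BREDR_MASK | BT_PHY_LE_MASK)))
			return false;

		if (le_only && (phys & BT_PHY_BREDR_MASK))
			return false;

		return true;
	}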
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index a27cd3626b87..89ad9470fa71 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -654,6 +654,8 @@ enum {
#define HCI_LE_ISO_BROADCASTER 0x40
#define HCI_LE_ISO_SYNC_RECEIVER 0x80
#define HCI_LE_LL_EXT_FEATURE 0x80
+#define HCI_LE_CS 0x40
+#define HCI_LE_CS_HOST 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -1883,6 +1885,15 @@ struct hci_cp_le_set_default_phy {
#define HCI_LE_SET_PHY_2M 0x02
#define HCI_LE_SET_PHY_CODED 0x04
+#define HCI_OP_LE_SET_PHY 0x2032
+struct hci_cp_le_set_phy {
+ __le16 handle;
+ __u8 all_phys;
+ __u8 tx_phys;
+ __u8 rx_phys;
+ __le16 phy_opts;
+} __packed;
+
#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
struct hci_cp_le_set_ext_scan_params {
__u8 own_addr_type;
@@ -2136,8 +2147,8 @@ struct hci_cis_params {
__u8 cis_id;
__le16 c_sdu;
__le16 p_sdu;
- __u8 c_phy;
- __u8 p_phy;
+ __u8 c_phys;
+ __u8 p_phys;
__u8 c_rtn;
__u8 p_rtn;
} __packed;
@@ -2269,6 +2280,204 @@ struct hci_cp_le_read_all_remote_features {
__u8 pages;
} __packed;
+/* Channel Sounding Commands */
+#define HCI_OP_LE_CS_RD_LOCAL_SUPP_CAP 0x2089
+struct hci_rp_le_cs_rd_local_supp_cap {
+ __u8 status;
+ __u8 num_config_supported;
+ __le16 max_consecutive_procedures_supported;
+ __u8 num_antennas_supported;
+ __u8 max_antenna_paths_supported;
+ __u8 roles_supported;
+ __u8 modes_supported;
+ __u8 rtt_capability;
+ __u8 rtt_aa_only_n;
+ __u8 rtt_sounding_n;
+ __u8 rtt_random_payload_n;
+ __le16 nadm_sounding_capability;
+ __le16 nadm_random_capability;
+ __u8 cs_sync_phys_supported;
+ __le16 subfeatures_supported;
+ __le16 t_ip1_times_supported;
+ __le16 t_ip2_times_supported;
+ __le16 t_fcs_times_supported;
+ __le16 t_pm_times_supported;
+ __u8 t_sw_time_supported;
+ __u8 tx_snr_capability;
+} __packed;
+
+#define HCI_OP_LE_CS_RD_RMT_SUPP_CAP 0x208A
+struct hci_cp_le_cs_rd_rmt_supp_cap {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_WR_CACHED_RMT_SUPP_CAP 0x208B
+struct hci_cp_le_cs_wr_cached_rmt_supp_cap {
+ __le16 handle;
+ __u8 num_config_supported;
+ __le16 max_consecutive_procedures_supported;
+ __u8 num_antennas_supported;
+ __u8 max_antenna_paths_supported;
+ __u8 roles_supported;
+ __u8 modes_supported;
+ __u8 rtt_capability;
+ __u8 rtt_aa_only_n;
+ __u8 rtt_sounding_n;
+ __u8 rtt_random_payload_n;
+ __le16 nadm_sounding_capability;
+ __le16 nadm_random_capability;
+ __u8 cs_sync_phys_supported;
+ __le16 subfeatures_supported;
+ __le16 t_ip1_times_supported;
+ __le16 t_ip2_times_supported;
+ __le16 t_fcs_times_supported;
+ __le16 t_pm_times_supported;
+ __u8 t_sw_time_supported;
+ __u8 tx_snr_capability;
+} __packed;
+
+struct hci_rp_le_cs_wr_cached_rmt_supp_cap {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_SEC_ENABLE 0x208C
+struct hci_cp_le_cs_sec_enable {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_SET_DEFAULT_SETTINGS 0x208D
+struct hci_cp_le_cs_set_default_settings {
+ __le16 handle;
+ __u8 role_enable;
+ __u8 cs_sync_ant_sel;
+ __s8 max_tx_power;
+} __packed;
+
+struct hci_rp_le_cs_set_default_settings {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_RD_RMT_FAE_TABLE 0x208E
+struct hci_cp_le_cs_rd_rmt_fae_table {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_WR_CACHED_RMT_FAE_TABLE 0x208F
+struct hci_cp_le_cs_wr_cached_rmt_fae_table {
+ __le16 handle;
+ __u8 remote_fae_table[72];
+} __packed;
+
+struct hci_rp_le_cs_wr_cached_rmt_fae_table {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_CREATE_CONFIG 0x2090
+struct hci_cp_le_cs_create_config {
+ __le16 handle;
+ __u8 config_id;
+ __u8 create_context;
+ __u8 main_mode_type;
+ __u8 sub_mode_type;
+ __u8 min_main_mode_steps;
+ __u8 max_main_mode_steps;
+ __u8 main_mode_repetition;
+ __u8 mode_0_steps;
+ __u8 role;
+ __u8 rtt_type;
+ __u8 cs_sync_phy;
+ __u8 channel_map[10];
+ __u8 channel_map_repetition;
+ __u8 channel_selection_type;
+ __u8 ch3c_shape;
+ __u8 ch3c_jump;
+ __u8 reserved;
+} __packed;
+
+#define HCI_OP_LE_CS_REMOVE_CONFIG 0x2091
+struct hci_cp_le_cs_remove_config {
+ __le16 handle;
+ __u8 config_id;
+} __packed;
+
+#define HCI_OP_LE_CS_SET_CH_CLASSIFICATION 0x2092
+struct hci_cp_le_cs_set_ch_classification {
+ __u8 ch_classification[10];
+} __packed;
+
+struct hci_rp_le_cs_set_ch_classification {
+ __u8 status;
+} __packed;
+
+#define HCI_OP_LE_CS_SET_PROC_PARAM 0x2093
+struct hci_cp_le_cs_set_proc_param {
+ __le16 handle;
+ __u8 config_id;
+ __le16 max_procedure_len;
+ __le16 min_procedure_interval;
+ __le16 max_procedure_interval;
+ __le16 max_procedure_count;
+ __u8 min_subevent_len[3];
+ __u8 max_subevent_len[3];
+ __u8 tone_antenna_config_selection;
+ __u8 phy;
+ __u8 tx_power_delta;
+ __u8 preferred_peer_antenna;
+ __u8 snr_control_initiator;
+ __u8 snr_control_reflector;
+} __packed;
+
+struct hci_rp_le_cs_set_proc_param {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_CS_SET_PROC_ENABLE 0x2094
+struct hci_cp_le_cs_set_proc_enable {
+ __le16 handle;
+ __u8 config_id;
+ __u8 enable;
+} __packed;
+
+#define HCI_OP_LE_CS_TEST 0x2095
+struct hci_cp_le_cs_test {
+ __u8 main_mode_type;
+ __u8 sub_mode_type;
+ __u8 main_mode_repetition;
+ __u8 mode_0_steps;
+ __u8 role;
+ __u8 rtt_type;
+ __u8 cs_sync_phy;
+ __u8 cs_sync_antenna_selection;
+ __u8 subevent_len[3];
+ __le16 subevent_interval;
+ __u8 max_num_subevents;
+ __u8 transmit_power_level;
+ __u8 t_ip1_time;
+ __u8 t_ip2_time;
+ __u8 t_fcs_time;
+ __u8 t_pm_time;
+ __u8 t_sw_time;
+ __u8 tone_antenna_config_selection;
+ __u8 reserved;
+ __u8 snr_control_initiator;
+ __u8 snr_control_reflector;
+ __le16 drbg_nonce;
+ __u8 channel_map_repetition;
+ __le16 override_config;
+ __u8 override_parameters_length;
+ __u8 override_parameters_data[];
+} __packed;
+
+struct hci_rp_le_cs_test {
+ __u8 status;
+} __packed;
+
+#define HCI_OP_LE_CS_TEST_END 0x2096
+
/* ---- HCI Events ---- */
struct hci_ev_status {
__u8 status;
@@ -2960,6 +3169,129 @@ struct hci_evt_le_read_all_remote_features_complete {
__u8 features[248];
} __packed;
+/* Channel Sounding Events */
+#define HCI_EVT_LE_CS_READ_RMT_SUPP_CAP_COMPLETE 0x2C
+struct hci_evt_le_cs_read_rmt_supp_cap_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 num_configs_supp;
+ __le16 max_consec_proc_supp;
+ __u8 num_ant_supp;
+ __u8 max_ant_path_supp;
+ __u8 roles_supp;
+ __u8 modes_supp;
+ __u8 rtt_cap;
+ __u8 rtt_aa_only_n;
+ __u8 rtt_sounding_n;
+ __u8 rtt_rand_payload_n;
+ __le16 nadm_sounding_cap;
+ __le16 nadm_rand_cap;
+ __u8 cs_sync_phys_supp;
+ __le16 sub_feat_supp;
+ __le16 t_ip1_times_supp;
+ __le16 t_ip2_times_supp;
+ __le16 t_fcs_times_supp;
+ __le16 t_pm_times_supp;
+ __u8 t_sw_times_supp;
+ __u8 tx_snr_cap;
+} __packed;
+
+#define HCI_EVT_LE_CS_READ_RMT_FAE_TABLE_COMPLETE 0x2D
+struct hci_evt_le_cs_read_rmt_fae_table_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 remote_fae_table[72];
+} __packed;
+
+#define HCI_EVT_LE_CS_SECURITY_ENABLE_COMPLETE 0x2E
+struct hci_evt_le_cs_security_enable_complete {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_EVT_LE_CS_CONFIG_COMPLETE 0x2F
+struct hci_evt_le_cs_config_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 config_id;
+ __u8 action;
+ __u8 main_mode_type;
+ __u8 sub_mode_type;
+ __u8 min_main_mode_steps;
+ __u8 max_main_mode_steps;
+ __u8 main_mode_rep;
+ __u8 mode_0_steps;
+ __u8 role;
+ __u8 rtt_type;
+ __u8 cs_sync_phy;
+ __u8 channel_map[10];
+ __u8 channel_map_rep;
+ __u8 channel_sel_type;
+ __u8 ch3c_shape;
+ __u8 ch3c_jump;
+ __u8 reserved;
+ __u8 t_ip1_time;
+ __u8 t_ip2_time;
+ __u8 t_fcs_time;
+ __u8 t_pm_time;
+} __packed;
+
+#define HCI_EVT_LE_CS_PROCEDURE_ENABLE_COMPLETE 0x30
+struct hci_evt_le_cs_procedure_enable_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 config_id;
+ __u8 state;
+ __u8 tone_ant_config_sel;
+ __s8 sel_tx_pwr;
+ __u8 sub_evt_len[3];
+ __u8 sub_evts_per_evt;
+ __le16 sub_evt_intrvl;
+ __le16 evt_intrvl;
+ __le16 proc_intrvl;
+ __le16 proc_counter;
+ __le16 max_proc_len;
+} __packed;
+
+#define HCI_EVT_LE_CS_SUBEVENT_RESULT 0x31
+struct hci_evt_le_cs_subevent_result {
+ __le16 handle;
+ __u8 config_id;
+ __le16 start_acl_conn_evt_counter;
+ __le16 proc_counter;
+ __le16 freq_comp;
+ __u8 ref_pwr_lvl;
+ __u8 proc_done_status;
+ __u8 subevt_done_status;
+ __u8 abort_reason;
+ __u8 num_ant_paths;
+ __u8 num_steps_reported;
+ __u8 step_mode[0]; /* depends on num_steps_reported */
+ __u8 step_channel[0]; /* depends on num_steps_reported */
+ __u8 step_data_length[0]; /* depends on num_steps_reported */
+ __u8 step_data[0]; /* depends on num_steps_reported */
+} __packed;
+
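The four zero-length markers flag per-step data that cannot be indexed through
the struct; a consumer has to walk the records by hand. A hedged sketch of
such a walk, assuming each step is laid out as mode, channel, data_length and
then data_length bytes of data:

	/* Sketch: iterate the variable-length step records. */
	static int example_parse_cs_steps(const u8 *data, size_t len,
					  u8 num_steps)
	{
		while (num_steps--) {
			u8 step_len;

			if (len < 3)
				return -EINVAL;

			/* data[0] is step_mode, data[1] is step_channel */
			step_len = data[2];
			data += 3;
			len -= 3;

			if (len < step_len)
				return -EINVAL;

			/* mode-specific decoding of data[0..step_len) here */
			data += step_len;
			len -= step_len;
		}

		return 0;
	}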
+#define HCI_EVT_LE_CS_SUBEVENT_RESULT_CONTINUE 0x32
+struct hci_evt_le_cs_subevent_result_continue {
+ __le16 handle;
+ __u8 config_id;
+ __u8 proc_done_status;
+ __u8 subevt_done_status;
+ __u8 abort_reason;
+ __u8 num_ant_paths;
+ __u8 num_steps_reported;
+ __u8 step_mode[0]; /* depends on num_steps_reported */
+ __u8 step_channel[0]; /* depends on num_steps_reported */
+ __u8 step_data_length[0]; /* depends on num_steps_reported */
+ __u8 step_data[0]; /* depends on num_steps_reported */
+} __packed;
+
+#define HCI_EVT_LE_CS_TEST_END_COMPLETE 0x33
+struct hci_evt_le_cs_test_end_complete {
+ __u8 status;
+} __packed;
+
#define HCI_EV_VENDOR 0xff
/* Internal events generated by Bluetooth stack */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 4263e71a23ef..a7bffb908c1e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -730,6 +730,8 @@ struct hci_conn {
__u16 le_per_adv_data_offset;
__u8 le_adv_phy;
__u8 le_adv_sec_phy;
+ __u8 le_tx_def_phys;
+ __u8 le_rx_def_phys;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -2071,6 +2073,12 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define ll_ext_feature_capable(dev) \
((dev)->le_features[7] & HCI_LE_LL_EXT_FEATURE)
+/* Channel sounding support */
+#define le_cs_capable(dev) \
+ ((dev)->le_features[5] & HCI_LE_CS)
+#define le_cs_host_capable(dev) \
+ ((dev)->le_features[5] & HCI_LE_CS_HOST)
+
#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
(!hci_test_quirk((dev), HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG)))
@@ -2334,6 +2342,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event);
u32 hci_conn_get_phy(struct hci_conn *conn);
+int hci_conn_set_phy(struct hci_conn *conn, u32 phys);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 56076bbc981d..73e494b2591d 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -191,3 +191,6 @@ int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_past_sync(struct hci_conn *conn, struct hci_conn *le);
int hci_le_read_remote_features(struct hci_conn *conn);
+
+int hci_acl_change_pkt_type(struct hci_conn *conn, u16 pkt_type);
+int hci_le_set_phy(struct hci_conn *conn, u8 tx_phys, u8 rx_phys);
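hci_le_set_phy() presumably drives the new HCI_OP_LE_SET_PHY command; a hedged
sketch of what encoding it could look like on the hci_sync path, illustrative
rather than the actual implementation:

	/* Sketch: request specific TX/RX PHY preferences from the controller. */
	static int example_le_set_phy_sync(struct hci_dev *hdev, u16 handle,
					   u8 tx_phys, u8 rx_phys)
	{
		struct hci_cp_le_set_phy cp;

		memset(&cp, 0, sizeof(cp));
		cp.handle = cpu_to_le16(handle);
		cp.all_phys = 0x00;		/* host prefers both directions */
		cp.tx_phys = tx_phys;
		cp.rx_phys = rx_phys;
		cp.phy_opts = cpu_to_le16(0x0000);

		return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PHY,
					     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	}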
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 00e182a22720..ec3af01e4db9 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -655,8 +655,7 @@ struct l2cap_conn {
struct sk_buff *rx_skb;
__u32 rx_len;
- __u8 tx_ident;
- struct mutex ident_lock;
+ struct ida tx_ida;
struct sk_buff_head pending_rx;
struct work_struct pending_rx_work;
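Moving from tx_ident plus a dedicated mutex to an IDA delegates both
allocation and mutual exclusion to the allocator. A sketch of the likely
allocate/free pattern; L2CAP signalling idents live in 1..255 with 0 reserved,
and the helper names are invented:

	/* Sketch: IDA-backed ident management. */
	static u8 example_get_ident(struct l2cap_conn *conn)
	{
		int id = ida_alloc_range(&conn->tx_ida, 1, 255, GFP_ATOMIC);

		return id < 0 ? 0 : id;	/* 0 signals exhaustion here */
	}

	static void example_put_ident(struct l2cap_conn *conn, u8 ident)
	{
		if (ident)
			ida_free(&conn->tx_ida, ident);
	}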
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 462078403557..4ad5521e7731 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -254,6 +254,7 @@ struct bonding {
struct delayed_work ad_work;
struct delayed_work mcast_work;
struct delayed_work slave_arr_work;
+ struct delayed_work peer_notify_work;
#ifdef CONFIG_DEBUG_FS
/* debugging support via debugfs */
struct dentry *debug_dir;
@@ -710,6 +711,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+void bond_peer_notify_work_rearm(struct bonding *bond, unsigned long delay);
void bond_work_init_all(struct bonding *bond);
void bond_work_cancel_all(struct bonding *bond);
diff --git a/include/net/can.h b/include/net/can.h
new file mode 100644
index 000000000000..6db9e826f0e0
--- /dev/null
+++ b/include/net/can.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * net/can.h
+ *
+ * Definitions for the CAN network socket buffer extensions
+ *
+ * Copyright (C) 2026 Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ */
+
+#ifndef _NET_CAN_H
+#define _NET_CAN_H
+
+/**
+ * struct can_skb_ext - skb extensions for CAN specific content
+ * @can_iif: ifindex of the first interface the CAN frame appeared on
+ * @can_framelen: cached echo CAN frame length for bql
+ * @can_gw_hops: can-gw CAN frame time-to-live counter
+ * @can_ext_flags: CAN skb extensions flags
+ */
+struct can_skb_ext {
+ int can_iif;
+ u16 can_framelen;
+ u8 can_gw_hops;
+ u8 can_ext_flags;
+};
+
+#endif /* _NET_CAN_H */
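The layout is kept small so it can ride in the generic skb extension area. A
hedged usage sketch; SKB_EXT_CAN is an assumed extension id that this header
does not define:

	/* Sketch: record the ingress ifindex on a CAN skb. */
	static int example_can_tag_skb(struct sk_buff *skb, int ifindex)
	{
		struct can_skb_ext *ext;

		ext = skb_ext_add(skb, SKB_EXT_CAN);	/* assumed ext id */
		if (!ext)
			return -ENOMEM;

		ext->can_iif = ifindex;
		return 0;
	}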
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2900202588a5..fc01de19c798 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2025 Intel Corporation
+ * Copyright (C) 2018-2026 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -126,6 +126,7 @@ struct wiphy;
* @IEEE80211_CHAN_NO_4MHZ: 4 MHz bandwidth is not permitted on this channel.
* @IEEE80211_CHAN_NO_8MHZ: 8 MHz bandwidth is not permitted on this channel.
* @IEEE80211_CHAN_NO_16MHZ: 16 MHz bandwidth is not permitted on this channel.
+ * @IEEE80211_CHAN_NO_UHR: UHR operation is not permitted on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = BIT(0),
@@ -143,6 +144,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_10MHZ = BIT(12),
IEEE80211_CHAN_NO_HE = BIT(13),
/* can use free bits here */
+ IEEE80211_CHAN_NO_UHR = BIT(18),
IEEE80211_CHAN_NO_320MHZ = BIT(19),
IEEE80211_CHAN_NO_EHT = BIT(20),
IEEE80211_CHAN_DFS_CONCURRENT = BIT(21),
@@ -429,6 +431,18 @@ struct ieee80211_sta_eht_cap {
u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
};
+/**
+ * struct ieee80211_sta_uhr_cap - STA's UHR capabilities
+ * @has_uhr: true iff UHR is supported and data is valid
+ * @mac: fixed MAC capabilities
+ * @phy: fixed PHY capabilities
+ */
+struct ieee80211_sta_uhr_cap {
+ bool has_uhr;
+ struct ieee80211_uhr_cap_mac mac;
+ struct ieee80211_uhr_cap_phy phy;
+};
+
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/*
@@ -454,6 +468,7 @@ struct ieee80211_sta_eht_cap {
* @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
* 6 GHz band channel (and 0 may be valid value).
* @eht_cap: STA's EHT capabilities
+ * @uhr_cap: STA's UHR capabilities
* @vendor_elems: vendor element(s) to advertise
* @vendor_elems.data: vendor element(s) data
* @vendor_elems.len: vendor element(s) length
@@ -463,6 +478,7 @@ struct ieee80211_sband_iftype_data {
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
+ struct ieee80211_sta_uhr_cap uhr_cap;
struct {
const u8 *data;
unsigned int len;
@@ -705,6 +721,26 @@ ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
}
/**
+ * ieee80211_get_uhr_iftype_cap - return UHR capabilities for an sband's iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to the struct ieee80211_sta_uhr_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_uhr_cap *
+ieee80211_get_uhr_iftype_cap(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, iftype);
+
+ if (data && data->uhr_cap.has_uhr)
+ return &data->uhr_cap;
+
+ return NULL;
+}
+
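Usage mirrors the existing HE/EHT lookups: the helper folds the has_uhr check
into the NULL return. A trivial sketch with an invented caller:

	/* Sketch: does this band advertise UHR for station interfaces? */
	static bool example_band_has_uhr(const struct ieee80211_supported_band *sband)
	{
		return ieee80211_get_uhr_iftype_cap(sband,
						    NL80211_IFTYPE_STATION) != NULL;
	}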
+/**
* wiphy_read_of_freq_limits - read frequency limits from device tree
*
* @wiphy: the wireless device to get extra limits for
@@ -1486,6 +1522,7 @@ struct cfg80211_s1g_short_beacon {
* @he_cap: HE capabilities (or %NULL if HE isn't enabled)
* @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled)
* @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled)
+ * @uhr_oper: UHR operation (or %NULL if UHR isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @twt_responder: Enable Target Wait Time
@@ -1525,6 +1562,7 @@ struct cfg80211_ap_settings {
const struct ieee80211_he_operation *he_oper;
const struct ieee80211_eht_cap_elem *eht_cap;
const struct ieee80211_eht_operation *eht_oper;
+ const struct ieee80211_uhr_operation *uhr_oper;
bool ht_required, vht_required, he_required, sae_h2e_required;
bool twt_responder;
u32 flags;
@@ -1698,6 +1736,8 @@ struct sta_txpwr {
* @eht_capa: EHT capabilities of station
* @eht_capa_len: the length of the EHT capabilities
* @s1g_capa: S1G capabilities of station
+ * @uhr_capa: UHR capabilities of the station
+ * @uhr_capa_len: the length of the UHR capabilities
*/
struct link_station_parameters {
const u8 *mld_mac;
@@ -1717,6 +1757,8 @@ struct link_station_parameters {
const struct ieee80211_eht_cap_elem *eht_capa;
u8 eht_capa_len;
const struct ieee80211_s1g_cap *s1g_capa;
+ const struct ieee80211_uhr_cap *uhr_capa;
+ u8 uhr_capa_len;
};
/**
@@ -1785,6 +1827,7 @@ struct cfg80211_ttlm_params {
* present/updated
* @eml_cap: EML capabilities of this station
* @link_sta_params: link related params.
+ * @epp_peer: EPP peer indication
*/
struct station_parameters {
struct net_device *vlan;
@@ -1811,6 +1854,7 @@ struct station_parameters {
bool eml_cap_present;
u16 eml_cap;
struct link_station_parameters link_sta_params;
+ bool epp_peer;
};
/**
@@ -1896,6 +1940,11 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
* @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
* @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS
+ * @RATE_INFO_FLAGS_UHR_MCS: UHR MCS information
+ * @RATE_INFO_FLAGS_UHR_ELR_MCS: UHR ELR MCS was used
+ * (set together with @RATE_INFO_FLAGS_UHR_MCS)
+ * @RATE_INFO_FLAGS_UHR_IM: UHR Interference Mitigation
+ * was used
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
@@ -1907,6 +1956,9 @@ enum rate_info_flags {
RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6),
RATE_INFO_FLAGS_EHT_MCS = BIT(7),
RATE_INFO_FLAGS_S1G_MCS = BIT(8),
+ RATE_INFO_FLAGS_UHR_MCS = BIT(9),
+ RATE_INFO_FLAGS_UHR_ELR_MCS = BIT(10),
+ RATE_INFO_FLAGS_UHR_IM = BIT(11),
};
/**
@@ -1922,7 +1974,7 @@ enum rate_info_flags {
* @RATE_INFO_BW_160: 160 MHz bandwidth
* @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
* @RATE_INFO_BW_320: 320 MHz bandwidth
- * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
+ * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT/UHR RU allocation
* @RATE_INFO_BW_1: 1 MHz bandwidth
* @RATE_INFO_BW_2: 2 MHz bandwidth
* @RATE_INFO_BW_4: 4 MHz bandwidth
@@ -1953,7 +2005,7 @@ enum rate_info_bw {
*
* @flags: bitflag of flags from &enum rate_info_flags
* @legacy: bitrate in 100kbit/s for 802.11abg
- * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate
+ * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G/UHR rate
* @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
* @he_gi: HE guard interval (from &enum nl80211_he_gi)
@@ -3260,6 +3312,7 @@ struct cfg80211_ml_reconf_req {
* Drivers shall disable MLO features for the current association if this
* flag is not set.
* @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
+ * @ASSOC_REQ_DISABLE_UHR: Disable UHR
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
@@ -3270,6 +3323,7 @@ enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_EHT = BIT(5),
CONNECT_REQ_MLO_SUPPORT = BIT(6),
ASSOC_REQ_SPP_AMSDU = BIT(7),
+ ASSOC_REQ_DISABLE_UHR = BIT(8),
};
/**
@@ -4187,6 +4241,7 @@ struct cfg80211_ftm_responder_stats {
* @num_bursts_exp: actual number of bursts exponent negotiated
* @burst_duration: actual burst duration negotiated
* @ftms_per_burst: actual FTMs per burst negotiated
+ * @burst_period: actual burst period negotiated in units of 100ms
* @lci_len: length of LCI information (if present)
* @civicloc_len: length of civic location information (if present)
* @lci: LCI data (may be %NULL)
@@ -4228,6 +4283,7 @@ struct cfg80211_pmsr_ftm_result {
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
+ u16 burst_period;
s32 rssi_avg;
s32 rssi_spread;
struct rate_info tx_rate, rx_rate;
@@ -4290,7 +4346,9 @@ struct cfg80211_pmsr_result {
* @burst_period: burst period to use
* @asap: indicates to use ASAP mode
* @num_bursts_exp: number of bursts exponent
- * @burst_duration: burst duration
+ * @burst_duration: burst duration. If @trigger_based or @non_trigger_based is
+ * set, this is the burst duration in milliseconds, and zero means the
+ * device should pick an appropriate value based on @ftms_per_burst.
* @ftms_per_burst: number of FTMs per burst
* @ftmr_retries: number of retries for FTM request
* @request_lci: request LCI information
@@ -4303,6 +4361,8 @@ struct cfg80211_pmsr_result {
* EDCA based ranging will be used.
* @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
* @trigger_based or @non_trigger_based is set.
+ * @rsta: Operate as the RSTA in the measurement. Only valid if @lmr_feedback
+ * and either @trigger_based or @non_trigger_based is set.
* @bss_color: the bss color of the responder. Optional. Set to zero to
* indicate the driver should set the BSS color. Only valid if
* @non_trigger_based or @trigger_based is set.
@@ -4318,7 +4378,8 @@ struct cfg80211_pmsr_ftm_request_peer {
request_civicloc:1,
trigger_based:1,
non_trigger_based:1,
- lmr_feedback:1;
+ lmr_feedback:1,
+ rsta:1;
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
@@ -5638,6 +5699,18 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type);
* not limited)
* @ftm.trigger_based: trigger based ranging measurement is supported
* @ftm.non_trigger_based: non trigger based ranging measurement is supported
+ * @ftm.support_6ghz: supports ranging in 6 GHz band
+ * @ftm.max_tx_ltf_rep: maximum number of TX LTF repetitions supported (0 means
+ * only one LTF, no repetitions)
+ * @ftm.max_rx_ltf_rep: maximum number of RX LTF repetitions supported (0 means
+ * only one LTF, no repetitions)
+ * @ftm.max_tx_sts: maximum number of TX STS supported (zero based)
+ * @ftm.max_rx_sts: maximum number of RX STS supported (zero based)
+ * @ftm.max_total_ltf_tx: maximum total number of LTFs that can be transmitted
+ * (0 means unknown)
+ * @ftm.max_total_ltf_rx: maximum total number of LTFs that can be received
+ * (0 means unknown)
+ * @ftm.support_rsta: supports operating as RSTA in PMSR FTM request
*/
struct cfg80211_pmsr_capabilities {
unsigned int max_peers;
@@ -5655,7 +5728,15 @@ struct cfg80211_pmsr_capabilities {
request_lci:1,
request_civicloc:1,
trigger_based:1,
- non_trigger_based:1;
+ non_trigger_based:1,
+ support_6ghz:1;
+ u8 max_tx_ltf_rep;
+ u8 max_rx_ltf_rep;
+ u8 max_tx_sts;
+ u8 max_rx_sts;
+ u8 max_total_ltf_tx;
+ u8 max_total_ltf_rx;
+ u8 support_rsta:1;
} ftm;
};
@@ -9785,6 +9866,21 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
const struct ieee80211_channel *chan);
+/**
+ * cfg80211_stop_link - stop the AP/P2P_GO link if link_id is non-negative,
+ * or stop all links on the interface.
+ *
+ * @wiphy: the wiphy
+ * @wdev: wireless device
+ * @link_id: valid link ID in case of MLO AP/P2P_GO operation, or else -1
+ * @gfp: context flags
+ *
+ * If link_id is non-negative during MLO operation, only the specified
+ * AP/P2P_GO link is stopped. If link_id is -1, or the last link is stopped,
+ * the entire interface is stopped as if the AP was stopped, the IBSS/mesh
+ * left, or the STA disconnected.
+ */
+void cfg80211_stop_link(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int link_id, gfp_t gfp);
/**
* cfg80211_stop_iface - trigger interface disconnection
@@ -9798,8 +9894,11 @@ int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
*
* Note: This doesn't need any locks and is asynchronous.
*/
-void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
- gfp_t gfp);
+static inline void
+cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, gfp_t gfp)
+{
+ cfg80211_stop_link(wiphy, wdev, -1, gfp);
+}
/**
* cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy
@@ -10147,9 +10246,9 @@ cfg80211_6ghz_power_type(u8 control, u32 client_flags)
case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
case IEEE80211_6GHZ_CTRL_REG_AP_ROLE_NOT_RELEVANT:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD:
return IEEE80211_REG_LPI_AP;
case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD:
return IEEE80211_REG_SP_AP;
case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
return IEEE80211_REG_VLP_AP;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6b2b5ed64ea4..6c17446f3dcc 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -57,6 +57,7 @@ struct tc_action;
#define DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE 29
#define DSA_TAG_PROTO_YT921X_VALUE 30
#define DSA_TAG_PROTO_MXL_GSW1XX_VALUE 31
+#define DSA_TAG_PROTO_MXL862_VALUE 32
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -91,6 +92,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_VSC73XX_8021Q = DSA_TAG_PROTO_VSC73XX_8021Q_VALUE,
DSA_TAG_PROTO_YT921X = DSA_TAG_PROTO_YT921X_VALUE,
DSA_TAG_PROTO_MXL_GSW1XX = DSA_TAG_PROTO_MXL_GSW1XX_VALUE,
+ DSA_TAG_PROTO_MXL862 = DSA_TAG_PROTO_MXL862_VALUE,
};
struct dsa_switch;
@@ -216,12 +218,6 @@ struct dsa_mall_mirror_tc_entry {
bool ingress;
};
-/* TC port policer entry */
-struct dsa_mall_policer_tc_entry {
- u32 burst;
- u64 rate_bytes_per_sec;
-};
-
/* TC matchall entry */
struct dsa_mall_tc_entry {
struct list_head list;
@@ -229,7 +225,7 @@ struct dsa_mall_tc_entry {
enum dsa_port_mall_action_type type;
union {
struct dsa_mall_mirror_tc_entry mirror;
- struct dsa_mall_policer_tc_entry policer;
+ struct flow_action_police policer;
};
};
@@ -1110,7 +1106,7 @@ struct dsa_switch_ops {
void (*port_mirror_del)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int (*port_policer_add)(struct dsa_switch *ds, int port,
- struct dsa_mall_policer_tc_entry *policer);
+ const struct flow_action_police *policer);
void (*port_policer_del)(struct dsa_switch *ds, int port);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
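With the DSA-private policer struct gone, switch drivers consume the generic
flow_action_police parameters directly. A hedged sketch of a port_policer_add
implementation; the hardware call and unit conversion are invented:

	/* Sketch: program a byte-rate policer from the generic params. */
	static int example_port_policer_add(struct dsa_switch *ds, int port,
					    const struct flow_action_police *policer)
	{
		u64 rate_kbps = div_u64(policer->rate_bytes_ps * 8, 1000);

		if (policer->peakrate_bytes_ps || policer->rate_pkt_ps)
			return -EOPNOTSUPP;	/* byte rate only here */

		return example_hw_set_policer(ds, port, rate_kbps,
					      policer->burst);
	}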
diff --git a/include/net/dst.h b/include/net/dst.h
index f8aa1239b4db..307073eae7f8 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -219,6 +219,12 @@ static inline u32 dst_mtu(const struct dst_entry *dst)
return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}
+/* Variant of dst_mtu() for IPv4 users. */
+static inline u32 dst4_mtu(const struct dst_entry *dst)
+{
+ return INDIRECT_CALL_1(dst->ops->mtu, ipv4_mtu, dst);
+}
+
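dst4_mtu() lets IPv4-only callers skip the two-way INET dispatch of dst_mtu().
With retpolines enabled, INDIRECT_CALL_1 expands to roughly the following
compare-and-branch, so the expected target becomes a direct call:

	/* Approximation of <linux/indirect_call_wrapper.h> behaviour. */
	#define INDIRECT_CALL_1(f, f1, ...) \
		(likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))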
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 596ab9791e4d..70a02ee14308 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -231,6 +231,21 @@ struct flow_action_cookie *flow_action_cookie_create(void *data,
gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
+struct flow_action_police {
+ u32 burst;
+ u64 rate_bytes_ps;
+ u64 peakrate_bytes_ps;
+ u32 avrate;
+ u16 overhead;
+ u64 burst_pkt;
+ u64 rate_pkt_ps;
+ u32 mtu;
+ struct {
+ enum flow_action_id act_id;
+ u32 extval;
+ } exceed, notexceed;
+};
+
struct flow_action_entry {
enum flow_action_id id;
u32 hw_index;
@@ -275,20 +290,7 @@ struct flow_action_entry {
u32 trunc_size;
bool truncate;
} sample;
- struct { /* FLOW_ACTION_POLICE */
- u32 burst;
- u64 rate_bytes_ps;
- u64 peakrate_bytes_ps;
- u32 avrate;
- u16 overhead;
- u64 burst_pkt;
- u64 rate_pkt_ps;
- u32 mtu;
- struct {
- enum flow_action_id act_id;
- u32 extval;
- } exceed, notexceed;
- } police;
+ struct flow_action_police police; /* FLOW_ACTION_POLICE */
struct { /* FLOW_ACTION_CT */
int action;
u16 zone;
@@ -526,7 +528,7 @@ static inline bool flow_rule_has_enc_control_flags(const u32 enc_ctrl_flags,
*
* Return: true if control flags are set, false otherwise.
*/
-static inline bool flow_rule_match_has_control_flags(struct flow_rule *rule,
+static inline bool flow_rule_match_has_control_flags(const struct flow_rule *rule,
struct netlink_ext_ack *extack)
{
struct flow_match_control match;
@@ -718,7 +720,7 @@ struct flow_offload_action {
struct flow_offload_action *offload_action_alloc(unsigned int num_actions);
static inline struct flow_rule *
-flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
+flow_cls_offload_flow_rule(const struct flow_cls_offload *flow_cmd)
{
return flow_cmd->rule;
}
diff --git a/include/net/gro.h b/include/net/gro.h
index b65f631c521d..2300b6da05b2 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -405,9 +405,8 @@ INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
- struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+struct sk_buff *udp6_gro_receive(struct list_head *, struct sk_buff *);
+int udp6_gro_complete(struct sk_buff *, int);
#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
({ \
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 745891d2e113..ece8dabd209a 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -18,7 +18,9 @@ struct sk_buff;
struct sock;
struct sockaddr;
-struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *inet6_csk_route_req(const struct sock *sk,
+ struct dst_entry *dst,
+ struct flowi6 *fl6,
const struct request_sock *req, u8 proto);
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index ea32393464a2..827b87a95dab 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -51,11 +51,25 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
return outer;
}
+/* Apply either ECT(0) or ECT(1) */
+static inline void __INET_ECN_xmit(struct sock *sk, bool use_ect_1)
+{
+ __u8 ect = use_ect_1 ? INET_ECN_ECT_1 : INET_ECN_ECT_0;
+
+ /* Mask the complete byte in case the connection alternates between
+ * ECT(0) and ECT(1).
+ */
+ inet_sk(sk)->tos &= ~INET_ECN_MASK;
+ inet_sk(sk)->tos |= ect;
+ if (inet6_sk(sk)) {
+ inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+ inet6_sk(sk)->tclass |= ect;
+ }
+}
+
static inline void INET_ECN_xmit(struct sock *sk)
{
- inet_sk(sk)->tos |= INET_ECN_ECT_0;
- if (inet6_sk(sk) != NULL)
- inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+ __INET_ECN_xmit(sk, false);
}
static inline void INET_ECN_dontxmit(struct sock *sk)
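Because __INET_ECN_xmit() masks the whole ECN field first, a caller can flip a
live connection between ECT(0) and ECT(1) without stacking bits. A sketch of
how an L4S-style congestion controller might use it; the trigger is
hypothetical:

	/* Sketch: select the ECT codepoint for this socket. */
	static void example_set_ect(struct sock *sk, bool want_ect_1)
	{
		__INET_ECN_xmit(sk, want_ect_1);
	}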
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index ac1c75975908..7cdcbed3e5cb 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -26,6 +26,8 @@
#include <net/tcp_states.h>
#include <net/l3mdev.h>
+#define IP_OPTIONS_DATA_FIXED_SIZE 40
+
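The new define names the 40-byte maximum that used to be hard-coded in struct
ip_options_data; with that wrapper gone (see below), callers size the
flexible-array tail of struct ip_options_rcu explicitly. A hedged allocation
sketch:

	/* Sketch: reserve room for the maximum 40 bytes of IP options. */
	static struct ip_options_rcu *example_alloc_opts(gfp_t gfp)
	{
		return kzalloc(sizeof(struct ip_options_rcu) +
			       IP_OPTIONS_DATA_FIXED_SIZE, gfp);
	}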
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
@@ -58,12 +60,9 @@ struct ip_options {
struct ip_options_rcu {
struct rcu_head rcu;
- struct ip_options opt;
-};
-struct ip_options_data {
- struct ip_options_rcu opt;
- char data[40];
+ /* Must be last as it ends in a flexible-array member. */
+ struct ip_options opt;
};
struct inet_request_sock {
@@ -101,10 +100,7 @@ struct inet_request_sock {
};
};
-static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
-{
- return (struct inet_request_sock *)sk;
-}
+#define inet_rsk(ptr) container_of_const(ptr, struct inet_request_sock, req)
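container_of_const() propagates the caller's constness instead of casting it
away like the old inline did. A small illustration of what the macro form
buys:

	/* With the macro, const-correctness flows through: */
	static __be16 example_rsk_rmt_port(const struct request_sock *req)
	{
		const struct inet_request_sock *ireq = inet_rsk(req);

		return ireq->ir_rmt_port;	/* read-only, no cast needed */
	}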
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
@@ -163,6 +159,13 @@ static inline bool inet_sk_bound_dev_eq(const struct net *net,
#endif
}
+struct inet6_cork {
+ struct ipv6_txoptions *opt;
+ u8 hop_limit;
+ u8 tclass;
+ u8 dontfrag:1;
+};
+
struct inet_cork {
unsigned int flags;
__be32 addr;
@@ -183,6 +186,9 @@ struct inet_cork {
struct inet_cork_full {
struct inet_cork base;
struct flowi fl;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_cork base6;
+#endif
};
struct ip_mc_socklist;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 7c5512baa4b2..a55f9bf95fe3 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -266,6 +266,12 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
+/* Variant of dst_mtu() for IPv6 users */
+static inline u32 dst6_mtu(const struct dst_entry *dst)
+{
+ return INDIRECT_CALL_1(dst->ops->mtu, ip6_mtu, dst);
+}
+
static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
{
const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 74fbf1ad8065..cc56e09525d0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -25,8 +25,6 @@ struct ip_tunnel_info;
#define SIN6_LEN_RFC2133 24
-#define IPV6_MAXPLEN 65535
-
/*
* NextHeader field of IPv6 header
*/
@@ -151,17 +149,6 @@ struct frag_hdr {
__be32 identification;
};
-/*
- * Jumbo payload option, as described in RFC 2675 2.
- */
-struct hop_jumbo_hdr {
- u8 nexthdr;
- u8 hdrlen;
- u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */
- u8 tlv_len; /* 4 */
- __be32 jumbo_payload_len;
-};
-
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
@@ -464,72 +451,6 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt);
-/* This helper is specialized for BIG TCP needs.
- * It assumes the hop_jumbo_hdr will immediately follow the IPV6 header.
- * It assumes headers are already in skb->head.
- * Returns: 0, or IPPROTO_TCP if a BIG TCP packet is there.
- */
-static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
-{
- const struct hop_jumbo_hdr *jhdr;
- const struct ipv6hdr *nhdr;
-
- if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
- return 0;
-
- if (skb->protocol != htons(ETH_P_IPV6))
- return 0;
-
- if (skb_network_offset(skb) +
- sizeof(struct ipv6hdr) +
- sizeof(struct hop_jumbo_hdr) > skb_headlen(skb))
- return 0;
-
- nhdr = ipv6_hdr(skb);
-
- if (nhdr->nexthdr != NEXTHDR_HOP)
- return 0;
-
- jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1);
- if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
- jhdr->nexthdr != IPPROTO_TCP)
- return 0;
- return jhdr->nexthdr;
-}
-
-/* Return 0 if HBH header is successfully removed
- * Or if HBH removal is unnecessary (packet is not big TCP)
- * Return error to indicate dropping the packet
- */
-static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
-{
- const int hophdr_len = sizeof(struct hop_jumbo_hdr);
- int nexthdr = ipv6_has_hopopt_jumbo(skb);
- struct ipv6hdr *h6;
-
- if (!nexthdr)
- return 0;
-
- if (skb_cow_head(skb, 0))
- return -1;
-
- /* Remove the HBH header.
- * Layout: [Ethernet header][IPv6 header][HBH][L4 Header]
- */
- memmove(skb_mac_header(skb) + hophdr_len, skb_mac_header(skb),
- skb_network_header(skb) - skb_mac_header(skb) +
- sizeof(struct ipv6hdr));
-
- __skb_pull(skb, hophdr_len);
- skb->network_header += hophdr_len;
- skb->mac_header += hophdr_len;
-
- h6 = ipv6_hdr(skb);
- h6->nexthdr = nexthdr;
-
- return 0;
-}
-
static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
{
s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);
@@ -931,10 +852,10 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool ipv6_can_nonlocal_bind(struct net *net,
- struct inet_sock *inet)
+static inline bool ipv6_can_nonlocal_bind(const struct net *net,
+ const struct inet_sock *inet)
{
- return net->ipv6.sysctl.ip_nonlocal_bind ||
+ return READ_ONCE(net->ipv6.sysctl.ip_nonlocal_bind) ||
test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
}
@@ -949,10 +870,12 @@ static inline bool ipv6_can_nonlocal_bind(struct net *net,
#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT
-static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+static inline __be32 ip6_make_flowlabel(const struct net *net,
+ struct sk_buff *skb,
__be32 flowlabel, bool autolabel,
struct flowi6 *fl6)
{
+ u8 auto_flowlabels;
u32 hash;
/* @flowlabel may include more than a flow label, eg, the traffic class.
@@ -960,10 +883,12 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
*/
flowlabel &= IPV6_FLOWLABEL_MASK;
- if (flowlabel ||
- net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
- (!autolabel &&
- net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+ if (flowlabel)
+ return flowlabel;
+
+ auto_flowlabels = READ_ONCE(net->ipv6.sysctl.auto_flowlabels);
+ if (auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+ (!autolabel && auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
return flowlabel;
hash = skb_get_hash_flowi6(skb, fl6);
@@ -976,15 +901,15 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
- if (net->ipv6.sysctl.flowlabel_state_ranges)
+ if (READ_ONCE(net->ipv6.sysctl.flowlabel_state_ranges))
flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
return flowlabel;
}
-static inline int ip6_default_np_autolabel(struct net *net)
+static inline int ip6_default_np_autolabel(const struct net *net)
{
- switch (net->ipv6.sysctl.auto_flowlabels) {
+ switch (READ_ONCE(net->ipv6.sysctl.auto_flowlabels)) {
case IP6_AUTO_FLOW_LABEL_OFF:
case IP6_AUTO_FLOW_LABEL_OPTIN:
default:
@@ -995,13 +920,13 @@ static inline int ip6_default_np_autolabel(struct net *net)
}
}
#else
-static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+static inline __be32 ip6_make_flowlabel(const struct net *net, struct sk_buff *skb,
__be32 flowlabel, bool autolabel,
struct flowi6 *fl6)
{
return flowlabel;
}
-static inline int ip6_default_np_autolabel(struct net *net)
+static inline int ip6_default_np_autolabel(const struct net *net)
{
return 0;
}
@@ -1010,11 +935,11 @@ static inline int ip6_default_np_autolabel(struct net *net)
#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_multipath_hash_policy(const struct net *net)
{
- return net->ipv6.sysctl.multipath_hash_policy;
+ return READ_ONCE(net->ipv6.sysctl.multipath_hash_policy);
}
static inline u32 ip6_multipath_hash_fields(const struct net *net)
{
- return net->ipv6.sysctl.multipath_hash_fields;
+ return READ_ONCE(net->ipv6.sysctl.multipath_hash_fields);
}
#else
static inline int ip6_multipath_hash_policy(const struct net *net)
@@ -1103,8 +1028,7 @@ void ip6_flush_pending_frames(struct sock *sk);
int ip6_send_skb(struct sk_buff *skb);
struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
- struct inet_cork_full *cork,
- struct inet6_cork *v6_cork);
+ struct inet_cork_full *cork);
struct sk_buff *ip6_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
@@ -1115,8 +1039,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
- return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
- &inet6_sk(sk)->cork);
+ return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
}
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
@@ -1147,11 +1070,11 @@ int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
* Extension header (options) processing
*/
-void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
- u8 *proto, struct in6_addr **daddr_p,
- struct in6_addr *saddr);
-void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
- u8 *proto);
+u8 ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 proto, struct in6_addr **daddr_p,
+ struct in6_addr *saddr);
+u8 ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+ u8 proto);
int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
__be16 *frag_offp);
@@ -1170,9 +1093,19 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);
-struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
- const struct ipv6_txoptions *opt,
- struct in6_addr *orig);
+struct in6_addr *__fl6_update_dst(struct flowi6 *fl6,
+ const struct ipv6_txoptions *opt,
+ struct in6_addr *orig);
+
+static inline struct in6_addr *
+fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt,
+ struct in6_addr *orig)
+{
+ if (likely(!opt))
+ return NULL;
+
+ return __fl6_update_dst(fl6, opt, orig);
+}
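The split keeps the common !opt case inline and pushes the rare
routing-header handling out of line. The canonical call-site pattern is
unchanged; this is a fragment with the surrounding lookup abbreviated:

	struct in6_addr *final_p, final;

	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
	/* final_p is only non-NULL when options redirected the
	 * destination; hand it to the dst lookup as before.
	 */
	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);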
/*
* socket options (ipv6_sockglue.c)
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 9804fa5d9c67..5606ed6e7084 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -195,35 +195,15 @@ struct iucv_handler {
struct list_head paths;
};
-/**
- * iucv_register:
- * @handler: address of iucv handler structure
- * @smp: != 0 indicates that the handler can deal with out of order messages
- *
- * Registers a driver with IUCV.
- *
- * Returns: 0 on success, -ENOMEM if the memory allocation for the pathid
- * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
- */
int iucv_register(struct iucv_handler *handler, int smp);
+void iucv_unregister(struct iucv_handler *handler, int smp);
/**
- * iucv_unregister
- * @handler: address of iucv handler structure
- * @smp: != 0 indicates that the handler can deal with out of order messages
- *
- * Unregister driver from IUCV.
- */
-void iucv_unregister(struct iucv_handler *handle, int smp);
-
-/**
- * iucv_path_alloc
+ * iucv_path_alloc - Allocate a new path structure for use with iucv_connect.
* @msglim: initial message limit
* @flags: initial flags
* @gfp: kmalloc allocation flag
*
- * Allocate a new path structure for use with iucv_connect.
- *
* Returns: NULL if the memory allocation failed or a pointer to the
* path structure.
*/
@@ -240,229 +220,48 @@ static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
}
/**
- * iucv_path_free
+ * iucv_path_free - Free a path structure.
* @path: address of iucv path structure
- *
- * Frees a path structure.
*/
static inline void iucv_path_free(struct iucv_path *path)
{
kfree(path);
}
-/**
- * iucv_path_accept
- * @path: address of iucv path structure
- * @handler: address of iucv handler structure
- * @userdata: 16 bytes of data reflected to the communication partner
- * @private: private data passed to interrupt handlers for this path
- *
- * This function is issued after the user received a connection pending
- * external interrupt and now wishes to complete the IUCV communication path.
- *
- * Returns: the result of the CP IUCV call.
- */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
u8 *userdata, void *private);
-/**
- * iucv_path_connect
- * @path: address of iucv path structure
- * @handler: address of iucv handler structure
- * @userid: 8-byte user identification
- * @system: 8-byte target system identification
- * @userdata: 16 bytes of data reflected to the communication partner
- * @private: private data passed to interrupt handlers for this path
- *
- * This function establishes an IUCV path. Although the connect may complete
- * successfully, you are not able to use the path until you receive an IUCV
- * Connection Complete external interrupt.
- *
- * Returns: the result of the CP IUCV call.
- */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
u8 *userid, u8 *system, u8 *userdata,
void *private);
-/**
- * iucv_path_quiesce:
- * @path: address of iucv path structure
- * @userdata: 16 bytes of data reflected to the communication partner
- *
- * This function temporarily suspends incoming messages on an IUCV path.
- * You can later reactivate the path by invoking the iucv_resume function.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
-/**
- * iucv_path_resume:
- * @path: address of iucv path structure
- * @userdata: 16 bytes of data reflected to the communication partner
- *
- * This function resumes incoming messages on an IUCV path that has
- * been stopped with iucv_path_quiesce.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_path_resume(struct iucv_path *path, u8 *userdata);
-/**
- * iucv_path_sever
- * @path: address of iucv path structure
- * @userdata: 16 bytes of data reflected to the communication partner
- *
- * This function terminates an IUCV path.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_path_sever(struct iucv_path *path, u8 *userdata);
-/**
- * iucv_message_purge
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @srccls: source class of message
- *
- * Cancels a message you have sent.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
u32 srccls);
-/**
- * iucv_message_receive
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
- * @buffer: address of data buffer or address of struct iucv_array
- * @size: length of data buffer
- * @residual:
- *
- * This function receives messages that are being sent to you over
- * established paths. This function will deal with RMDATA messages
- * embedded in struct iucv_message as well.
- *
- * Locking: local_bh_enable/local_bh_disable
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size, size_t *residual);
-/**
- * __iucv_message_receive
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
- * @buffer: address of data buffer or address of struct iucv_array
- * @size: length of data buffer
- * @residual:
- *
- * This function receives messages that are being sent to you over
- * established paths. This function will deal with RMDATA messages
- * embedded in struct iucv_message as well.
- *
- * Locking: no locking.
- *
- * Returns: the result from the CP IUCV call.
- */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size,
size_t *residual);
-/**
- * iucv_message_reject
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- *
- * The reject function refuses a specified message. Between the time you
- * are notified of a message and the time that you complete the message,
- * the message may be rejected.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
-/**
- * iucv_message_reply
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
- * @reply: address of data buffer or address of struct iucv_array
- * @size: length of reply data buffer
- *
- * This function responds to the two-way messages that you receive. You
- * must identify completely the message to which you wish to reply. ie,
- * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
- * the parameter list.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *reply, size_t size);
-/**
- * iucv_message_send
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
- * @srccls: source class of message
- * @buffer: address of data buffer or address of struct iucv_array
- * @size: length of send buffer
- *
- * This function transmits data to another application. Data to be
- * transmitted is in a buffer and this is a one-way message and the
- * receiver will not reply to the message.
- *
- * Locking: local_bh_enable/local_bh_disable
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
-/**
- * __iucv_message_send
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
- * @srccls: source class of message
- * @buffer: address of data buffer or address of struct iucv_array
- * @size: length of send buffer
- *
- * This function transmits data to another application. Data to be
- * transmitted is in a buffer and this is a one-way message and the
- * receiver will not reply to the message.
- *
- * Locking: no locking.
- *
- * Returns: the result from the CP IUCV call.
- */
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
-/**
- * iucv_message_send2way
- * @path: address of iucv path structure
- * @msg: address of iucv msg structure
- * @flags: how the message is sent and the reply is received
- * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
- * @srccls: source class of message
- * @buffer: address of data buffer or address of struct iucv_array
- * @size: length of send buffer
- * @ansbuf: address of answer buffer or address of struct iucv_array
- * @asize: size of reply buffer
- *
- * This function transmits data to another application. Data to be
- * transmitted is in a buffer. The receiver of the send is expected to
- * reply to the message and a buffer is provided into which IUCV moves
- * the reply to this message.
- *
- * Returns: the result from the CP IUCV call.
- */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size,
void *answer, size_t asize, size_t *residual);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 1eb8dad18f7e..710e98665eb3 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -207,18 +207,19 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
static inline
struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev;
+ rcu_read_lock();
+ dev = skb_dst_dev_rcu(skb);
if (netif_is_l3_slave(dev)) {
struct net_device *master;
- rcu_read_lock();
master = netdev_master_upper_dev_get_rcu(dev);
if (master && master->l3mdev_ops->l3mdev_l3_out)
skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
skb, proto);
- rcu_read_unlock();
}
+ rcu_read_unlock();
return skb;
}
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c2e49542626c..7f9d96939a4e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2025 Intel Corporation
+ * Copyright (C) 2018 - 2026 Intel Corporation
*/
#ifndef MAC80211_H
@@ -706,6 +706,7 @@ struct ieee80211_parsed_tpe {
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
* @epcs_support: does this BSS support EPCS
+ * @uhr_support: does this BSS support UHR
* @csa_active: marks whether a channel switch is going on.
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -832,6 +833,8 @@ struct ieee80211_bss_conf {
u8 pwr_reduction;
bool eht_support;
bool epcs_support;
+ bool uhr_support;
+
bool csa_active;
bool mu_mimo_owner;
@@ -1598,6 +1601,7 @@ enum mac80211_rx_encoding {
RX_ENC_VHT,
RX_ENC_HE,
RX_ENC_EHT,
+ RX_ENC_UHR,
};
/**
@@ -1631,7 +1635,7 @@ enum mac80211_rx_encoding {
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
- * @nss: number of streams (VHT, HE and EHT only)
+ * @nss: number of streams (VHT, HE, EHT and UHR only)
* @flag: %RX_FLAG_\*
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
@@ -1642,6 +1646,11 @@ enum mac80211_rx_encoding {
* @eht: EHT specific rate information
* @eht.ru: EHT RU, from &enum nl80211_eht_ru_alloc
* @eht.gi: EHT GI, from &enum nl80211_eht_gi
+ * @uhr: UHR specific rate information
+ * @uhr.ru: UHR RU, from &enum nl80211_eht_ru_alloc
+ * @uhr.gi: UHR GI, from &enum nl80211_eht_gi
+ * @uhr.elr: UHR ELR MCS was used
+ * @uhr.im: UHR interference mitigation was used
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1673,6 +1682,12 @@ struct ieee80211_rx_status {
u8 ru:4;
u8 gi:2;
} eht;
+ struct {
+ u8 ru:4;
+ u8 gi:2;
+ u8 elr:1;
+ u8 im:1;
+ } uhr;
};
u8 rate_idx;
u8 nss;
@@ -1903,6 +1918,31 @@ enum ieee80211_offload_flags {
};
/**
+ * struct ieee80211_eml_params - EML Operating mode notification parameters
+ *
+ * EML Operating mode notification parameters received in the Operating mode
+ * notification frame. This struct is used as a container to pass the info to
+ * the underlying driver.
+ *
+ * @link_id: the link ID where the Operating mode notification frame has been
+ * received.
+ * @control: EML control field defined in P802.11be section 9.4.1.76.
+ * @link_bitmap: eMLSR/eMLMR enabled links defined in P802.11be
+ * section 9.4.1.76.
+ * @emlmr_mcs_map_count: number of valid eMLMR mcs_map_bw fields according to
+ * P802.11be section 9.4.1.76 (valid if eMLMR mode control bit is set).
+ * @emlmr_mcs_map_bw: eMLMR supported MCS and NSS set subfields defined in
+ * P802.11be section 9.4.1.76 (valid if eMLMR mode control bit is set).
+ */
+struct ieee80211_eml_params {
+ u8 link_id;
+ u8 control;
+ u16 link_bitmap;
+ u8 emlmr_mcs_map_count;
+ u8 emlmr_mcs_map_bw[9];
+};
+
+/**
* struct ieee80211_vif_cfg - interface configuration
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS or not
@@ -2434,6 +2474,7 @@ struct ieee80211_sta_aggregates {
* @he_cap: HE capabilities of this STA
* @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
* @eht_cap: EHT capabilities of this STA
+ * @uhr_cap: UHR capabilities of this STA
* @s1g_cap: S1G capabilities of this STA
* @agg: per-link data for multi-link aggregation
* @bandwidth: current bandwidth the station can receive with
@@ -2457,6 +2498,7 @@ struct ieee80211_link_sta {
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
+ struct ieee80211_sta_uhr_cap uhr_cap;
struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_sta_aggregates agg;
@@ -2520,6 +2562,7 @@ struct ieee80211_link_sta {
* by the AP.
* @valid_links: bitmap of valid links, or 0 for non-MLO
* @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
+ * @epp_peer: indicates that the peer is an EPP peer.
*/
struct ieee80211_sta {
u8 addr[ETH_ALEN] __aligned(2);
@@ -2544,6 +2587,7 @@ struct ieee80211_sta {
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
u16 valid_links;
+ bool epp_peer;
struct ieee80211_link_sta deflink;
struct ieee80211_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -4511,6 +4555,9 @@ struct ieee80211_prep_tx_info {
* interface with the specified type would be added, and thus drivers that
* implement this callback need to handle such cases. The type is the full
* &enum nl80211_iftype.
+ * @set_eml_op_mode: Configure eMLSR/eMLMR operation mode in the underlying
+ *	driver according to the parameters received in the EML Operating mode
+ *	notification frame.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4906,6 +4953,10 @@ struct ieee80211_ops {
struct ieee80211_neg_ttlm *ttlm);
void (*prep_add_interface)(struct ieee80211_hw *hw,
enum nl80211_iftype type);
+ int (*set_eml_op_mode)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_eml_params *eml_params);
};
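(For illustration, a minimal driver-side sketch of the new callback; the driver names and the firmware call are hypothetical, only the ieee80211_ops hook and struct ieee80211_eml_params come from this change.)

static int drv_set_eml_op_mode(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_eml_params *eml_params)
{
	/* drv_fw_send_eml() is hypothetical; forward the parsed fields. */
	return drv_fw_send_eml(hw->priv, eml_params->link_id,
			       eml_params->control,
			       eml_params->link_bitmap);
}

static const struct ieee80211_ops drv_ops = {
	/* ... mandatory callbacks omitted ... */
	.set_eml_op_mode = drv_set_eml_op_mode,
};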
/**
@@ -6272,6 +6323,30 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
struct ieee80211_vif *vif),
void *data);
+struct ieee80211_vif *
+__ieee80211_iterate_interfaces(struct ieee80211_hw *hw,
+ struct ieee80211_vif *prev,
+ u32 iter_flags);
+
+/**
+ * for_each_interface - iterate interfaces under wiphy mutex
+ * @vif: the iterator variable
+ * @hw: the HW to iterate for
+ * @flags: the iteration flags, see &enum ieee80211_interface_iteration_flags
+ */
+#define for_each_interface(vif, hw, flags) \
+ for (vif = __ieee80211_iterate_interfaces(hw, NULL, flags); \
+ vif; \
+ vif = __ieee80211_iterate_interfaces(hw, vif, flags))
+
+/**
+ * for_each_active_interface - iterate active interfaces under wiphy mutex
+ * @vif: the iterator variable
+ * @hw: the HW to iterate for
+ */
+#define for_each_active_interface(vif, hw) \
+ for_each_interface(vif, hw, IEEE80211_IFACE_ITER_ACTIVE)
+
/**
* ieee80211_iterate_active_interfaces_mtx - iterate active interfaces
*
@@ -6284,12 +6359,18 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
* @iterator: the iterator function to call, cannot sleep
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
- u32 iter_flags,
- void (*iterator)(void *data,
- u8 *mac,
- struct ieee80211_vif *vif),
- void *data);
+static inline void
+ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
+ u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ struct ieee80211_vif *vif;
+
+ for_each_interface(vif, hw, iter_flags | IEEE80211_IFACE_ITER_ACTIVE)
+ iterator(data, vif->addr, vif);
+}
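(Usage sketch, hypothetical driver code: the new macro replaces the callback-based iterator; the caller must hold the wiphy mutex.)

static unsigned int drv_count_active_vifs(struct ieee80211_hw *hw)
{
	struct ieee80211_vif *vif;
	unsigned int n = 0;

	for_each_active_interface(vif, hw)
		n++;

	return n;
}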
/**
* ieee80211_iterate_stations_atomic - iterate stations
@@ -6308,6 +6389,20 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
struct ieee80211_sta *sta),
void *data);
+struct ieee80211_sta *
+__ieee80211_iterate_stations(struct ieee80211_hw *hw,
+ struct ieee80211_sta *prev);
+
+/**
+ * for_each_station - iterate stations under wiphy mutex
+ * @sta: the iterator variable
+ * @hw: the HW to iterate for
+ */
+#define for_each_station(sta, hw) \
+ for (sta = __ieee80211_iterate_stations(hw, NULL); \
+ sta; \
+ sta = __ieee80211_iterate_stations(hw, sta))
+
/**
* ieee80211_iterate_stations_mtx - iterate stations
*
@@ -6320,10 +6415,17 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
* @iterator: the iterator function to call
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
- void (*iterator)(void *data,
- struct ieee80211_sta *sta),
- void *data);
+static inline void
+ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data)
+{
+ struct ieee80211_sta *sta;
+
+ for_each_station(sta, hw)
+ iterator(data, sta);
+}
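(Equivalent sketch for the station iterator, again hypothetical and wiphy-mutex-protected; ether_addr_equal() is from <linux/etherdevice.h>.)

static struct ieee80211_sta *drv_find_sta(struct ieee80211_hw *hw,
					  const u8 *addr)
{
	struct ieee80211_sta *sta;

	for_each_station(sta, hw)
		if (ether_addr_equal(sta->addr, addr))
			return sta;

	return NULL;
}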
/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
@@ -7237,6 +7339,20 @@ ieee80211_get_eht_iftype_cap_vif(const struct ieee80211_supported_band *sband,
}
/**
+ * ieee80211_get_uhr_iftype_cap_vif - return UHR capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_uhr_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_uhr_cap *
+ieee80211_get_uhr_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_uhr_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
* ieee80211_update_mu_groups - set the VHT MU-MIMO groud data
*
* @vif: the specified virtual interface
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index eaa27483f99b..766f4fb25e26 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -35,6 +35,8 @@ enum gdma_request_type {
GDMA_CREATE_MR = 31,
GDMA_DESTROY_MR = 32,
GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
+ GDMA_ALLOC_DM = 96, /* 0x60 */
+ GDMA_DESTROY_DM = 97, /* 0x61 */
};
#define GDMA_RESOURCE_DOORBELL_PAGE 27
@@ -598,6 +600,10 @@ enum {
/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
+
+/* Driver detects stalled send queues and recovers them */
+#define GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY BIT(18)
+
#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
@@ -621,7 +627,8 @@ enum {
GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
- GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY)
+ GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
+ GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY)
#define GDMA_DRV_CAP_FLAGS2 0
@@ -861,6 +868,8 @@ enum gdma_mr_type {
GDMA_MR_TYPE_GVA = 2,
/* Guest zero-based address MRs */
GDMA_MR_TYPE_ZBVA = 4,
+ /* Device address MRs */
+ GDMA_MR_TYPE_DM = 5,
};
struct gdma_create_mr_params {
@@ -876,6 +885,12 @@ struct gdma_create_mr_params {
u64 dma_region_handle;
enum gdma_mr_access_flags access_flags;
} zbva;
+ struct {
+ u64 dm_handle;
+ u64 offset;
+ u64 length;
+ enum gdma_mr_access_flags access_flags;
+ } da;
};
};
@@ -890,13 +905,23 @@ struct gdma_create_mr_request {
u64 dma_region_handle;
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
- } gva;
+ } __packed gva;
struct {
u64 dma_region_handle;
enum gdma_mr_access_flags access_flags;
- } zbva;
- };
+ } __packed zbva;
+ struct {
+ u64 dm_handle;
+ u64 offset;
+ enum gdma_mr_access_flags access_flags;
+ } __packed da;
+ } __packed;
u32 reserved_2;
+ union {
+ struct {
+ u64 length;
+ } da_ext;
+ };
};/* HW DATA */
struct gdma_create_mr_response {
@@ -915,6 +940,27 @@ struct gdma_destroy_mr_response {
struct gdma_resp_hdr hdr;
};/* HW DATA */
+struct gdma_alloc_dm_req {
+ struct gdma_req_hdr hdr;
+ u64 length;
+ u32 alignment;
+ u32 flags;
+}; /* HW DATA */
+
+struct gdma_alloc_dm_resp {
+ struct gdma_resp_hdr hdr;
+ u64 dm_handle;
+}; /* HW DATA */
+
+struct gdma_destroy_dm_req {
+ struct gdma_req_hdr hdr;
+ u64 dm_handle;
+}; /* HW DATA */
+
+struct gdma_destroy_dm_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
int mana_gd_verify_vf_version(struct pci_dev *pdev);
int mana_gd_register_device(struct gdma_dev *gd);
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index d7e089c6b694..a078af283bdd 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -480,7 +480,7 @@ struct mana_context {
struct mana_ethtool_hc_stats hc_stats;
struct mana_eq *eqs;
struct dentry *mana_eqs_debugfs;
-
+ struct workqueue_struct *per_port_queue_reset_wq;
/* Workqueue for querying hardware stats */
struct delayed_work gf_stats_work;
bool hwc_timeout_occurred;
@@ -495,6 +495,7 @@ struct mana_context {
struct mana_port_context {
struct mana_context *ac;
struct net_device *ndev;
+ struct work_struct queue_reset_work;
u8 mac_addr[ETH_ALEN];
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index cb664f6e3558..d7bec49ee9ea 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -37,6 +37,7 @@
#include <net/netns/smc.h>
#include <net/netns/bpf.h>
#include <net/netns/mctp.h>
+#include <net/netns/vsock.h>
#include <net/net_trackers.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
@@ -120,6 +121,7 @@ struct net {
* it is critical that it is on a read_mostly cache line.
*/
u32 hash_mix;
+ bool is_dying;
struct net_device *loopback_dev; /* The loopback */
@@ -196,6 +198,9 @@ struct net {
/* Move to a better place when the config guard is removed. */
struct mutex rtnl_mutex;
#endif
+#if IS_ENABLED(CONFIG_VSOCKETS)
+ struct netns_vsock vsock;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index cd00e0406cf4..95ed28212f4e 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -14,6 +14,10 @@ struct netdev_config {
u8 hds_config;
};
+struct netdev_queue_config {
+ u32 rx_page_size;
+};
+
/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
u64 bytes;
@@ -111,6 +115,11 @@ void netdev_stat_queue_sum(struct net_device *netdev,
int tx_start, int tx_end,
struct netdev_queue_stats_tx *tx_sum);
+enum {
+ /* The queue checks and honours the page size qcfg parameter */
+ QCFG_RX_PAGE_SIZE = 0x1,
+};
+
/**
* struct netdev_queue_mgmt_ops - netdev ops for queue management
*
@@ -130,27 +139,52 @@ void netdev_stat_queue_sum(struct net_device *netdev,
* @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used
* for this queue. Return NULL on error.
*
+ * @ndo_default_qcfg: (Optional) Populate queue config struct with defaults.
+ * Queue config structs are passed to this helper before
+ * the user-requested settings are applied.
+ *
+ * @ndo_validate_qcfg: (Optional) Check if queue config is supported.
+ * Called when configuration affecting a queue may be
+ * changing, either due to NIC-wide config, or config
+ * scoped to the queue at a specified index.
+ * When NIC-wide config is changed the callback will
+ * be invoked for all queues.
+ *
+ * @supported_params: Bitmask of supported parameters, see QCFG_*.
+ *
* Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
* the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
* be called for an interface which is open.
*/
struct netdev_queue_mgmt_ops {
- size_t ndo_queue_mem_size;
- int (*ndo_queue_mem_alloc)(struct net_device *dev,
- void *per_queue_mem,
- int idx);
- void (*ndo_queue_mem_free)(struct net_device *dev,
- void *per_queue_mem);
- int (*ndo_queue_start)(struct net_device *dev,
- void *per_queue_mem,
- int idx);
- int (*ndo_queue_stop)(struct net_device *dev,
- void *per_queue_mem,
- int idx);
- struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
- int idx);
+ size_t ndo_queue_mem_size;
+ int (*ndo_queue_mem_alloc)(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_queue_mem,
+ int idx);
+ void (*ndo_queue_mem_free)(struct net_device *dev,
+ void *per_queue_mem);
+ int (*ndo_queue_start)(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ void *per_queue_mem,
+ int idx);
+ int (*ndo_queue_stop)(struct net_device *dev,
+ void *per_queue_mem,
+ int idx);
+ void (*ndo_default_qcfg)(struct net_device *dev,
+ struct netdev_queue_config *qcfg);
+ int (*ndo_validate_qcfg)(struct net_device *dev,
+ struct netdev_queue_config *qcfg,
+ struct netlink_ext_ack *extack);
+ struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
+ int idx);
+
+ unsigned int supported_params;
};
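(A sketch of the optional qcfg hooks for a hypothetical driver that only supports 4K and 64K rx pages; SZ_4K/SZ_64K are from <linux/sizes.h>, and the constraint itself is made up for the example.)

static void drv_default_qcfg(struct net_device *dev,
			     struct netdev_queue_config *qcfg)
{
	qcfg->rx_page_size = SZ_4K;
}

static int drv_validate_qcfg(struct net_device *dev,
			     struct netdev_queue_config *qcfg,
			     struct netlink_ext_ack *extack)
{
	if (qcfg->rx_page_size != SZ_4K && qcfg->rx_page_size != SZ_64K) {
		NL_SET_ERR_MSG(extack, "unsupported rx page size");
		return -EINVAL;
	}
	return 0;
}

static const struct netdev_queue_mgmt_ops drv_queue_mgmt_ops = {
	/* ... mandatory alloc/free/start/stop callbacks omitted ... */
	.ndo_default_qcfg	= drv_default_qcfg,
	.ndo_validate_qcfg	= drv_validate_qcfg,
	.supported_params	= QCFG_RX_PAGE_SIZE,
};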
+void netdev_queue_config(struct net_device *dev, int rxq,
+ struct netdev_queue_config *qcfg);
+
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
/**
@@ -310,6 +344,17 @@ static inline void netif_subqueue_sent(const struct net_device *dev,
netdev_tx_sent_queue(txq, bytes);
}
+static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq)
+{
+ unsigned long trans_start = READ_ONCE(txq->trans_start);
+
+ if (netif_xmit_stopped(txq) &&
+ time_after(jiffies, trans_start + txq->dev->watchdog_timeo))
+ return jiffies_to_msecs(jiffies - trans_start);
+
+ return 0;
+}
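(Usage sketch, hypothetical driver health check: scan all TX queues and report how long each stalled queue has been stuck; a return value of zero means the queue is healthy.)

static void drv_check_stalled_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		unsigned int stalled_ms = netif_xmit_timeout_ms(txq);

		if (stalled_ms)
			netdev_warn(dev, "txq %u stalled for %u ms\n",
				    i, stalled_ms);
	}
}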
+
#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
({ \
struct netdev_queue *_txq; \
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index 8cdcd138b33f..cfa72c485387 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include <net/xdp.h>
#include <net/page_pool/types.h>
+#include <net/netdev_queues.h>
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
@@ -27,6 +28,7 @@ struct netdev_rx_queue {
struct xsk_buff_pool *pool;
#endif
struct napi_struct *napi;
+ struct netdev_queue_config qcfg;
struct pp_memory_provider_params mp_params;
} ____cacheline_aligned_in_smp;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index aa0a7c82199e..bc42dd0e10e6 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
+#include <net/netns/generic.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 52a06de41aa0..cf0166520cf3 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -13,6 +13,7 @@ struct nf_conncount_list {
u32 last_gc; /* jiffies at most recent gc */
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
+ unsigned int last_gc_count; /* length of list at most recent gc */
};
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen);
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index f7dd950ff250..4d55b7325707 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -11,7 +11,7 @@
#ifndef _NF_CONNTRACK_TUPLE_H
#define _NF_CONNTRACK_TUPLE_H
-#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <linux/list_nulls.h>
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 4aeffddb7586..45eb26b2e95b 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -6,11 +6,13 @@
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/netfilter.h>
+#include <linux/rhashtable-types.h>
#include <linux/skbuff.h>
/* Each queued (to userspace) skbuff has one of these. */
struct nf_queue_entry {
struct list_head list;
+ struct rhash_head hash_node;
struct sk_buff *skb;
unsigned int id;
unsigned int hook_index; /* index in hook_entries->hook[] */
@@ -19,7 +21,9 @@ struct nf_queue_entry {
struct net_device *physout;
#endif
struct nf_hook_state state;
+ bool nf_ct_is_unconfirmed;
u16 size; /* sizeof(entry) + saved route keys */
+ u16 queue_num;
/* extra space to store route keys */
};
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0e266c2d0e7f..426534a711b0 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,7 +6,6 @@
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/u64_stats_sync.h>
#include <linux/rhashtable.h>
@@ -278,6 +277,8 @@ struct nft_userdata {
unsigned char data[];
};
+#define NFT_SET_ELEM_INTERNAL_LAST 0x1
+
/* placeholder structure for opaque set element backend representation. */
struct nft_elem_priv { };
@@ -287,6 +288,7 @@ struct nft_elem_priv { };
* @key: element key
* @key_end: closing element key
* @data: element data
+ * @flags: flags
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -302,6 +304,7 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} data;
+ u32 flags;
struct nft_elem_priv *priv;
};
@@ -452,6 +455,7 @@ struct nft_set_ext;
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
* @gc_init: initialize garbage collection
+ * @abort_skip_removal: skip removal of elements from abort path
* @elemsize: element private size
*
* Operations lookup, update and delete have simpler interfaces, are faster
@@ -509,6 +513,7 @@ struct nft_set_ops {
const struct nft_set *set);
void (*gc_init)(const struct nft_set *set);
+ bool abort_skip_removal;
unsigned int elemsize;
};
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index a0633eeaec97..c53ac00bb974 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -42,7 +42,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
if (ip6h->version != 6)
return -1;
- pkt_len = ntohs(ip6h->payload_len);
+ pkt_len = ipv6_payload_len(pkt->skb, ip6h);
skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
if (pkt_len + sizeof(*ip6h) > skb_len)
return -1;
@@ -86,7 +86,7 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
if (ip6h->version != 6)
goto inhdr_error;
- pkt_len = ntohs(ip6h->payload_len);
+ pkt_len = ipv6_payload_len(pkt->skb, ip6h);
if (pkt_len + sizeof(*ip6h) > pkt->skb->len) {
idev = __in6_dev_get(nft_in(pkt));
__IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 9e10f4ac50c3..a96b3e5e5574 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -389,8 +389,36 @@ static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
return netmem_to_nmdesc(netmem)->dma_addr;
}
-void get_netmem(netmem_ref netmem);
-void put_netmem(netmem_ref netmem);
+#if defined(CONFIG_NET_DEVMEM)
+static inline bool net_is_devmem_iov(const struct net_iov *niov)
+{
+ return niov->type == NET_IOV_DMABUF;
+}
+#else
+static inline bool net_is_devmem_iov(const struct net_iov *niov)
+{
+ return false;
+}
+#endif
+
+void __get_netmem(netmem_ref netmem);
+void __put_netmem(netmem_ref netmem);
+
+static __always_inline void get_netmem(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ __get_netmem(netmem);
+ else
+ get_page(netmem_to_page(netmem));
+}
+
+static __always_inline void put_netmem(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ __put_netmem(netmem);
+ else
+ put_page(netmem_to_page(netmem));
+}
#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL) \
do { \
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 08d2ecc96e2b..34bdb1308e8f 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -30,19 +30,23 @@ struct netns_sysctl_ipv6 {
int ip6_rt_min_advmss;
u32 multipath_hash_fields;
u8 multipath_hash_policy;
- u8 bindv6only;
+
+ __cacheline_group_begin(sysctl_ipv6_flowlabel);
u8 flowlabel_consistency;
u8 auto_flowlabels;
- int icmpv6_time;
+ u8 flowlabel_state_ranges;
+ __cacheline_group_end(sysctl_ipv6_flowlabel);
+
u8 icmpv6_echo_ignore_all;
u8 icmpv6_echo_ignore_multicast;
u8 icmpv6_echo_ignore_anycast;
+ int icmpv6_time;
DECLARE_BITMAP(icmpv6_ratemask, ICMPV6_MSG_MAX + 1);
unsigned long *icmpv6_ratemask_ptr;
u8 anycast_src_echo_reply;
+ u8 bindv6only;
u8 ip_nonlocal_bind;
u8 fwmark_reflect;
- u8 flowlabel_state_ranges;
int idgen_retries;
int idgen_delay;
int flowlabel_reflect;
diff --git a/include/net/netns/vsock.h b/include/net/netns/vsock.h
new file mode 100644
index 000000000000..b34d69a22fa8
--- /dev/null
+++ b/include/net/netns/vsock.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NET_NAMESPACE_VSOCK_H
+#define __NET_NET_NAMESPACE_VSOCK_H
+
+#include <linux/types.h>
+
+enum vsock_net_mode {
+ VSOCK_NET_MODE_GLOBAL,
+ VSOCK_NET_MODE_LOCAL,
+};
+
+struct netns_vsock {
+ struct ctl_table_header *sysctl_hdr;
+
+ /* protected by the vsock_table_lock in af_vsock.c */
+ u32 port;
+
+ enum vsock_net_mode mode;
+ enum vsock_net_mode child_ns_mode;
+};
+#endif /* __NET_NET_NAMESPACE_VSOCK_H */
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 1509a536cb85..0d453484a585 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -161,6 +161,7 @@ struct memory_provider_ops;
struct pp_memory_provider_params {
void *mp_priv;
const struct memory_provider_ops *mp_ops;
+ u32 rx_page_size;
};
struct page_pool {
diff --git a/include/net/phy/realtek_phy.h b/include/net/phy/realtek_phy.h
new file mode 100644
index 000000000000..d683bc1b0659
--- /dev/null
+++ b/include/net/phy/realtek_phy.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _REALTEK_PHY_H
+#define _REALTEK_PHY_H
+
+#define PHY_ID_RTL_DUMMY_SFP 0x001ccbff
+
+#endif /* _REALTEK_PHY_H */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index e703c507d0da..18a419cd9d94 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -308,4 +308,28 @@ static inline unsigned int qdisc_peek_len(struct Qdisc *sch)
return len;
}
+static inline void qdisc_lock_init(struct Qdisc *sch,
+ const struct Qdisc_ops *ops)
+{
+ spin_lock_init(&sch->q.lock);
+
+ /* Skip dynamic keys if nesting is not possible */
+ if (ops->static_flags & TCQ_F_INGRESS ||
+ ops == &noqueue_qdisc_ops)
+ return;
+
+ lockdep_register_key(&sch->root_lock_key);
+ lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
+}
+
+static inline void qdisc_lock_uninit(struct Qdisc *sch,
+ const struct Qdisc_ops *ops)
+{
+ if (ops->static_flags & TCQ_F_INGRESS ||
+ ops == &noqueue_qdisc_ops)
+ return;
+
+ lockdep_unregister_key(&sch->root_lock_key);
+}
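(The intended pairing, sketched against simplified alloc/free paths; real qdisc allocation does considerably more, this only shows where the lockdep key is registered and released.)

static struct Qdisc *example_qdisc_alloc(const struct Qdisc_ops *ops)
{
	struct Qdisc *sch = kzalloc(sizeof(*sch) + ops->priv_size, GFP_KERNEL);

	if (!sch)
		return NULL;

	qdisc_lock_init(sch, ops);	/* no-op for ingress/noqueue */
	return sch;
}

static void example_qdisc_free(struct Qdisc *sch, const struct Qdisc_ops *ops)
{
	qdisc_lock_uninit(sch, ops);	/* must pair with qdisc_lock_init() */
	kfree(sch);
}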
+
#endif
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 9b9e04f6bb89..5a9c826a7092 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -123,14 +123,7 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb,
return sk;
}
-static inline void __reqsk_free(struct request_sock *req)
-{
- req->rsk_ops->destructor(req);
- if (req->rsk_listener)
- sock_put(req->rsk_listener);
- kfree(req->saved_syn);
- kmem_cache_free(req->rsk_ops->slab, req);
-}
+void __reqsk_free(struct request_sock *req);
static inline void reqsk_free(struct request_sock *req)
{
@@ -196,8 +189,6 @@ struct request_sock_queue {
*/
};
-void reqsk_queue_alloc(struct request_sock_queue *queue);
-
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
bool reset);
diff --git a/include/net/sch_priv.h b/include/net/sch_priv.h
new file mode 100644
index 000000000000..4789f668ae87
--- /dev/null
+++ b/include/net/sch_priv.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_SCHED_PRIV_H
+#define __NET_SCHED_PRIV_H
+
+#include <net/sch_generic.h>
+
+struct mq_sched {
+ struct Qdisc **qdiscs;
+};
+
+int mq_init_common(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack,
+ const struct Qdisc_ops *qdisc_ops);
+void mq_destroy_common(struct Qdisc *sch);
+void mq_attach(struct Qdisc *sch);
+void mq_dump_common(struct Qdisc *sch, struct sk_buff *skb);
+struct netdev_queue *mq_select_queue(struct Qdisc *sch,
+ struct tcmsg *tcm);
+struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl);
+unsigned long mq_find(struct Qdisc *sch, u32 classid);
+int mq_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm);
+int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d);
+void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg);
+
+#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index aafe8bdb2c0f..66b56288c1d3 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -341,6 +341,7 @@ struct sk_filter;
* @sk_reuseport_cb: reuseport group container
* @sk_bpf_storage: ptr to cache and control for bpf_sk_storage
* @sk_rcu: used during RCU grace period
+ * @sk_freeptr: used for SLAB_TYPESAFE_BY_RCU managed sockets
* @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
@@ -582,7 +583,14 @@ struct sock {
struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
struct numa_drop_counters *sk_drop_counters;
- struct rcu_head sk_rcu;
+ /* Sockets using SLAB_TYPESAFE_BY_RCU can use sk_freeptr.
+ * By the time kfree() is called, sk_rcu cannot be in
+ * use and may be mangled.
+ */
+ union {
+ struct rcu_head sk_rcu;
+ freeptr_t sk_freeptr;
+ };
netns_tracker ns_tracker;
struct xarray sk_user_frags;
@@ -1368,6 +1376,7 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
+ unsigned int freeptr_offset;
unsigned int ipv6_pinfo_offset;
slab_flags_t slab_flags;
unsigned int useroffset; /* Usercopy region offset */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0deb5e9dd911..40e72b9cb85f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -347,6 +347,15 @@ extern struct proto tcp_prot;
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
+/*
+ * TCP splice context
+ */
+struct tcp_splice_state {
+ struct pipe_inode_info *pipe;
+ size_t len;
+ unsigned int flags;
+};
+
void tcp_tsq_work_init(void);
int tcp_v4_err(struct sk_buff *skb, u32);
@@ -378,6 +387,8 @@ void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
+int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+ unsigned int offset, size_t len);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
@@ -541,6 +552,7 @@ enum tcp_synack_type {
TCP_SYNACK_NORMAL,
TCP_SYNACK_FASTOPEN,
TCP_SYNACK_COOKIE,
+ TCP_SYNACK_RETRANS,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
@@ -751,7 +763,15 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
-void tcp_check_space(struct sock *sk);
+void __tcp_check_space(struct sock *sk);
+static inline void tcp_check_space(struct sock *sk)
+{
+ /* pairs with tcp_poll() */
+ smp_mb();
+
+ if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ __tcp_check_space(sk);
+}
void tcp_sack_compress_send_ack(struct sock *sk);
static inline void tcp_cleanup_skb(struct sk_buff *skb)
@@ -809,6 +829,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
+void tcp_rate_check_app_limited(struct sock *sk);
/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
@@ -1203,7 +1224,15 @@ enum tcp_ca_ack_event_flags {
#define TCP_CONG_NON_RESTRICTED BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN BIT(1)
-#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
+/* Require successfully negotiated AccECN capability */
+#define TCP_CONG_NEEDS_ACCECN BIT(2)
+/* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
+#define TCP_CONG_ECT_1_NEGOTIATION BIT(3)
+/* Cannot fallback to RFC3168 during AccECN negotiation */
+#define TCP_CONG_NO_FALLBACK_RFC3168 BIT(4)
+#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
+ TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
+ TCP_CONG_NO_FALLBACK_RFC3168)
union tcp_cc_info;
@@ -1243,12 +1272,27 @@ struct rate_sample {
struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */
+ /* A congestion control (CC) must provide one of either:
+ *
+ * (a) a cong_avoid function, if the CC wants to use the core TCP
+ * stack's default functionality to implement a "classic"
+ * (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
+ * idle periods, pacing rate computations, etc.
+ *
+ * (b) a cong_control function, if the CC wants custom behavior and
+ * complete control of all congestion control behaviors.
+ */
+ /* (a) "classic" response: calculate new cwnd.
+ */
+ void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
+ /* (b) "custom" response: call when packets are delivered to update
+ * cwnd and pacing rate, after all the ca_state processing.
+ */
+ void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
+
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
- /* do new cwnd calculation (required) */
- void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
-
/* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state);
@@ -1261,15 +1305,9 @@ struct tcp_congestion_ops {
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
- /* override sysctl_tcp_min_tso_segs */
+ /* override sysctl_tcp_min_tso_segs (optional) */
u32 (*min_tso_segs)(struct sock *sk);
- /* call when packets are delivered to update cwnd and pacing rate,
- * after all the ca_state processing. (optional)
- */
- void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
-
-
/* new value of cwnd after loss (required) */
u32 (*undo_cwnd)(struct sock *sk);
/* returns the multiplier used in tcp_sndbuf_expand (optional) */
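(To make the (a)/(b) split above concrete, a minimal classic-style module sketch; it reuses the exported Reno helpers, so only choice (a) is wired up and cong_control is left NULL.)

static struct tcp_congestion_ops example_cong_ops __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,	/* choice (a): classic response */
	.undo_cwnd	= tcp_reno_undo_cwnd,
};

static int __init example_cong_register(void)
{
	return tcp_register_congestion_control(&example_cong_ops);
}

static void __exit example_cong_unregister(void)
{
	tcp_unregister_congestion_control(&example_cong_ops);
}

module_init(example_cong_register);
module_exit(example_cong_unregister);
MODULE_LICENSE("GPL");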
@@ -1335,6 +1373,27 @@ static inline bool tcp_ca_needs_ecn(const struct sock *sk)
return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}
+static inline bool tcp_ca_needs_accecn(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
+}
+
+static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
+}
+
+static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
+}
+
static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1346,13 +1405,6 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
-/* From tcp_rate.c */
-void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
- struct rate_sample *rs);
-void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
- bool is_sack_reneg, struct rate_sample *rs);
-void tcp_rate_check_app_limited(struct sock *sk);
static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
@@ -1581,8 +1633,14 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason);
+static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
+{
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
+}
-int tcp_filter(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);
@@ -2318,8 +2376,6 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct tcphdr *th);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
-INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
#ifdef CONFIG_INET
void tcp_gro_complete(struct sk_buff *skb);
#else
@@ -2513,10 +2569,7 @@ void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
-extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
-extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
/* tcp_plb.c */
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index f13e5cd2b1ac..e9a933641636 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -29,8 +29,15 @@ enum tcp_accecn_option {
TCP_ACCECN_OPTION_DISABLED = 0,
TCP_ACCECN_OPTION_MINIMUM = 1,
TCP_ACCECN_OPTION_FULL = 2,
+ TCP_ACCECN_OPTION_PERSIST = 3,
};
+/* Apply either ECT(0) or ECT(1) based on TCP_CONG_ECT_1_NEGOTIATION flag */
+static inline void INET_ECN_xmit_ect_1_negotiation(struct sock *sk)
+{
+ __INET_ECN_xmit(sk, tcp_ca_ect_1_negotiation(sk));
+}
+
static inline void tcp_ecn_queue_cwr(struct tcp_sock *tp)
{
/* Do not set CWR if in AccECN mode! */
@@ -60,12 +67,6 @@ static inline void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}
-/* tp->accecn_fail_mode */
-#define TCP_ACCECN_ACE_FAIL_SEND BIT(0)
-#define TCP_ACCECN_ACE_FAIL_RECV BIT(1)
-#define TCP_ACCECN_OPT_FAIL_SEND BIT(2)
-#define TCP_ACCECN_OPT_FAIL_RECV BIT(3)
-
static inline bool tcp_accecn_ace_fail_send(const struct tcp_sock *tp)
{
return tp->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_SEND;
@@ -91,11 +92,6 @@ static inline void tcp_accecn_fail_mode_set(struct tcp_sock *tp, u8 mode)
tp->accecn_fail_mode |= mode;
}
-#define TCP_ACCECN_OPT_NOT_SEEN 0x0
-#define TCP_ACCECN_OPT_EMPTY_SEEN 0x1
-#define TCP_ACCECN_OPT_COUNTER_SEEN 0x2
-#define TCP_ACCECN_OPT_FAIL_SEEN 0x3
-
static inline u8 tcp_accecn_ace(const struct tcphdr *th)
{
return (th->ae << 2) | (th->cwr << 1) | th->ece;
@@ -169,7 +165,9 @@ static inline void tcp_accecn_third_ack(struct sock *sk,
switch (ace) {
case 0x0:
/* Invalid value */
- tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV);
+ if (!TCP_SKB_CB(skb)->sacked)
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV |
+ TCP_ACCECN_OPT_FAIL_RECV);
break;
case 0x7:
case 0x5:
@@ -398,6 +396,7 @@ static inline void tcp_accecn_init_counters(struct tcp_sock *tp)
tp->received_ce_pending = 0;
__tcp_accecn_init_bytes_counters(tp->received_ecn_bytes);
__tcp_accecn_init_bytes_counters(tp->delivered_ecn_bytes);
+ tp->accecn_opt_sent_w_dsack = 0;
tp->accecn_minlen = 0;
tp->accecn_opt_demand = 0;
tp->est_ecnfield = 0;
@@ -467,6 +466,26 @@ static inline u8 tcp_accecn_option_init(const struct sk_buff *skb,
return TCP_ACCECN_OPT_COUNTER_SEEN;
}
+static inline void tcp_ecn_rcv_synack_accecn(struct sock *sk,
+ const struct sk_buff *skb, u8 dsf)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
+ tp->syn_ect_rcv = dsf & INET_ECN_MASK;
+ /* Demand an Accurate ECN option in response to the SYN on the SYN/ACK;
+ * the TCP server will try to send one more packet with an AccECN
+ * Option at a later point during the connection.
+ */
+ if (tp->rx_opt.accecn &&
+ tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
+ u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
+
+ tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
+ tp->accecn_opt_demand = 2;
+ }
+}
+
/* See Table 2 of the AccECN draft */
static inline void tcp_ecn_rcv_synack(struct sock *sk, const struct sk_buff *skb,
const struct tcphdr *th, u8 ip_dsfield)
@@ -489,32 +508,32 @@ static inline void tcp_ecn_rcv_synack(struct sock *sk, const struct sk_buff *skb
tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
break;
case 0x1:
- case 0x5:
/* +========+========+============+=============+
* | A | B | SYN/ACK | Feedback |
* | | | B->A | Mode of A |
* | | | AE CWR ECE | |
* +========+========+============+=============+
- * | AccECN | Nonce | 1 0 1 | (Reserved) |
* | AccECN | ECN | 0 0 1 | Classic ECN |
* | Nonce | AccECN | 0 0 1 | Classic ECN |
* | ECN | AccECN | 0 0 1 | Classic ECN |
* +========+========+============+=============+
*/
- if (tcp_ecn_mode_pending(tp))
- /* Downgrade from AccECN, or requested initially */
+ if (tcp_ca_no_fallback_rfc3168(sk))
+ tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
+ else
tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
break;
- default:
- tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
- tp->syn_ect_rcv = ip_dsfield & INET_ECN_MASK;
- if (tp->rx_opt.accecn &&
- tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
- u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
-
- tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
- tp->accecn_opt_demand = 2;
+ case 0x5:
+ if (tcp_ecn_mode_pending(tp)) {
+ tcp_ecn_rcv_synack_accecn(sk, skb, ip_dsfield);
+ if (INET_ECN_is_ce(ip_dsfield)) {
+ tp->received_ce++;
+ tp->received_ce_pending++;
+ }
}
+ break;
+ default:
+ tcp_ecn_rcv_synack_accecn(sk, skb, ip_dsfield);
if (INET_ECN_is_ce(ip_dsfield) &&
tcp_accecn_validate_syn_feedback(sk, ace,
tp->syn_ect_snt)) {
@@ -525,9 +544,11 @@ static inline void tcp_ecn_rcv_synack(struct sock *sk, const struct sk_buff *skb
}
}
-static inline void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th,
+static inline void tcp_ecn_rcv_syn(struct sock *sk, const struct tcphdr *th,
const struct sk_buff *skb)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
if (tcp_ecn_mode_pending(tp)) {
if (!tcp_accecn_syn_requested(th)) {
/* Downgrade to classic ECN feedback */
@@ -539,7 +560,8 @@ static inline void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th,
tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
}
}
- if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || !th->cwr))
+ if (tcp_ecn_mode_rfc3168(tp) &&
+ (!th->ece || !th->cwr || tcp_ca_no_fallback_rfc3168(sk)))
tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
}
@@ -561,7 +583,7 @@ static inline void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
else if (tcp_ca_needs_ecn(sk) ||
tcp_bpf_ca_needs_ecn(sk))
- INET_ECN_xmit(sk);
+ INET_ECN_xmit_ect_1_negotiation(sk);
if (tp->ecn_flags & TCP_ECN_MODE_ACCECN) {
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ACE;
@@ -579,7 +601,8 @@ static inline void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
bool use_ecn, use_accecn;
u8 tcp_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
- use_accecn = tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ACCECN;
+ use_accecn = tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ACCECN ||
+ tcp_ca_needs_accecn(sk);
use_ecn = tcp_ecn == TCP_ECN_IN_ECN_OUT_ECN ||
tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ECN ||
tcp_ca_needs_ecn(sk) || bpf_needs_ecn || use_accecn;
@@ -595,7 +618,7 @@ static inline void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
if (use_ecn) {
if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
- INET_ECN_xmit(sk);
+ INET_ECN_xmit_ect_1_negotiation(sk);
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
if (use_accecn) {
@@ -619,12 +642,22 @@ static inline void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
}
static inline void
-tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
+ enum tcp_synack_type synack_type)
{
- if (tcp_rsk(req)->accecn_ok)
- tcp_accecn_echo_syn_ect(th, tcp_rsk(req)->syn_ect_rcv);
- else if (inet_rsk(req)->ecn_ok)
- th->ece = 1;
+ /* Accurate ECN shall retransmit the SYN/ACK with ACE=0 if the
+ * previously retransmitted SYN/ACK also timed out.
+ */
+ if (!req->num_timeout || synack_type != TCP_SYNACK_RETRANS) {
+ if (tcp_rsk(req)->accecn_ok)
+ tcp_accecn_echo_syn_ect(th, tcp_rsk(req)->syn_ect_rcv);
+ else if (inet_rsk(req)->ecn_ok)
+ th->ece = 1;
+ } else if (tcp_rsk(req)->accecn_ok) {
+ th->ae = 0;
+ th->cwr = 0;
+ th->ece = 0;
+ }
}
static inline bool tcp_accecn_option_beacon_check(const struct sock *sk)
diff --git a/include/net/udp.h b/include/net/udp.h
index a061d1b22ddc..700dbedcb15f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -527,18 +527,18 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
* SNMP statistics for UDP and UDP-Lite
*/
#define UDP_INC_STATS(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ if (unlikely(is_udplite)) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite) do { \
- if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP6_INC_STATS(net, field, is_udplite) do { \
- if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+ if (unlikely(is_udplite)) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite) do { \
- if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
+ if (unlikely(__lite)) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 9acef2fbd2fd..d9c6d04bb3b5 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -10,6 +10,11 @@
#include <net/ipv6_stubs.h>
#endif
+#define UDP_TUNNEL_PARTIAL_FEATURES NETIF_F_GSO_ENCAP_ALL
+#define UDP_TUNNEL_STRIPPED_GSO_TYPES ((UDP_TUNNEL_PARTIAL_FEATURES | \
+ NETIF_F_GSO_PARTIAL) >> \
+ NETIF_F_GSO_SHIFT)
+
struct udp_port_cfg {
u8 family;
@@ -145,6 +150,33 @@ void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
__be16 src_port, __be16 dst_port, bool nocheck,
u16 ip6cb_flags);
+static inline bool udp_tunnel_handle_partial(struct sk_buff *skb)
+{
+ bool double_encap = !!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL);
+
+ /*
+ * If the skb went through partial segmentation, lower devices
+ * will not need to offload the related features, except for
+ * UDP_TUNNEL, which will be re-added later by
+ * udp_tunnel_handle_offloads().
+ */
+ if (double_encap)
+ skb_shinfo(skb)->gso_type &= ~UDP_TUNNEL_STRIPPED_GSO_TYPES;
+ return double_encap;
+}
+
+static inline void udp_tunnel_set_inner_protocol(struct sk_buff *skb,
+ bool double_encap,
+ __be16 inner_proto)
+{
+ /*
+ * The inner protocol has been set by the nested tunnel, don't
+ * override it.
+ */
+ if (!double_encap)
+ skb_set_inner_protocol(skb, inner_proto);
+}
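(How the two helpers pair in a transmit path, sketched for a hypothetical tunnel driver; udp_tunnel_handle_offloads() is the existing helper referenced in the comment above.)

static int example_tunnel_xmit_prep(struct sk_buff *skb, __be16 inner_proto)
{
	bool double_encap;
	int err;

	/* Strip GSO types a nested tunnel already handled via GSO_PARTIAL. */
	double_encap = udp_tunnel_handle_partial(skb);

	err = udp_tunnel_handle_offloads(skb, true);
	if (err)
		return err;

	/* Keep the nested tunnel's inner protocol when double-encapsulated. */
	udp_tunnel_set_inner_protocol(skb, double_encap, inner_proto);
	return 0;
}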
+
void udp_tunnel_sock_release(struct socket *sock);
struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 0a14daaa5dd4..10d3edde6b2f 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1156,19 +1156,19 @@ struct xfrm_offload {
#define CRYPTO_INVALID_PROTOCOL 128
/* Used to keep whole l2 header for transport mode GRO */
- __u32 orig_mac_len;
+ __u16 orig_mac_len;
__u8 proto;
__u8 inner_ipproto;
};
struct sec_path {
- int len;
- int olen;
- int verified_cnt;
-
struct xfrm_state *xvec[XFRM_MAX_DEPTH];
struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
+
+ u8 len;
+ u8 olen;
+ u8 verified_cnt;
};
struct sec_path *secpath_set(struct sk_buff *skb);
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 92a2358c6ce3..0b1abdb99c9e 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -90,11 +90,6 @@ struct xsk_buff_pool {
* destructor callback.
*/
spinlock_t cq_prod_lock;
- /* Mutual exclusion of the completion ring in the SKB mode.
- * Protect: when sockets share a single cq when the same netdev
- * and queue id is shared.
- */
- spinlock_t cq_cached_prod_lock;
struct xdp_buff_xsk *free_heads[];
};
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index eaecc3c5f772..fdb785fa4613 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -339,9 +339,11 @@ TRACE_EVENT(aer_event,
const u32 status,
const u8 severity,
const u8 tlp_header_valid,
- struct pcie_tlp_log *tlp),
+ struct pcie_tlp_log *tlp,
+ const char *bus_type),
- TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp),
+
+ TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp, bus_type),
TP_STRUCT__entry(
__string( dev_name, dev_name )
@@ -349,10 +351,12 @@ TRACE_EVENT(aer_event,
__field( u8, severity )
__field( u8, tlp_header_valid)
__array( u32, tlp_header, PCIE_STD_MAX_TLP_HEADERLOG)
+ __string( bus_type, bus_type )
),
TP_fast_assign(
__assign_str(dev_name);
+ __assign_str(bus_type);
__entry->status = status;
__entry->severity = severity;
__entry->tlp_header_valid = tlp_header_valid;
@@ -364,8 +368,8 @@ TRACE_EVENT(aer_event,
}
),
- TP_printk("%s PCIe Bus Error: severity=%s, %s, TLP Header=%s\n",
- __get_str(dev_name),
+ TP_printk("%s %s Bus Error: severity=%s, %s, TLP Header=%s\n",
+ __get_str(dev_name), __get_str(bus_type),
__entry->severity == AER_CORRECTABLE ? "Corrected" :
__entry->severity == AER_FATAL ?
"Fatal" : "Uncorrected, non-fatal",
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6aad66bc5dd7..3f3827e1c711 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -15,6 +15,7 @@
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/bvec.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -43,6 +44,7 @@
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>
#include <linux/pci-tph.h>
+#include <linux/dma-buf.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -764,6 +766,7 @@ enum ib_event_type {
IB_EVENT_CLIENT_REREGISTER,
IB_EVENT_GID_CHANGE,
IB_EVENT_WQ_FATAL,
+ IB_EVENT_DEVICE_SPEED_CHANGE,
};
const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
@@ -877,6 +880,20 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
*/
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
+struct ib_port_speed_info {
+ const char *str;
+ int rate; /* in deci-Gb/sec (100 Mbps units) */
+};
+
+/**
+ * ib_port_attr_to_speed_info - Convert port attributes to speed information
+ * @attr: Port attributes containing active_speed and active_width
+ * @speed_info: Speed information to return
+ *
+ * Returns 0 on success, -EINVAL on error.
+ */
+int ib_port_attr_to_speed_info(struct ib_port_attr *attr,
+ struct ib_port_speed_info *speed_info);
/**
* enum ib_mr_type - memory region type
@@ -2348,6 +2365,9 @@ struct rdma_user_mmap_entry {
unsigned long start_pgoff;
size_t npages;
bool driver_removed;
+ /* protects access to dmabufs */
+ struct mutex dmabufs_lock;
+ struct list_head dmabufs;
};
/* Return the offset (in bytes) the user should pass to libc's mmap() */
@@ -2403,6 +2423,8 @@ struct ib_device_ops {
int comp_vector);
int (*query_port)(struct ib_device *device, u32 port_num,
struct ib_port_attr *port_attr);
+ int (*query_port_speed)(struct ib_device *device, u32 port_num,
+ u64 *speed);
int (*modify_port)(struct ib_device *device, u32 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
@@ -2483,6 +2505,11 @@ struct ib_device_ops {
* Therefore needs to be implemented by the driver in mmap_free.
*/
void (*mmap_free)(struct rdma_user_mmap_entry *entry);
+ int (*mmap_get_pfns)(struct rdma_user_mmap_entry *entry,
+ struct phys_vec *phys_vec,
+ struct p2pdma_provider **provider);
+ struct rdma_user_mmap_entry *(*pgoff_to_mmap_entry)(struct ib_ucontext *ucontext,
+ off_t pg_off);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
@@ -4249,6 +4276,47 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
dma_unmap_page(dev->dma_device, addr, size, direction);
}
+/**
+ * ib_dma_map_bvec - Map a bio_vec to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @bvec: The bio_vec to map
+ * @direction: The direction of the DMA
+ *
+ * Returns a DMA address for the bio_vec. The caller must check the
+ * result with ib_dma_mapping_error() before use; a failed mapping
+ * must not be passed to ib_dma_unmap_bvec().
+ *
+ * For software RDMA devices (rxe, siw), returns a virtual address
+ * and no actual DMA mapping occurs.
+ */
+static inline u64 ib_dma_map_bvec(struct ib_device *dev,
+ struct bio_vec *bvec,
+ enum dma_data_direction direction)
+{
+ if (ib_uses_virt_dma(dev))
+ return (uintptr_t)bvec_virt(bvec);
+ return dma_map_phys(dev->dma_device, bvec_phys(bvec),
+ bvec->bv_len, direction, 0);
+}
+
+/**
+ * ib_dma_unmap_bvec - Unmap a bio_vec DMA mapping
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address returned by ib_dma_map_bvec()
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ *
+ * Releases a DMA mapping created by ib_dma_map_bvec(). For software
+ * RDMA devices this is a no-op since no actual mapping occurred.
+ */
+static inline void ib_dma_unmap_bvec(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_phys(dev->dma_device, addr, size, direction, 0);
+}
+
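(Usage sketch of the mapping contract spelled out in the kernel-doc above; the caller and the elided SGE construction are hypothetical.)

static int example_map_bvec(struct ib_device *dev, struct bio_vec *bv)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_bvec(dev, bv, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... build an ib_sge from dma_addr / bv->bv_len and post it ... */

	ib_dma_unmap_bvec(dev, dma_addr, bv->bv_len, DMA_TO_DEVICE);
	return 0;
}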
int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
struct scatterlist *sg, int nents,
@@ -4545,8 +4613,6 @@ static inline bool ib_device_try_get(struct ib_device *dev)
void ib_device_put(struct ib_device *device);
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
enum rdma_driver_id driver_id);
-struct ib_device *ib_device_get_by_name(const char *name,
- enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 9bd930a83e6e..6de6fd8bd15e 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -169,6 +169,23 @@ struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
void rdma_destroy_id(struct rdma_cm_id *id);
/**
+ * rdma_restrict_node_type - Restrict an RDMA identifier to specific
+ * RDMA device node type.
+ *
+ * @id: RDMA identifier.
+ * @node_type: The device node type. Only RDMA_NODE_UNSPECIFIED (default),
+ * RDMA_NODE_RNIC and RDMA_NODE_IB_CA are allowed
+ *
+ * This allows the caller to restrict the candidate devices to
+ * iWARP (RDMA_NODE_RNIC) or InfiniBand/RoCEv1/RoCEv2 (RDMA_NODE_IB_CA).
+ *
+ * It needs to be called before the RDMA identifier is bound
+ * to a device, which means it should be called before
+ * rdma_bind_addr(), rdma_resolve_addr() and rdma_listen().
+ */
+int rdma_restrict_node_type(struct rdma_cm_id *id, u8 node_type);
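(Usage sketch showing the required ordering, restriction before any binding; the caller is hypothetical, the other calls are existing RDMA CM entry points.)

static int example_listen_iwarp_only(struct rdma_cm_id *id,
				     struct sockaddr *addr)
{
	int ret;

	/* Must precede rdma_bind_addr()/rdma_resolve_addr()/rdma_listen(). */
	ret = rdma_restrict_node_type(id, RDMA_NODE_RNIC);
	if (ret)
		return ret;

	ret = rdma_bind_addr(id, addr);
	if (ret)
		return ret;

	return rdma_listen(id, 0);
}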
+
+/**
* rdma_bind_addr - Bind an RDMA identifier to a source address and
* associated RDMA device, if needed.
*
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index d606cac48233..6a1d08614e09 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -5,6 +5,7 @@
#ifndef _RDMA_RW_H
#define _RDMA_RW_H
+#include <linux/bvec.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
@@ -31,6 +32,14 @@ struct rdma_rw_ctx {
struct ib_rdma_wr *wrs;
} map;
+ /* for IOVA-based mapping of bvecs into contiguous DMA range: */
+ struct {
+ struct dma_iova_state state;
+ struct ib_sge sge;
+ struct ib_rdma_wr wr;
+ size_t mapped_len;
+ } iova;
+
/* for registering multiple WRs: */
struct rdma_rw_reg_ctx {
struct ib_sge sge;
@@ -38,6 +47,7 @@ struct rdma_rw_ctx {
struct ib_reg_wr reg_wr;
struct ib_send_wr inv_wr;
struct ib_mr *mr;
+ struct sg_table sgt;
} *reg;
};
};
@@ -49,6 +59,16 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
u32 port_num, struct scatterlist *sg, u32 sg_cnt,
enum dma_data_direction dir);
+struct bio_vec;
+
+int rdma_rw_ctx_init_bvec(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, const struct bio_vec *bvecs, u32 nr_bvec,
+ struct bvec_iter iter, u64 remote_addr, u32 rkey,
+ enum dma_data_direction dir);
+void rdma_rw_ctx_destroy_bvec(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, const struct bio_vec *bvecs, u32 nr_bvec,
+ enum dma_data_direction dir);
+
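A hedged sketch of the intended init/post/destroy pairing (demo_* and the WRITE direction are illustrative; error handling abridged):

static int demo_rw_write_bvecs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			       u32 port, const struct bio_vec *bvecs,
			       u32 nr_bvec, struct bvec_iter iter,
			       u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
	int ret;

	ret = rdma_rw_ctx_init_bvec(ctx, qp, port, bvecs, nr_bvec, iter,
				    remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(ctx, qp, port, cqe, NULL);
	if (ret)	/* on failure, release the mapping again */
		rdma_rw_ctx_destroy_bvec(ctx, qp, port, bvecs, nr_bvec,
					 DMA_TO_DEVICE);
	return ret;
}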
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
@@ -66,6 +86,8 @@ int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
unsigned int maxpages);
+unsigned int rdma_rw_max_send_wr(struct ib_device *dev, u32 port_num,
+ unsigned int max_rdma_ctxs, u32 create_flags);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
void rdma_rw_cleanup_mrs(struct ib_qp *qp);
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index 26ba919ac245..6a253b7dc5ea 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -186,6 +186,7 @@ struct ib_uverbs_file {
extern const struct uverbs_obj_type_class uverbs_idr_class;
extern const struct uverbs_obj_type_class uverbs_fd_class;
int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
+int uverbs_uobject_release(struct ib_uobject *uobj);
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
diff --git a/include/rv/automata.h b/include/rv/automata.h
index eb9e636809a0..4a4eb40cf09a 100644
--- a/include/rv/automata.h
+++ b/include/rv/automata.h
@@ -6,70 +6,76 @@
* models in C generated by the dot2k tool.
*/
+#ifndef _RV_AUTOMATA_H
+#define _RV_AUTOMATA_H
+
+#ifndef MONITOR_NAME
+#error "MONITOR_NAME macro is not defined. Did you include $(MODEL_NAME).h generated by rvgen?"
+#endif
+
+#define RV_AUTOMATON_NAME CONCATENATE(automaton_, MONITOR_NAME)
+#define EVENT_MAX CONCATENATE(event_max_, MONITOR_NAME)
+#define STATE_MAX CONCATENATE(state_max_, MONITOR_NAME)
+#define events CONCATENATE(events_, MONITOR_NAME)
+#define states CONCATENATE(states_, MONITOR_NAME)
+
+/*
+ * model_get_state_name - return the (string) name of the given state
+ */
+static char *model_get_state_name(enum states state)
+{
+ if ((state < 0) || (state >= STATE_MAX))
+ return "INVALID";
+
+ return RV_AUTOMATON_NAME.state_names[state];
+}
+
+/*
+ * model_get_event_name - return the (string) name of the given event
+ */
+static char *model_get_event_name(enum events event)
+{
+ if ((event < 0) || (event >= EVENT_MAX))
+ return "INVALID";
+
+ return RV_AUTOMATON_NAME.event_names[event];
+}
+
/*
- * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata
+ * model_get_initial_state - return the automaton's initial state
+ */
+static inline enum states model_get_initial_state(void)
+{
+ return RV_AUTOMATON_NAME.initial_state;
+}
+
+/*
+ * model_get_next_state - process an automaton event occurrence
*
- * Define a set of helper functions for automata. The 'name' argument is used
- * as suffix for the functions and data. These functions will handle automaton
- * with data type 'type'.
+ * Given the current state (curr_state) and the event (event), returns
+ * the next state, or INVALID_STATE in case of error.
+ */
+static inline enum states model_get_next_state(enum states curr_state,
+ enum events event)
+{
+ if ((curr_state < 0) || (curr_state >= STATE_MAX))
+ return INVALID_STATE;
+
+ if ((event < 0) || (event >= EVENT_MAX))
+ return INVALID_STATE;
+
+ return RV_AUTOMATON_NAME.function[curr_state][event];
+}
+
+/*
+ * model_is_final_state - check if the given state is a final state
*/
-#define DECLARE_AUTOMATA_HELPERS(name, type) \
- \
-/* \
- * model_get_state_name_##name - return the (string) name of the given state \
- */ \
-static char *model_get_state_name_##name(enum states_##name state) \
-{ \
- if ((state < 0) || (state >= state_max_##name)) \
- return "INVALID"; \
- \
- return automaton_##name.state_names[state]; \
-} \
- \
-/* \
- * model_get_event_name_##name - return the (string) name of the given event \
- */ \
-static char *model_get_event_name_##name(enum events_##name event) \
-{ \
- if ((event < 0) || (event >= event_max_##name)) \
- return "INVALID"; \
- \
- return automaton_##name.event_names[event]; \
-} \
- \
-/* \
- * model_get_initial_state_##name - return the automaton's initial state \
- */ \
-static inline type model_get_initial_state_##name(void) \
-{ \
- return automaton_##name.initial_state; \
-} \
- \
-/* \
- * model_get_next_state_##name - process an automaton event occurrence \
- * \
- * Given the current state (curr_state) and the event (event), returns \
- * the next state, or INVALID_STATE in case of error. \
- */ \
-static inline type model_get_next_state_##name(enum states_##name curr_state, \
- enum events_##name event) \
-{ \
- if ((curr_state < 0) || (curr_state >= state_max_##name)) \
- return INVALID_STATE; \
- \
- if ((event < 0) || (event >= event_max_##name)) \
- return INVALID_STATE; \
- \
- return automaton_##name.function[curr_state][event]; \
-} \
- \
-/* \
- * model_is_final_state_##name - check if the given state is a final state \
- */ \
-static inline bool model_is_final_state_##name(enum states_##name state) \
-{ \
- if ((state < 0) || (state >= state_max_##name)) \
- return 0; \
- \
- return automaton_##name.final_states[state]; \
+static inline bool model_is_final_state(enum states state)
+{
+ if ((state < 0) || (state >= STATE_MAX))
+ return 0;
+
+ return RV_AUTOMATON_NAME.final_states[state];
}
+
+#endif
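To illustrate the convention these macros assume, a hypothetical rvgen-generated model "wip" would look roughly like this (names and tables illustrative):

/* wip.h (generated): */
#define MONITOR_NAME wip

enum states_wip {
	preemptive_wip = 0,
	non_preemptive_wip,
	state_max_wip,
};

enum events_wip {
	preempt_disable_wip = 0,
	preempt_enable_wip,
	event_max_wip,
};

/* ...plus automaton_wip with state_names, event_names and function... */

/* wip.c (monitor source): */
#include "wip.h"
#include <rv/automata.h>

/* The unsuffixed helpers now resolve against the "wip" model: */
static bool wip_step(enum states *state, enum events event)
{
	*state = model_get_next_state(*state, event);
	return *state != INVALID_STATE;
}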
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 0cef64366538..db11d41bb438 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -8,90 +8,91 @@
* The dot2k tool is available at tools/verification/dot2k/
*
* For further information, see:
- * Documentation/trace/rv/da_monitor_synthesis.rst
+ * Documentation/trace/rv/monitor_synthesis.rst
*/
+#ifndef _RV_DA_MONITOR_H
+#define _RV_DA_MONITOR_H
+
#include <rv/automata.h>
#include <linux/rv.h>
+#include <linux/stringify.h>
#include <linux/bug.h>
#include <linux/sched.h>
+static struct rv_monitor rv_this;
+
+static void react(enum states curr_state, enum events event)
+{
+ rv_react(&rv_this,
+ "rv: monitor %s does not allow event %s on state %s\n",
+ __stringify(MONITOR_NAME),
+ model_get_event_name(event),
+ model_get_state_name(curr_state));
+}
+
+/*
+ * da_monitor_reset - reset a monitor, setting it to the initial state
+ */
+static inline void da_monitor_reset(struct da_monitor *da_mon)
+{
+ da_mon->monitoring = 0;
+ da_mon->curr_state = model_get_initial_state();
+}
+
+/*
+ * da_monitor_start - start monitoring
+ *
+ * The monitor will ignore all events until monitoring is set to true. This
+ * function needs to be called to tell the monitor to start monitoring.
+ */
+static inline void da_monitor_start(struct da_monitor *da_mon)
+{
+ da_mon->curr_state = model_get_initial_state();
+ da_mon->monitoring = 1;
+}
+
/*
- * Generic helpers for all types of deterministic automata monitors.
- */
-#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
- \
-static void react_##name(type curr_state, type event) \
-{ \
- rv_react(&rv_##name, \
- "rv: monitor %s does not allow event %s on state %s\n", \
- #name, \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(curr_state)); \
-} \
- \
-/* \
- * da_monitor_reset_##name - reset a monitor and setting it to init state \
- */ \
-static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
-{ \
- da_mon->monitoring = 0; \
- da_mon->curr_state = model_get_initial_state_##name(); \
-} \
- \
-/* \
- * da_monitor_start_##name - start monitoring \
- * \
- * The monitor will ignore all events until monitoring is set to true. This \
- * function needs to be called to tell the monitor to start monitoring. \
- */ \
-static inline void da_monitor_start_##name(struct da_monitor *da_mon) \
-{ \
- da_mon->curr_state = model_get_initial_state_##name(); \
- da_mon->monitoring = 1; \
-} \
- \
-/* \
- * da_monitoring_##name - returns true if the monitor is processing events \
- */ \
-static inline bool da_monitoring_##name(struct da_monitor *da_mon) \
-{ \
- return da_mon->monitoring; \
-} \
- \
-/* \
- * da_monitor_enabled_##name - checks if the monitor is enabled \
- */ \
-static inline bool da_monitor_enabled_##name(void) \
-{ \
- /* global switch */ \
- if (unlikely(!rv_monitoring_on())) \
- return 0; \
- \
- /* monitor enabled */ \
- if (unlikely(!rv_##name.enabled)) \
- return 0; \
- \
- return 1; \
-} \
- \
-/* \
- * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \
- */ \
-static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \
-{ \
- \
- if (!da_monitor_enabled_##name()) \
- return 0; \
- \
- /* monitor is actually monitoring */ \
- if (unlikely(!da_monitoring_##name(da_mon))) \
- return 0; \
- \
- return 1; \
+ * da_monitoring - returns true if the monitor is processing events
+ */
+static inline bool da_monitoring(struct da_monitor *da_mon)
+{
+ return da_mon->monitoring;
}
/*
+ * da_monitor_enabled - checks if the monitor is enabled
+ */
+static inline bool da_monitor_enabled(void)
+{
+ /* global switch */
+ if (unlikely(!rv_monitoring_on()))
+ return 0;
+
+ /* monitor enabled */
+ if (unlikely(!rv_this.enabled))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * da_monitor_handling_event - checks if the monitor is ready to handle events
+ */
+static inline bool da_monitor_handling_event(struct da_monitor *da_mon)
+{
+ if (!da_monitor_enabled())
+ return 0;
+
+ /* monitor is actually monitoring */
+ if (unlikely(!da_monitoring(da_mon)))
+ return 0;
+
+ return 1;
+}
+
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+/*
* Event handler for implicit monitors. An implicit monitor is one for which
* the handler does not need to specify which da_monitor to manipulate.
* Examples of implicit monitors are the per_cpu and global ones.
@@ -100,38 +101,39 @@ static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon)
* warn and reset the monitor if it runs out of retries. The monitor should be
* able to handle events racing in various orders.
*/
-#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
- \
-static inline bool \
-da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
-{ \
- enum states_##name curr_state, next_state; \
- \
- curr_state = READ_ONCE(da_mon->curr_state); \
- for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
- next_state = model_get_next_state_##name(curr_state, event); \
- if (next_state == INVALID_STATE) { \
- react_##name(curr_state, event); \
- trace_error_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- return false; \
- } \
- if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
- trace_event_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- return true; \
- } \
- } \
- \
- trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
- pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
- " retries reached for event %s, resetting monitor %s", \
- model_get_event_name_##name(event), #name); \
- return false; \
-} \
+static inline bool da_event(struct da_monitor *da_mon, enum events event)
+{
+ enum states curr_state, next_state;
+
+ curr_state = READ_ONCE(da_mon->curr_state);
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
+ next_state = model_get_next_state(curr_state, event);
+ if (next_state == INVALID_STATE) {
+ react(curr_state, event);
+ CONCATENATE(trace_error_, MONITOR_NAME)(
+ model_get_state_name(curr_state),
+ model_get_event_name(event));
+ return false;
+ }
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
+ CONCATENATE(trace_event_, MONITOR_NAME)(
+ model_get_state_name(curr_state),
+ model_get_event_name(event),
+ model_get_state_name(next_state),
+ model_is_final_state(next_state));
+ return true;
+ }
+ }
+
+ trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
+ " retries reached for event %s, resetting monitor %s",
+ model_get_event_name(event), __stringify(MONITOR_NAME));
+ return false;
+}
+
+#elif RV_MON_TYPE == RV_MON_PER_TASK
/*
* Event handler for per_task monitors.
*
@@ -139,395 +141,358 @@ da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
* warn and reset the monitor if it runs out of retries. The monitor should be
* able to handle events racing in various orders.
*/
-#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
- \
-static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
- enum events_##name event) \
-{ \
- enum states_##name curr_state, next_state; \
- \
- curr_state = READ_ONCE(da_mon->curr_state); \
- for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
- next_state = model_get_next_state_##name(curr_state, event); \
- if (next_state == INVALID_STATE) { \
- react_##name(curr_state, event); \
- trace_error_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- return false; \
- } \
- if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
- trace_event_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- return true; \
- } \
- } \
- \
- trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
- pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
- " retries reached for event %s, resetting monitor %s", \
- model_get_event_name_##name(event), #name); \
- return false; \
+
+static inline bool da_event(struct da_monitor *da_mon, struct task_struct *tsk,
+ enum events event)
+{
+ enum states curr_state, next_state;
+
+ curr_state = READ_ONCE(da_mon->curr_state);
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
+ next_state = model_get_next_state(curr_state, event);
+ if (next_state == INVALID_STATE) {
+ react(curr_state, event);
+ CONCATENATE(trace_error_, MONITOR_NAME)(tsk->pid,
+ model_get_state_name(curr_state),
+ model_get_event_name(event));
+ return false;
+ }
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
+ CONCATENATE(trace_event_, MONITOR_NAME)(tsk->pid,
+ model_get_state_name(curr_state),
+ model_get_event_name(event),
+ model_get_state_name(next_state),
+ model_is_final_state(next_state));
+ return true;
+ }
+ }
+
+ trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
+ " retries reached for event %s, resetting monitor %s",
+ model_get_event_name(event), __stringify(MONITOR_NAME));
+ return false;
}
+#endif /* RV_MON_TYPE */
+#if RV_MON_TYPE == RV_MON_GLOBAL
/*
* Functions to define, init and get a global monitor.
*/
-#define DECLARE_DA_MON_INIT_GLOBAL(name, type) \
- \
-/* \
- * global monitor (a single variable) \
- */ \
-static struct da_monitor da_mon_##name; \
- \
-/* \
- * da_get_monitor_##name - return the global monitor address \
- */ \
-static struct da_monitor *da_get_monitor_##name(void) \
-{ \
- return &da_mon_##name; \
-} \
- \
-/* \
- * da_monitor_reset_all_##name - reset the single monitor \
- */ \
-static void da_monitor_reset_all_##name(void) \
-{ \
- da_monitor_reset_##name(da_get_monitor_##name()); \
-} \
- \
-/* \
- * da_monitor_init_##name - initialize a monitor \
- */ \
-static inline int da_monitor_init_##name(void) \
-{ \
- da_monitor_reset_all_##name(); \
- return 0; \
-} \
- \
-/* \
- * da_monitor_destroy_##name - destroy the monitor \
- */ \
-static inline void da_monitor_destroy_##name(void) \
-{ \
- return; \
+
+/*
+ * global monitor (a single variable)
+ */
+static struct da_monitor da_mon_this;
+
+/*
+ * da_get_monitor - return the global monitor address
+ */
+static struct da_monitor *da_get_monitor(void)
+{
+ return &da_mon_this;
+}
+
+/*
+ * da_monitor_reset_all - reset the single monitor
+ */
+static void da_monitor_reset_all(void)
+{
+ da_monitor_reset(da_get_monitor());
+}
+
+/*
+ * da_monitor_init - initialize a monitor
+ */
+static inline int da_monitor_init(void)
+{
+ da_monitor_reset_all();
+ return 0;
}
/*
+ * da_monitor_destroy - destroy the monitor
+ */
+static inline void da_monitor_destroy(void) { }
+
+#elif RV_MON_TYPE == RV_MON_PER_CPU
+/*
* Functions to define, init and get a per-cpu monitor.
*/
-#define DECLARE_DA_MON_INIT_PER_CPU(name, type) \
- \
-/* \
- * per-cpu monitor variables \
- */ \
-static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
- \
-/* \
- * da_get_monitor_##name - return current CPU monitor address \
- */ \
-static struct da_monitor *da_get_monitor_##name(void) \
-{ \
- return this_cpu_ptr(&da_mon_##name); \
-} \
- \
-/* \
- * da_monitor_reset_all_##name - reset all CPUs' monitor \
- */ \
-static void da_monitor_reset_all_##name(void) \
-{ \
- struct da_monitor *da_mon; \
- int cpu; \
- for_each_cpu(cpu, cpu_online_mask) { \
- da_mon = per_cpu_ptr(&da_mon_##name, cpu); \
- da_monitor_reset_##name(da_mon); \
- } \
-} \
- \
-/* \
- * da_monitor_init_##name - initialize all CPUs' monitor \
- */ \
-static inline int da_monitor_init_##name(void) \
-{ \
- da_monitor_reset_all_##name(); \
- return 0; \
-} \
- \
-/* \
- * da_monitor_destroy_##name - destroy the monitor \
- */ \
-static inline void da_monitor_destroy_##name(void) \
-{ \
- return; \
+
+/*
+ * per-cpu monitor variables
+ */
+static DEFINE_PER_CPU(struct da_monitor, da_mon_this);
+
+/*
+ * da_get_monitor - return current CPU monitor address
+ */
+static struct da_monitor *da_get_monitor(void)
+{
+ return this_cpu_ptr(&da_mon_this);
}
/*
+ * da_monitor_reset_all - reset all CPUs' monitor
+ */
+static void da_monitor_reset_all(void)
+{
+ struct da_monitor *da_mon;
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ da_mon = per_cpu_ptr(&da_mon_this, cpu);
+ da_monitor_reset(da_mon);
+ }
+}
+
+/*
+ * da_monitor_init - initialize all CPUs' monitor
+ */
+static inline int da_monitor_init(void)
+{
+ da_monitor_reset_all();
+ return 0;
+}
+
+/*
+ * da_monitor_destroy - destroy the monitor
+ */
+static inline void da_monitor_destroy(void) { }
+
+#elif RV_MON_TYPE == RV_MON_PER_TASK
+/*
* Functions to define, init and get a per-task monitor.
*/
-#define DECLARE_DA_MON_INIT_PER_TASK(name, type) \
- \
-/* \
- * The per-task monitor is stored a vector in the task struct. This variable \
- * stores the position on the vector reserved for this monitor. \
- */ \
-static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
- \
-/* \
- * da_get_monitor_##name - return the monitor in the allocated slot for tsk \
- */ \
-static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \
-{ \
- return &tsk->rv[task_mon_slot_##name].da_mon; \
-} \
- \
-static void da_monitor_reset_all_##name(void) \
-{ \
- struct task_struct *g, *p; \
- int cpu; \
- \
- read_lock(&tasklist_lock); \
- for_each_process_thread(g, p) \
- da_monitor_reset_##name(da_get_monitor_##name(p)); \
- for_each_present_cpu(cpu) \
- da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu))); \
- read_unlock(&tasklist_lock); \
-} \
- \
-/* \
- * da_monitor_init_##name - initialize the per-task monitor \
- * \
- * Try to allocate a slot in the task's vector of monitors. If there \
- * is an available slot, use it and reset all task's monitor. \
- */ \
-static int da_monitor_init_##name(void) \
-{ \
- int slot; \
- \
- slot = rv_get_task_monitor_slot(); \
- if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \
- return slot; \
- \
- task_mon_slot_##name = slot; \
- \
- da_monitor_reset_all_##name(); \
- return 0; \
-} \
- \
-/* \
- * da_monitor_destroy_##name - return the allocated slot \
- */ \
-static inline void da_monitor_destroy_##name(void) \
-{ \
- if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \
- WARN_ONCE(1, "Disabling a disabled monitor: " #name); \
- return; \
- } \
- rv_put_task_monitor_slot(task_mon_slot_##name); \
- task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
- return; \
+
+/*
+ * The per-task monitor is stored in a vector in the task struct. This
+ * variable stores the position in the vector reserved for this monitor.
+ */
+static int task_mon_slot = RV_PER_TASK_MONITOR_INIT;
+
+/*
+ * da_get_monitor - return the monitor in the allocated slot for tsk
+ */
+static inline struct da_monitor *da_get_monitor(struct task_struct *tsk)
+{
+ return &tsk->rv[task_mon_slot].da_mon;
+}
+
+static void da_monitor_reset_all(void)
+{
+ struct task_struct *g, *p;
+ int cpu;
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, p)
+ da_monitor_reset(da_get_monitor(p));
+ for_each_present_cpu(cpu)
+ da_monitor_reset(da_get_monitor(idle_task(cpu)));
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * da_monitor_init - initialize the per-task monitor
+ *
+ * Try to allocate a slot in the task's vector of monitors. If there
+ * is an available slot, use it and reset all tasks' monitors.
+ */
+static int da_monitor_init(void)
+{
+ int slot;
+
+ slot = rv_get_task_monitor_slot();
+ if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT)
+ return slot;
+
+ task_mon_slot = slot;
+
+ da_monitor_reset_all();
+ return 0;
}
/*
- * Handle event for implicit monitor: da_get_monitor_##name() will figure out
+ * da_monitor_destroy - return the allocated slot
+ */
+static inline void da_monitor_destroy(void)
+{
+ if (task_mon_slot == RV_PER_TASK_MONITOR_INIT) {
+ WARN_ONCE(1, "Disabling a disabled monitor: " __stringify(MONITOR_NAME));
+ return;
+ }
+ rv_put_task_monitor_slot(task_mon_slot);
+ task_mon_slot = RV_PER_TASK_MONITOR_INIT;
+}
+#endif /* RV_MON_TYPE */
+
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+/*
+ * Handle events for implicit monitors: da_get_monitor() will figure out
* the monitor.
*/
-#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \
- \
-static inline void __da_handle_event_##name(struct da_monitor *da_mon, \
- enum events_##name event) \
-{ \
- bool retval; \
- \
- retval = da_event_##name(da_mon, event); \
- if (!retval) \
- da_monitor_reset_##name(da_mon); \
-} \
- \
-/* \
- * da_handle_event_##name - handle an event \
- */ \
-static inline void da_handle_event_##name(enum events_##name event) \
-{ \
- struct da_monitor *da_mon = da_get_monitor_##name(); \
- bool retval; \
- \
- retval = da_monitor_handling_event_##name(da_mon); \
- if (!retval) \
- return; \
- \
- __da_handle_event_##name(da_mon, event); \
-} \
- \
-/* \
- * da_handle_start_event_##name - start monitoring or handle event \
- * \
- * This function is used to notify the monitor that the system is returning \
- * to the initial state, so the monitor can start monitoring in the next event. \
- * Thus: \
- * \
- * If the monitor already started, handle the event. \
- * If the monitor did not start yet, start the monitor but skip the event. \
- */ \
-static inline bool da_handle_start_event_##name(enum events_##name event) \
-{ \
- struct da_monitor *da_mon; \
- \
- if (!da_monitor_enabled_##name()) \
- return 0; \
- \
- da_mon = da_get_monitor_##name(); \
- \
- if (unlikely(!da_monitoring_##name(da_mon))) { \
- da_monitor_start_##name(da_mon); \
- return 0; \
- } \
- \
- __da_handle_event_##name(da_mon, event); \
- \
- return 1; \
-} \
- \
-/* \
- * da_handle_start_run_event_##name - start monitoring and handle event \
- * \
- * This function is used to notify the monitor that the system is in the \
- * initial state, so the monitor can start monitoring and handling event. \
- */ \
-static inline bool da_handle_start_run_event_##name(enum events_##name event) \
-{ \
- struct da_monitor *da_mon; \
- \
- if (!da_monitor_enabled_##name()) \
- return 0; \
- \
- da_mon = da_get_monitor_##name(); \
- \
- if (unlikely(!da_monitoring_##name(da_mon))) \
- da_monitor_start_##name(da_mon); \
- \
- __da_handle_event_##name(da_mon, event); \
- \
- return 1; \
+
+static inline void __da_handle_event(struct da_monitor *da_mon,
+ enum events event)
+{
+ bool retval;
+
+ retval = da_event(da_mon, event);
+ if (!retval)
+ da_monitor_reset(da_mon);
+}
+
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(enum events event)
+{
+ struct da_monitor *da_mon = da_get_monitor();
+ bool retval;
+
+ retval = da_monitor_handling_event(da_mon);
+ if (!retval)
+ return;
+
+ __da_handle_event(da_mon, event);
}
/*
+ * da_handle_start_event - start monitoring or handle event
+ *
+ * This function is used to notify the monitor that the system is returning
+ * to the initial state, so the monitor can start monitoring at the next event.
+ * Thus:
+ *
+ * If the monitor already started, handle the event.
+ * If the monitor did not start yet, start the monitor but skip the event.
+ */
+static inline bool da_handle_start_event(enum events event)
+{
+ struct da_monitor *da_mon;
+
+ if (!da_monitor_enabled())
+ return 0;
+
+ da_mon = da_get_monitor();
+
+ if (unlikely(!da_monitoring(da_mon))) {
+ da_monitor_start(da_mon);
+ return 0;
+ }
+
+ __da_handle_event(da_mon, event);
+
+ return 1;
+}
+
+/*
+ * da_handle_start_run_event - start monitoring and handle event
+ *
+ * This function is used to notify the monitor that the system is in the
+ * initial state, so the monitor can start monitoring and handling events.
+ */
+static inline bool da_handle_start_run_event(enum events event)
+{
+ struct da_monitor *da_mon;
+
+ if (!da_monitor_enabled())
+ return 0;
+
+ da_mon = da_get_monitor();
+
+ if (unlikely(!da_monitoring(da_mon)))
+ da_monitor_start(da_mon);
+
+ __da_handle_event(da_mon, event);
+
+ return 1;
+}
+
+#elif RV_MON_TYPE == RV_MON_PER_TASK
+/*
* Handle events for per-task monitors.
*/
-#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \
- \
-static inline void \
-__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
- enum events_##name event) \
-{ \
- bool retval; \
- \
- retval = da_event_##name(da_mon, tsk, event); \
- if (!retval) \
- da_monitor_reset_##name(da_mon); \
-} \
- \
-/* \
- * da_handle_event_##name - handle an event \
- */ \
-static inline void \
-da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \
-{ \
- struct da_monitor *da_mon = da_get_monitor_##name(tsk); \
- bool retval; \
- \
- retval = da_monitor_handling_event_##name(da_mon); \
- if (!retval) \
- return; \
- \
- __da_handle_event_##name(da_mon, tsk, event); \
-} \
- \
-/* \
- * da_handle_start_event_##name - start monitoring or handle event \
- * \
- * This function is used to notify the monitor that the system is returning \
- * to the initial state, so the monitor can start monitoring in the next event. \
- * Thus: \
- * \
- * If the monitor already started, handle the event. \
- * If the monitor did not start yet, start the monitor but skip the event. \
- */ \
-static inline bool \
-da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \
-{ \
- struct da_monitor *da_mon; \
- \
- if (!da_monitor_enabled_##name()) \
- return 0; \
- \
- da_mon = da_get_monitor_##name(tsk); \
- \
- if (unlikely(!da_monitoring_##name(da_mon))) { \
- da_monitor_start_##name(da_mon); \
- return 0; \
- } \
- \
- __da_handle_event_##name(da_mon, tsk, event); \
- \
- return 1; \
-} \
- \
-/* \
- * da_handle_start_run_event_##name - start monitoring and handle event \
- * \
- * This function is used to notify the monitor that the system is in the \
- * initial state, so the monitor can start monitoring and handling event. \
- */ \
-static inline bool \
-da_handle_start_run_event_##name(struct task_struct *tsk, enum events_##name event) \
-{ \
- struct da_monitor *da_mon; \
- \
- if (!da_monitor_enabled_##name()) \
- return 0; \
- \
- da_mon = da_get_monitor_##name(tsk); \
- \
- if (unlikely(!da_monitoring_##name(da_mon))) \
- da_monitor_start_##name(da_mon); \
- \
- __da_handle_event_##name(da_mon, tsk, event); \
- \
- return 1; \
+
+static inline void __da_handle_event(struct da_monitor *da_mon,
+ struct task_struct *tsk, enum events event)
+{
+ bool retval;
+
+ retval = da_event(da_mon, tsk, event);
+ if (!retval)
+ da_monitor_reset(da_mon);
}
/*
- * Entry point for the global monitor.
+ * da_handle_event - handle an event
*/
-#define DECLARE_DA_MON_GLOBAL(name, type) \
- \
-DECLARE_AUTOMATA_HELPERS(name, type) \
-DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
-DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
-DECLARE_DA_MON_INIT_GLOBAL(name, type) \
-DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+static inline void da_handle_event(struct task_struct *tsk, enum events event)
+{
+ struct da_monitor *da_mon = da_get_monitor(tsk);
+ bool retval;
+
+ retval = da_monitor_handling_event(da_mon);
+ if (!retval)
+ return;
+
+ __da_handle_event(da_mon, tsk, event);
+}
/*
- * Entry point for the per-cpu monitor.
+ * da_handle_start_event - start monitoring or handle event
+ *
+ * This function is used to notify the monitor that the system is returning
+ * to the initial state, so the monitor can start monitoring at the next event.
+ * Thus:
+ *
+ * If the monitor already started, handle the event.
+ * If the monitor did not start yet, start the monitor but skip the event.
*/
-#define DECLARE_DA_MON_PER_CPU(name, type) \
- \
-DECLARE_AUTOMATA_HELPERS(name, type) \
-DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
-DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
-DECLARE_DA_MON_INIT_PER_CPU(name, type) \
-DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+static inline bool da_handle_start_event(struct task_struct *tsk,
+ enum events event)
+{
+ struct da_monitor *da_mon;
+
+ if (!da_monitor_enabled())
+ return 0;
+
+ da_mon = da_get_monitor(tsk);
+
+ if (unlikely(!da_monitoring(da_mon))) {
+ da_monitor_start(da_mon);
+ return 0;
+ }
+
+ __da_handle_event(da_mon, tsk, event);
+
+ return 1;
+}
/*
- * Entry point for the per-task monitor.
+ * da_handle_start_run_event - start monitoring and handle event
+ *
+ * This function is used to notify the monitor that the system is in the
+ * initial state, so the monitor can start monitoring and handling events.
*/
-#define DECLARE_DA_MON_PER_TASK(name, type) \
- \
-DECLARE_AUTOMATA_HELPERS(name, type) \
-DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
-DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
-DECLARE_DA_MON_INIT_PER_TASK(name, type) \
-DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type)
+static inline bool da_handle_start_run_event(struct task_struct *tsk,
+ enum events event)
+{
+ struct da_monitor *da_mon;
+
+ if (!da_monitor_enabled())
+ return 0;
+
+ da_mon = da_get_monitor(tsk);
+
+ if (unlikely(!da_monitoring(da_mon)))
+ da_monitor_start(da_mon);
+
+ __da_handle_event(da_mon, tsk, event);
+
+ return 1;
+}
+#endif /* RV_MON_TYPE */
+
+#endif
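
Putting the pieces together, a hypothetical per-CPU monitor source would select its type before including the header (tracepoint wiring abridged; names illustrative):

#define MONITOR_NAME wip
#define RV_MON_TYPE  RV_MON_PER_CPU

#include "wip.h"			/* rvgen-generated model */
#include <rv/da_monitor.h>

/* Probe attached to the preempt_disable tracepoint. */
static void handle_preempt_disable(void *data, unsigned long ip,
				   unsigned long parent_ip)
{
	/* Feed the automaton; resets the monitor on an invalid transition. */
	da_handle_event(preempt_disable_wip);
}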
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 183d9fd50d2d..be0ffe1e3395 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -959,7 +959,8 @@ void fc_fcp_destroy(struct fc_lport *);
/*
* SCSI INTERACTION LAYER
*****************************/
-int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+enum scsi_qc_status fc_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *cmnd);
int fc_eh_abort(struct scsi_cmnd *);
int fc_eh_device_reset(struct scsi_cmnd *);
int fc_eh_host_reset(struct scsi_cmnd *);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 7282555adfd5..3d765c77bcd9 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -392,7 +392,8 @@ extern int iscsi_eh_abort(struct scsi_cmnd *sc);
extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
-extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc);
+extern enum scsi_qc_status iscsi_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc);
extern enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc);
/*
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index a0635b128d7a..e76f5744941b 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -689,7 +689,8 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_phy_enable(struct sas_phy *phy, int enable);
-extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+extern enum scsi_qc_status sas_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *cmd);
extern int sas_target_alloc(struct scsi_target *);
int sas_sdev_configure(struct scsi_device *dev, struct queue_limits *lim);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 96b350366670..08ac3200b4a4 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -106,12 +106,15 @@ enum scsi_disposition {
};
/*
- * Midlevel queue return values.
+ * Status values returned by the .queuecommand() callback if a command has not
+ * been queued.
*/
-#define SCSI_MLQUEUE_HOST_BUSY 0x1055
-#define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
-#define SCSI_MLQUEUE_EH_RETRY 0x1057
-#define SCSI_MLQUEUE_TARGET_BUSY 0x1058
+enum scsi_qc_status {
+ SCSI_MLQUEUE_HOST_BUSY = 0x1055,
+ SCSI_MLQUEUE_DEVICE_BUSY = 0x1056,
+ SCSI_MLQUEUE_EH_RETRY = 0x1057,
+ SCSI_MLQUEUE_TARGET_BUSY = 0x1058,
+};
/*
* Use these to separate status msg and our bytes
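
With the enum in place, a driver's .queuecommand() is typed accordingly; a minimal sketch, assuming success is still reported as 0 (only the busy statuses are covered by the enum) and with demo_* internals hypothetical:

static enum scsi_qc_status demo_queuecommand(struct Scsi_Host *shost,
					     struct scsi_cmnd *cmd)
{
	struct demo_host *dh = shost_priv(shost);	/* hypothetical */

	if (!demo_hw_can_accept(dh))			/* hypothetical */
		return SCSI_MLQUEUE_HOST_BUSY;

	demo_hw_submit(dh, cmd);			/* hypothetical */
	return 0;	/* command queued */
}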
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index c0e89996bdb3..249cea724abd 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -12,6 +12,9 @@ struct request;
struct scsi_driver {
struct device_driver gendrv;
+ int (*probe)(struct scsi_device *);
+ void (*remove)(struct scsi_device *);
+ void (*shutdown)(struct scsi_device *);
int (*resume)(struct device *);
void (*rescan)(struct device *);
blk_status_t (*init_command)(struct scsi_cmnd *);
@@ -25,9 +28,9 @@ struct scsi_driver {
#define scsi_register_driver(drv) \
__scsi_register_driver(drv, THIS_MODULE)
-int __scsi_register_driver(struct device_driver *, struct module *);
+int __scsi_register_driver(struct scsi_driver *, struct module *);
#define scsi_unregister_driver(drv) \
- driver_unregister(drv);
+ driver_unregister(&(drv)->gendrv);
extern int scsi_register_interface(struct class_interface *);
#define scsi_unregister_interface(intf) \
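
With __scsi_register_driver() now taking struct scsi_driver, registration passes the wrapper struct and unregistration reaches into .gendrv; a sketch with hypothetical demo callbacks:

static int demo_sdev_probe(struct scsi_device *sdev)
{
	return 0;
}

static void demo_sdev_remove(struct scsi_device *sdev)
{
}

static struct scsi_driver demo_driver = {
	.probe	= demo_sdev_probe,
	.remove	= demo_sdev_remove,
	.gendrv	= {
		.name	= "demo",
		.owner	= THIS_MODULE,
	},
};

static int __init demo_init(void)
{
	return scsi_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	scsi_unregister_driver(&demo_driver);
}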
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index e87cf7eadd26..f6e12565a81d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -84,13 +84,15 @@ struct scsi_host_template {
*
* STATUS: REQUIRED
*/
- int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
+ enum scsi_qc_status (*queuecommand)(struct Scsi_Host *,
+ struct scsi_cmnd *);
/*
* Queue a reserved command (BLK_MQ_REQ_RESERVED). The .queuecommand()
* documentation also applies to the .queue_reserved_command() callback.
*/
- int (*queue_reserved_command)(struct Scsi_Host *, struct scsi_cmnd *);
+ enum scsi_qc_status (*queue_reserved_command)(struct Scsi_Host *,
+ struct scsi_cmnd *);
/*
* The commit_rqs function is used to trigger a hardware
@@ -525,10 +527,12 @@ struct scsi_host_template {
*
*/
#define DEF_SCSI_QCMD(func_name) \
- int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \
+ enum scsi_qc_status func_name(struct Scsi_Host *shost, \
+ struct scsi_cmnd *cmd) \
{ \
unsigned long irq_flags; \
- int rc; \
+ enum scsi_qc_status rc; \
+ \
spin_lock_irqsave(shost->host_lock, irq_flags); \
rc = func_name##_lck(cmd); \
spin_unlock_irqrestore(shost->host_lock, irq_flags); \
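
Drivers that still serialize on the host lock keep using the macro unchanged apart from the type; a sketch (the _lck body is hypothetical, and 0 is assumed to still mean "queued"):

static enum scsi_qc_status demo_qcmd_lck(struct scsi_cmnd *cmd)
{
	/* ...submit to hardware; shost->host_lock is held here... */
	return 0;
}

/* Emits demo_qcmd() with the enum scsi_qc_status return type. */
static DEF_SCSI_QCMD(demo_qcmd)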
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index b908aacfef48..9f30625aa0d3 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -317,6 +317,15 @@ struct fc_fpin_stats {
u64 cn_device_specific;
};
+#define FC_RPORT_ENCRYPTION_STATUS_MAX_LEN 14
+/*
+ * Encryption Information
+ */
+struct fc_encryption_info {
+ /* Encryption Status */
+ u8 status;
+};
+
/* Macro for use in defining Remote Port attributes */
#define FC_RPORT_ATTR(_name,_mode,_show,_store) \
struct device_attribute dev_attr_rport_##_name = \
@@ -364,6 +373,7 @@ struct fc_rport { /* aka fc_starget_attrs */
u64 port_name;
u32 port_id;
u32 roles;
+ struct fc_encryption_info enc_info;
enum fc_port_state port_state; /* Will only be ONLINE or UNKNOWN */
u32 scsi_target_id;
u32 fast_io_fail_tmo;
@@ -691,6 +701,8 @@ struct fc_function_template {
struct fc_host_statistics * (*get_fc_host_stats)(struct Scsi_Host *);
void (*reset_fc_host_stats)(struct Scsi_Host *);
+ struct fc_encryption_info * (*get_fc_rport_enc_info)(struct fc_rport *);
+
int (*issue_fc_host_lip)(struct Scsi_Host *);
void (*dev_loss_tmo_callbk)(struct fc_rport *);
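
An LLD exposing encryption state would fill rport->enc_info from the new transport callback; a hedged sketch with hypothetical driver fields:

static struct fc_encryption_info *
demo_get_rport_enc_info(struct fc_rport *rport)
{
	struct demo_rport *drp = rport->dd_data;	/* hypothetical */

	rport->enc_info.status = drp->enc_status;	/* hypothetical */
	return &rport->enc_info;
}

static struct fc_function_template demo_fc_functions = {
	/* ... */
	.get_fc_rport_enc_info	= demo_get_rport_enc_info,
};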
diff --git a/include/soc/spacemit/ccu.h b/include/soc/spacemit/ccu.h
new file mode 100644
index 000000000000..84dcdecccc05
--- /dev/null
+++ b/include/soc/spacemit/ccu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_SPACEMIT_CCU_H__
+#define __SOC_SPACEMIT_CCU_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/regmap.h>
+
+/* Auxiliary device used to represent a CCU reset controller */
+struct spacemit_ccu_adev {
+ struct auxiliary_device adev;
+ struct regmap *regmap;
+};
+
+static inline struct spacemit_ccu_adev *
+to_spacemit_ccu_adev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct spacemit_ccu_adev, adev);
+}
+
+#endif /* __SOC_SPACEMIT_CCU_H__ */
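
The helper is meant for the reset half of the CCU, which binds as an auxiliary driver; a minimal probe sketch (reset-controller registration abridged):

static int demo_ccu_reset_probe(struct auxiliary_device *adev,
				const struct auxiliary_device_id *id)
{
	struct spacemit_ccu_adev *ccu = to_spacemit_ccu_adev(adev);

	/* Stash the shared regmap; a reset controller would be built on it. */
	dev_set_drvdata(&adev->dev, ccu->regmap);
	return 0;
}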
diff --git a/include/soc/spacemit/k1-syscon.h b/include/soc/spacemit/k1-syscon.h
index 354751562c55..0be7a2e8d445 100644
--- a/include/soc/spacemit/k1-syscon.h
+++ b/include/soc/spacemit/k1-syscon.h
@@ -5,17 +5,7 @@
#ifndef __SOC_K1_SYSCON_H__
#define __SOC_K1_SYSCON_H__
-/* Auxiliary device used to represent a CCU reset controller */
-struct spacemit_ccu_adev {
- struct auxiliary_device adev;
- struct regmap *regmap;
-};
-
-static inline struct spacemit_ccu_adev *
-to_spacemit_ccu_adev(struct auxiliary_device *adev)
-{
- return container_of(adev, struct spacemit_ccu_adev, adev);
-}
+#include "ccu.h"
/* APBS register offset */
#define APBS_PLL1_SWCR1 0x100
diff --git a/include/soc/spacemit/k3-syscon.h b/include/soc/spacemit/k3-syscon.h
new file mode 100644
index 000000000000..0299bea065a0
--- /dev/null
+++ b/include/soc/spacemit/k3-syscon.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* SpacemiT clock and reset driver definitions for the K3 SoC */
+
+#ifndef __SOC_K3_SYSCON_H__
+#define __SOC_K3_SYSCON_H__
+
+#include "ccu.h"
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+#define APBS_PLL4_SWCR1 0x130
+#define APBS_PLL4_SWCR2 0x134
+#define APBS_PLL4_SWCR3 0x138
+#define APBS_PLL5_SWCR1 0x13c
+#define APBS_PLL5_SWCR2 0x140
+#define APBS_PLL5_SWCR3 0x144
+#define APBS_PLL6_SWCR1 0x148
+#define APBS_PLL6_SWCR2 0x14c
+#define APBS_PLL6_SWCR3 0x150
+#define APBS_PLL7_SWCR1 0x158
+#define APBS_PLL7_SWCR2 0x15c
+#define APBS_PLL7_SWCR3 0x160
+#define APBS_PLL8_SWCR1 0x180
+#define APBS_PLL8_SWCR2 0x184
+#define APBS_PLL8_SWCR3 0x188
+
+/* MPMU register offset */
+#define MPMU_FCCR 0x0008
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(24)
+#define POSR_PLL2_LOCK BIT(25)
+#define POSR_PLL3_LOCK BIT(26)
+#define POSR_PLL4_LOCK BIT(27)
+#define POSR_PLL5_LOCK BIT(28)
+#define POSR_PLL6_LOCK BIT(29)
+#define POSR_PLL7_LOCK BIT(30)
+#define POSR_PLL8_LOCK BIT(31)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+#define MPMU_I2S0_SYSCLK 0x1100
+#define MPMU_I2S2_SYSCLK 0x1104
+#define MPMU_I2S3_SYSCLK 0x1108
+#define MPMU_I2S4_SYSCLK 0x110c
+#define MPMU_I2S5_SYSCLK 0x1110
+#define MPMU_I2S_SYSCLK_CTRL 0x1114
+
+/* APBC register offset */
+#define APBC_UART0_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS0_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS1_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR0_CLK_RST 0x5c
+#define APBC_IR1_CLK_RST 0x1c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_SSPA2_CLK_RST 0x88
+#define APBC_SSPA3_CLK_RST 0x8c
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_CAN1_CLK_RST 0xa4
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+#define APBC_TIMERS2_CLK_RST 0x11c
+#define APBC_TIMERS3_CLK_RST 0x120
+#define APBC_TIMERS4_CLK_RST 0x124
+#define APBC_TIMERS5_CLK_RST 0x128
+#define APBC_TIMERS6_CLK_RST 0x12c
+#define APBC_TIMERS7_CLK_RST 0x130
+
+#define APBC_CAN2_CLK_RST 0x148
+#define APBC_CAN3_CLK_RST 0x14c
+#define APBC_CAN4_CLK_RST 0x150
+#define APBC_UART10_CLK_RST 0x154
+#define APBC_SSP0_CLK_RST 0x158
+#define APBC_SSP1_CLK_RST 0x15c
+#define APBC_SSPA4_CLK_RST 0x160
+#define APBC_SSPA5_CLK_RST 0x164
+
+/* APMU register offset */
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_PMU_CLK_GATE_CTRL 0x040
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_MCB_CLK_RES_CTRL 0x06c
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_DTC_CLK_RES_CTRL 0x0ac
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_UCIE_CTRL 0x11c
+#define APMU_RCPU_CLK_RES_CTRL 0x14c
+#define APMU_TOP_DCLK_CTRL 0x158
+#define APMU_LCD_EDP_CTRL 0x23c
+#define APMU_UFS_CLK_RES_CTRL 0x268
+#define APMU_LCD_CLK_RES_CTRL3 0x26c
+#define APMU_LCD_CLK_RES_CTRL4 0x270
+#define APMU_LCD_CLK_RES_CTRL5 0x274
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_CPU_C2_CLK_CTRL 0x394
+#define APMU_CPU_C3_CLK_CTRL 0x208
+#define APMU_PCIE_CLK_RES_CTRL_A 0x1f0
+#define APMU_PCIE_CLK_RES_CTRL_B 0x1c8
+#define APMU_PCIE_CLK_RES_CTRL_C 0x1d0
+#define APMU_PCIE_CLK_RES_CTRL_D 0x1e0
+#define APMU_PCIE_CLK_RES_CTRL_E 0x1e8
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+#define APMU_EMAC2_CLK_RES_CTRL 0x248
+#define APMU_ESPI_CLK_RES_CTRL 0x240
+#define APMU_SNR_ISIM_VCLK_CTRL 0x3f8
+
+/* DCIU register offsets */
+#define DCIU_DMASYS_CLK_EN 0x234
+#define DCIU_DMASYS_SDMA_CLK_EN 0x238
+#define DCIU_C2_TCM_PIPE_CLK 0x244
+#define DCIU_C3_TCM_PIPE_CLK 0x248
+
+#define DCIU_DMASYS_S0_RSTN 0x204
+#define DCIU_DMASYS_S1_RSTN 0x208
+#define DCIU_DMASYS_A0_RSTN 0x20C
+#define DCIU_DMASYS_A1_RSTN 0x210
+#define DCIU_DMASYS_A2_RSTN 0x214
+#define DCIU_DMASYS_A3_RSTN 0x218
+#define DCIU_DMASYS_A4_RSTN 0x21C
+#define DCIU_DMASYS_A5_RSTN 0x220
+#define DCIU_DMASYS_A6_RSTN 0x224
+#define DCIU_DMASYS_A7_RSTN 0x228
+#define DCIU_DMASYS_RSTN 0x22C
+#define DCIU_DMASYS_SDMA_RSTN 0x230
+
+/* RCPU SYSCTRL register offsets */
+#define RCPU_CAN_CLK_RST 0x4c
+#define RCPU_CAN1_CLK_RST 0xF0
+#define RCPU_CAN2_CLK_RST 0xF4
+#define RCPU_CAN3_CLK_RST 0xF8
+#define RCPU_CAN4_CLK_RST 0xFC
+#define RCPU_IRC_CLK_RST 0x48
+#define RCPU_IRC1_CLK_RST 0xEC
+#define RCPU_GMAC_CLK_RST 0xE4
+#define RCPU_ESPI_CLK_RST 0xDC
+#define RCPU_AUDIO_I2S0_SYS_CLK_CTRL 0x70
+#define RCPU_AUDIO_I2S1_SYS_CLK_CTRL 0x44
+
+/* RCPU UARTCTRL register offsets */
+#define RCPU1_UART0_CLK_RST 0x00
+#define RCPU1_UART1_CLK_RST 0x04
+#define RCPU1_UART2_CLK_RST 0x08
+#define RCPU1_UART3_CLK_RST 0x0c
+#define RCPU1_UART4_CLK_RST 0x10
+#define RCPU1_UART5_CLK_RST 0x14
+
+/* RCPU I2SCTRL register offsets */
+#define RCPU2_AUDIO_I2S0_TX_RX_CLK_CTRL 0x60
+#define RCPU2_AUDIO_I2S1_TX_RX_CLK_CTRL 0x64
+#define RCPU2_AUDIO_I2S2_TX_RX_CLK_CTRL 0x68
+#define RCPU2_AUDIO_I2S3_TX_RX_CLK_CTRL 0x6C
+
+#define RCPU2_AUDIO_I2S2_SYS_CLK_CTRL 0x44
+#define RCPU2_AUDIO_I2S3_SYS_CLK_CTRL 0x54
+
+/* RCPU SPICTRL register offsets */
+#define RCPU3_SSP0_CLK_RST 0x00
+#define RCPU3_SSP1_CLK_RST 0x04
+#define RCPU3_PWR_SSP_CLK_RST 0x08
+
+/* RCPU I2CCTRL register offsets */
+#define RCPU4_I2C0_CLK_RST 0x00
+#define RCPU4_I2C1_CLK_RST 0x04
+#define RCPU4_PWR_I2C_CLK_RST 0x08
+
+/* RPMU register offsets */
+#define RCPU5_AON_PER_CLK_RST_CTRL 0x2C
+#define RCPU5_TIMER1_CLK_RST 0x4C
+#define RCPU5_TIMER2_CLK_RST 0x70
+#define RCPU5_TIMER3_CLK_RST 0x78
+#define RCPU5_TIMER4_CLK_RST 0x7C
+#define RCPU5_GPIO_AND_EDGE_CLK_RST 0x74
+#define RCPU5_RCPU_BUS_CLK_CTRL 0xC0
+#define RCPU5_RT24_CORE0_CLK_CTRL 0xC4
+#define RCPU5_RT24_CORE1_CLK_CTRL 0xC8
+#define RCPU5_RT24_CORE0_SW_RESET 0xCC
+#define RCPU5_RT24_CORE1_SW_RESET 0xD0
+
+/* RCPU PWMCTRL register offsets */
+#define RCPU6_PWM0_CLK_RST 0x00
+#define RCPU6_PWM1_CLK_RST 0x04
+#define RCPU6_PWM2_CLK_RST 0x08
+#define RCPU6_PWM3_CLK_RST 0x0c
+#define RCPU6_PWM4_CLK_RST 0x10
+#define RCPU6_PWM5_CLK_RST 0x14
+#define RCPU6_PWM6_CLK_RST 0x18
+#define RCPU6_PWM7_CLK_RST 0x1c
+#define RCPU6_PWM8_CLK_RST 0x20
+#define RCPU6_PWM9_CLK_RST 0x24
+
+/* APBC2 SEC register offsets */
+#define APBC2_UART1_CLK_RST 0x00
+#define APBC2_SSP2_CLK_RST 0x04
+#define APBC2_TWSI3_CLK_RST 0x08
+#define APBC2_RTC_CLK_RST 0x0c
+#define APBC2_TIMERS_CLK_RST 0x10
+#define APBC2_GPIO_CLK_RST 0x1c
+
+#endif /* __SOC_K3_SYSCON_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index c545875d0ff1..1fd21be02577 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -16,6 +16,7 @@
struct clk;
struct reset_control;
+struct tegra_pmc;
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
@@ -149,11 +150,24 @@ enum tegra_io_pad {
};
#ifdef CONFIG_SOC_TEGRA_PMC
+struct tegra_pmc *devm_tegra_pmc_get(struct device *dev);
+
+int tegra_pmc_powergate_power_on(struct tegra_pmc *pmc, unsigned int id);
+int tegra_pmc_powergate_power_off(struct tegra_pmc *pmc, unsigned int id);
+int tegra_pmc_powergate_remove_clamping(struct tegra_pmc *pmc, unsigned int id);
+
+/* Must be called with clk disabled, and returns with clk enabled */
+int tegra_pmc_powergate_sequence_power_up(struct tegra_pmc *pmc,
+ unsigned int id, struct clk *clk,
+ struct reset_control *rst);
+int tegra_pmc_io_pad_power_enable(struct tegra_pmc *pmc, enum tegra_io_pad id);
+int tegra_pmc_io_pad_power_disable(struct tegra_pmc *pmc, enum tegra_io_pad id);
+
+/* legacy */
int tegra_powergate_power_on(unsigned int id);
int tegra_powergate_power_off(unsigned int id);
int tegra_powergate_remove_clamping(unsigned int id);
-/* Must be called with clk disabled, and returns with clk enabled */
int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst);
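
Consumers of the new instance-based API first look up the PMC, then operate on it; a hedged sketch (the partition ID and the clk/reset handles are illustrative):

static int demo_power_up(struct device *dev, struct clk *clk,
			 struct reset_control *rst)
{
	struct tegra_pmc *pmc = devm_tegra_pmc_get(dev);

	if (IS_ERR(pmc))
		return PTR_ERR(pmc);

	/* clk must be disabled on entry; returns with clk enabled. */
	return tegra_pmc_powergate_sequence_power_up(pmc, TEGRA_POWERGATE_VENC,
						     clk, rst);
}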
@@ -166,6 +180,50 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
bool tegra_pmc_core_domain_state_synced(void);
#else
+static inline struct tegra_pmc *devm_tegra_pmc_get(struct device *dev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int
+tegra_pmc_powergate_power_on(struct tegra_pmc *pmc, unsigned int id)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_powergate_power_off(struct tegra_pmc *pmc, unsigned int id)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_powergate_remove_clamping(struct tegra_pmc *pmc, unsigned int id)
+{
+ return -ENOSYS;
+}
+
+/* Must be called with clk disabled, and returns with clk enabled */
+static inline int
+tegra_pmc_powergate_sequence_power_up(struct tegra_pmc *pmc, unsigned int id,
+ struct clk *clk,
+ struct reset_control *rst)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_io_pad_power_enable(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
+static inline int
+tegra_pmc_io_pad_power_disable(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ return -ENOSYS;
+}
+
static inline int tegra_powergate_power_on(unsigned int id)
{
return -ENOSYS;
diff --git a/include/sound/ak4641.h b/include/sound/ak4641.h
deleted file mode 100644
index 8b1941bbde52..000000000000
--- a/include/sound/ak4641.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AK4641 ALSA SoC Codec driver
- *
- * Copyright 2009 Philipp Zabel
- */
-
-#ifndef __AK4641_H
-#define __AK4641_H
-
-/**
- * struct ak4641_platform_data - platform specific AK4641 configuration
- * @gpio_power: GPIO to control external power to AK4641
- * @gpio_npdn: GPIO connected to AK4641 nPDN pin
- *
- * Both GPIO parameters are optional.
- */
-struct ak4641_platform_data {
- int gpio_power;
- int gpio_npdn;
-};
-
-#endif /* __AK4641_H */
diff --git a/include/sound/cs-amp-lib.h b/include/sound/cs-amp-lib.h
index 61e00017c9aa..e9aa86d76049 100644
--- a/include/sound/cs-amp-lib.h
+++ b/include/sound/cs-amp-lib.h
@@ -58,6 +58,9 @@ int cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid, int amp_
int cs_amp_set_efi_calibration_data(struct device *dev, int amp_index, int num_amps,
const struct cirrus_amp_cal_data *in_data);
int cs_amp_get_vendor_spkid(struct device *dev);
+const char *cs_amp_devm_get_vendor_specific_variant_id(struct device *dev,
+ int ssid_vendor,
+ int ssid_device);
struct dentry *cs_amp_create_debugfs(struct device *dev);
static inline u64 cs_amp_cal_target_u64(const struct cirrus_amp_cal_data *data)
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 5928af539c46..ae1e1489b671 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -9,6 +9,7 @@
#ifndef __CS35L56_H
#define __CS35L56_H
+#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/regulator/consumer.h>
@@ -26,6 +27,9 @@ struct snd_ctl_elem_value;
#define CS35L56_GLOBAL_ENABLES 0x0002014
#define CS35L56_BLOCK_ENABLES 0x0002018
#define CS35L56_BLOCK_ENABLES2 0x000201C
+#define CS35L56_SYNC_GPIO1_CFG 0x0002410
+#define CS35L56_ASP2_DIO_GPIO13_CFG 0x0002440
+#define CS35L56_UPDATE_REGS 0x0002A0C
#define CS35L56_REFCLK_INPUT 0x0002C04
#define CS35L56_GLOBAL_SAMPLE_RATE 0x0002C0C
#define CS35L56_OTP_MEM_53 0x00300D4
@@ -65,6 +69,9 @@ struct snd_ctl_elem_value;
#define CS35L56_IRQ1_MASK_8 0x000E0AC
#define CS35L56_IRQ1_MASK_18 0x000E0D4
#define CS35L56_IRQ1_MASK_20 0x000E0DC
+#define CS35L56_GPIO_STATUS1 0x000F000
+#define CS35L56_GPIO1_CTRL1 0x000F008
+#define CS35L56_GPIO13_CTRL1 0x000F038
#define CS35L56_MIXER_NGATE_CH1_CFG 0x0010004
#define CS35L56_MIXER_NGATE_CH2_CFG 0x0010008
#define CS35L56_DSP_MBOX_1_RAW 0x0011000
@@ -130,6 +137,17 @@ struct snd_ctl_elem_value;
#define CS35L56_MTLREVID_MASK 0x0000000F
#define CS35L56_REVID_B0 0x000000B0
+/* PAD_INTF */
+#define CS35L56_PAD_GPIO_PULL_MASK GENMASK(3, 2)
+#define CS35L56_PAD_GPIO_IE BIT(0)
+
+#define CS35L56_PAD_PULL_NONE 0
+#define CS35L56_PAD_PULL_UP 1
+#define CS35L56_PAD_PULL_DOWN 2
+
+/* UPDATE_REGS */
+#define CS35L56_UPDT_GPIO_PRES BIT(6)
+
/* ASP_ENABLES1 */
#define CS35L56_ASP_RX2_EN_SHIFT 17
#define CS35L56_ASP_RX1_EN_SHIFT 16
@@ -185,6 +203,12 @@ struct snd_ctl_elem_value;
/* MIXER_NGATE_CHn_CFG */
#define CS35L56_AUX_NGATE_CHn_EN 0x00000001
+/* GPIOn_CTRL1 */
+#define CS35L56_GPIO_DIR_MASK BIT(31)
+#define CS35L56_GPIO_FN_MASK GENMASK(2, 0)
+
+#define CS35L56_GPIO_FN_GPIO 0x00000001
+
/* Mixer input sources */
#define CS35L56_INPUT_SRC_NONE 0x00
#define CS35L56_INPUT_SRC_ASP1RX1 0x08
@@ -279,6 +303,7 @@ struct snd_ctl_elem_value;
#define CS35L56_HALO_STATE_TIMEOUT_US 250000
#define CS35L56_RESET_PULSE_MIN_US 1100
#define CS35L56_WAKE_HOLD_TIME_US 1000
+#define CS35L56_PAD_PULL_SETTLE_US 10
#define CS35L56_CALIBRATION_POLL_US (100 * USEC_PER_MSEC)
#define CS35L56_CALIBRATION_TIMEOUT_US (5 * USEC_PER_SEC)
@@ -289,6 +314,9 @@ struct snd_ctl_elem_value;
#define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5
+#define CS35L56_MAX_GPIO 13
+#define CS35L63_MAX_GPIO 9
+
/* Additional margin for SYSTEM_RESET to control port ready on SPI */
#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
@@ -338,6 +366,10 @@ struct cs35l56_base {
const struct cirrus_amp_cal_controls *calibration_controls;
struct dentry *debugfs;
u64 silicon_uid;
+ u8 onchip_spkid_gpios[5];
+ u8 num_onchip_spkid_gpios;
+ u8 onchip_spkid_pulls[5];
+ u8 num_onchip_spkid_pulls;
};
static inline bool cs35l56_is_otp_register(unsigned int reg)
@@ -413,6 +445,11 @@ void cs35l56_warn_if_firmware_missing(struct cs35l56_base *cs35l56_base);
void cs35l56_log_tuning(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
+int cs35l56_check_and_save_onchip_spkid_gpios(struct cs35l56_base *cs35l56_base,
+ const u32 *gpios, int num_gpios,
+ const u32 *pulls, int num_pulls);
+int cs35l56_configure_onchip_spkid_pads(struct cs35l56_base *cs35l56_base);
+int cs35l56_read_onchip_spkid(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h
index 6e9391b3816c..79bd5a7a0f88 100644
--- a/include/sound/sdca_function.h
+++ b/include/sound/sdca_function.h
@@ -798,6 +798,7 @@ struct sdca_control_range {
* @sel: Identifier used for addressing.
* @nbits: Number of bits used in the Control.
* @values: Holds the Control value for constants and defaults.
+ * @reset: Defined reset value for the Control.
* @cn_list: A bitmask showing the valid Control Numbers within this Control,
* Control Numbers typically represent channels.
* @interrupt_position: SDCA interrupt line that will alert to changes on this
@@ -808,6 +809,7 @@ struct sdca_control_range {
* @layers: Bitmask of access layers of the Control.
* @deferrable: Indicates if the access to the Control can be deferred.
* @has_default: Indicates the Control has a default value to be written.
+ * @has_reset: Indicates the Control has a defined reset value.
* @has_fixed: Indicates the Control only supports a single value.
*/
struct sdca_control {
@@ -816,6 +818,7 @@ struct sdca_control {
int nbits;
int *values;
+ int reset;
u64 cn_list;
int interrupt_position;
@@ -827,6 +830,7 @@ struct sdca_control {
bool deferrable;
bool is_volatile;
bool has_default;
+ bool has_reset;
bool has_fixed;
};
diff --git a/include/sound/sdca_interrupts.h b/include/sound/sdca_interrupts.h
index 8f13417d129a..9bcb5d8fd592 100644
--- a/include/sound/sdca_interrupts.h
+++ b/include/sound/sdca_interrupts.h
@@ -84,4 +84,11 @@ int sdca_irq_populate(struct sdca_function_data *function,
struct sdca_interrupt_info *sdca_irq_allocate(struct device *dev,
struct regmap *regmap, int irq);
+void sdca_irq_enable_early(struct sdca_function_data *function,
+ struct sdca_interrupt_info *info);
+void sdca_irq_enable(struct sdca_function_data *function,
+ struct sdca_interrupt_info *info);
+void sdca_irq_disable(struct sdca_function_data *function,
+ struct sdca_interrupt_info *info);
+
#endif
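A sketch of how a Function driver might pair the new enable/disable helpers around power transitions, assuming function and info were previously set up with sdca_irq_populate() and sdca_irq_allocate(); the suspend/resume wrappers are illustrative:

static int example_suspend(struct sdca_function_data *function,
			   struct sdca_interrupt_info *info)
{
	/* Mask SDCA interrupts before powering down. */
	sdca_irq_disable(function, info);
	return 0;
}

static int example_resume(struct sdca_function_data *function,
			  struct sdca_interrupt_info *info)
{
	/* Re-arm SDCA interrupts once the device is powered. */
	sdca_irq_enable(function, info);
	return 0;
}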
diff --git a/include/sound/sdca_jack.h b/include/sound/sdca_jack.h
new file mode 100644
index 000000000000..3ec22046d3eb
--- /dev/null
+++ b/include/sound/sdca_jack.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_JACK_H__
+#define __SDCA_JACK_H__
+
+struct sdca_interrupt;
+struct sdca_interrupt_info;
+struct snd_kcontrol;
+struct snd_soc_jack;
+
+/**
+ * struct jack_state - Jack state structure to keep data between interrupts
+ * @kctl: Pointer to the ALSA control attached to this jack
+ * @jack: Pointer to the ASoC jack struct for this jack
+ */
+struct jack_state {
+ struct snd_kcontrol *kctl;
+ struct snd_soc_jack *jack;
+};
+
+int sdca_jack_alloc_state(struct sdca_interrupt *interrupt);
+int sdca_jack_process(struct sdca_interrupt *interrupt);
+int sdca_jack_set_jack(struct sdca_interrupt_info *info, struct snd_soc_jack *jack);
+int sdca_jack_report(struct sdca_interrupt *interrupt);
+
+#endif // __SDCA_JACK_H__
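A sketch of the expected machine-driver hookup, assuming the jack was created elsewhere with the usual ASoC jack APIs; the wrapper name is illustrative:

static int example_attach_jack(struct sdca_interrupt_info *info,
			       struct snd_soc_jack *jack)
{
	/* Attach the ASoC jack so subsequent interrupt-driven
	 * sdca_jack_report() calls have a target to update. */
	return sdca_jack_set_jack(info, jack);
}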
diff --git a/include/sound/seq_device.h b/include/sound/seq_device.h
index dead74b022f4..a72380c202e9 100644
--- a/include/sound/seq_device.h
+++ b/include/sound/seq_device.h
@@ -43,6 +43,8 @@ struct snd_seq_device {
* Typically, call snd_device_free(dev->card, dev->driver_data)
*/
struct snd_seq_driver {
+ int (*probe)(struct snd_seq_device *dev);
+ void (*remove)(struct snd_seq_device *dev);
struct device_driver driver;
char *id;
int argsize;
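With probe/remove now part of struct snd_seq_driver, a client driver might be declared along these lines; a sketch with illustrative names and empty bodies:

static int example_probe(struct snd_seq_device *dev)
{
	/* Allocate and register the sequencer client here. */
	return 0;
}

static void example_remove(struct snd_seq_device *dev)
{
	/* Typically: snd_device_free(dev->card, dev->driver_data); */
}

static struct snd_seq_driver example_seq_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.driver = { .name = "snd-seq-example" },
	.id = "example-id",
	.argsize = 0,
};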
diff --git a/include/sound/soc-acpi-intel-ssp-common.h b/include/sound/soc-acpi-intel-ssp-common.h
index b4597c8dac78..fdb2fce42115 100644
--- a/include/sound/soc-acpi-intel-ssp-common.h
+++ b/include/sound/soc-acpi-intel-ssp-common.h
@@ -37,6 +37,9 @@
#define RT5682_ACPI_HID "10EC5682"
#define RT5682S_ACPI_HID "RTL5682"
+/* Texas Instruments */
+#define TAS2563_ACPI_HID "TXNW2563"
+
enum snd_soc_acpi_intel_codec {
CODEC_NONE,
@@ -63,6 +66,7 @@ enum snd_soc_acpi_intel_codec {
CODEC_RT1015P,
CODEC_RT1019P,
CODEC_RT1308,
+ CODEC_TAS2563,
};
enum snd_soc_acpi_intel_codec
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index d78cda866888..2a2b74b24a60 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -237,8 +237,7 @@ struct snd_soc_component {
* the driver will be marked as BROKEN when these fields are removed.
*/
- /* Don't use these, use snd_soc_component_get_dapm() */
- struct snd_soc_dapm_context dapm;
+ struct snd_soc_dapm_context *dapm;
/* machine specific init */
int (*init)(struct snd_soc_component *component);
@@ -268,12 +267,9 @@ struct snd_soc_component {
static inline struct snd_soc_dapm_context *snd_soc_component_to_dapm(
struct snd_soc_component *component)
{
- return &component->dapm;
+ return component->dapm;
}
-// FIXME
-#define snd_soc_component_get_dapm snd_soc_component_to_dapm
-
/**
* snd_soc_component_cache_sync() - Sync the register cache with the hardware
* @component: COMPONENT to sync
@@ -368,27 +364,6 @@ snd_soc_component_active(struct snd_soc_component *component)
return component->active;
}
-/* component pin */
-int snd_soc_component_enable_pin(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_disable_pin(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_nc_pin(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_get_pin_status(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
- const char *pin);
-int snd_soc_component_force_enable_pin_unlocked(
- struct snd_soc_component *component,
- const char *pin);
-
/* component controls */
struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component,
const char * const ctl);
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 75941324886b..49f0fe05db01 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -20,6 +20,7 @@ struct regulator;
struct soc_enum;
struct snd_pcm_substream;
struct snd_soc_pcm_runtime;
+struct snd_soc_dapm_context;
/* widget has no PM register bit */
#define SND_SOC_NOPM -1
@@ -579,28 +580,6 @@ struct snd_soc_dapm_update {
bool has_second_set;
};
-/* DAPM context */
-struct snd_soc_dapm_context {
- enum snd_soc_bias_level bias_level;
-
- bool idle_bias; /* Use BIAS_OFF instead of STANDBY when false */
-
- struct device *dev; /* from parent - for debug */ /* REMOVE ME */
- struct snd_soc_component *component; /* parent component */
- struct snd_soc_card *card; /* parent card */
-
- /* used during DAPM updates */
- enum snd_soc_bias_level target_bias_level;
- struct list_head list;
-
- struct snd_soc_dapm_widget *wcache_sink;
- struct snd_soc_dapm_widget *wcache_source;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_dapm;
-#endif
-};
-
/* A list of widgets associated with an object, typically a snd_kcontrol */
struct snd_soc_dapm_widget_list {
int num_widgets;
@@ -628,6 +607,8 @@ enum snd_soc_dapm_direction {
#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+struct snd_soc_dapm_context *snd_soc_dapm_alloc(struct device *dev);
+
int snd_soc_dapm_regulator_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
int snd_soc_dapm_clock_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
int snd_soc_dapm_pinctrl_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
@@ -705,16 +686,6 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, co
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin);
void snd_soc_dapm_mark_endpoints_dirty(struct snd_soc_card *card);
-/*
- * Marks the specified pin as being not connected, disabling it along
- * any parent or child widgets. At present this is identical to
- * snd_soc_dapm_disable_pin[_unlocked]() but in future it will be extended to do
- * additional things such as disabling controls which only affect
- * paths through the pin.
- */
-#define snd_soc_dapm_nc_pin snd_soc_dapm_disable_pin
-#define snd_soc_dapm_nc_pin_unlocked snd_soc_dapm_disable_pin_unlocked
-
/* dapm path query */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list,
@@ -730,15 +701,6 @@ int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_so
enum snd_soc_bias_level snd_soc_dapm_get_bias_level(struct snd_soc_dapm_context *dapm);
void snd_soc_dapm_init_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
-// REMOVE ME !!
-#define snd_soc_component_force_bias_level(c, l) snd_soc_dapm_force_bias_level(&(c)->dapm, l)
-#define snd_soc_component_get_bias_level(c) snd_soc_dapm_get_bias_level(&(c)->dapm)
-#define snd_soc_component_init_bias_level(c, l) snd_soc_dapm_init_bias_level(&(c)->dapm, l)
-#define snd_soc_dapm_kcontrol_widget snd_soc_dapm_kcontrol_to_widget
-#define snd_soc_dapm_kcontrol_dapm snd_soc_dapm_kcontrol_to_dapm
-#define dapm_kcontrol_get_value snd_soc_dapm_kcontrol_get_value
-#define snd_soc_dapm_kcontrol_component snd_soc_dapm_kcontrol_to_component
-
#define for_each_dapm_widgets(list, i, widget) \
for ((i) = 0; \
(i) < list->num_widgets && (widget = list->widgets[i]); \
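With the context now heap-allocated, a component driver would be expected to obtain its DAPM context from snd_soc_dapm_alloc() rather than embedding it; a minimal sketch, assuming the allocator returns NULL on failure and the core manages the lifetime:

static int example_component_init(struct snd_soc_component *component)
{
	struct snd_soc_dapm_context *dapm;

	dapm = snd_soc_dapm_alloc(component->dev);
	if (!dapm)	/* assuming NULL on allocation failure */
		return -ENOMEM;

	component->dapm = dapm;
	return 0;
}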
diff --git a/include/sound/soc.h b/include/sound/soc.h
index aa0fe6b80293..7d8376c8e1be 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1076,7 +1076,7 @@ struct snd_soc_card {
struct list_head dobj_list;
/* Generic DAPM context for the card */
- struct snd_soc_dapm_context dapm;
+ struct snd_soc_dapm_context *dapm;
struct snd_soc_dapm_stats dapm_stats;
#ifdef CONFIG_DEBUG_FS
@@ -1136,7 +1136,7 @@ static inline int snd_soc_card_is_instantiated(struct snd_soc_card *card)
static inline struct snd_soc_dapm_context *snd_soc_card_to_dapm(struct snd_soc_card *card)
{
- return &card->dapm;
+ return card->dapm;
}
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
diff --git a/include/sound/sof.h b/include/sound/sof.h
index eddea82c7b5a..38d6c8cb5e83 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -159,6 +159,9 @@ struct sof_dev_desc {
/* The platform supports DSPless mode */
bool dspless_mode_supported;
+ /* On demand DSP booting is possible on the platform */
+ bool on_demand_dsp_boot;
+
/* defaults paths for firmware, library and topology files */
const char *default_fw_path[SOF_IPC_TYPE_COUNT];
const char *default_lib_path[SOF_IPC_TYPE_COUNT];
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index 15fac532688e..4554e5e8cab5 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -352,6 +352,10 @@ struct sof_ipc4_base_module_cfg {
#define SOF_IPC4_MOD_EXT_DOMAIN_MASK BIT(28)
#define SOF_IPC4_MOD_EXT_DOMAIN(x) ((x) << SOF_IPC4_MOD_EXT_DOMAIN_SHIFT)
+#define SOF_IPC4_MOD_EXT_EXTENDED_INIT_SHIFT 29
+#define SOF_IPC4_MOD_EXT_EXTENDED_INIT_MASK BIT(29)
+#define SOF_IPC4_MOD_EXT_EXTENDED_INIT(x) ((x) << SOF_IPC4_MOD_EXT_EXTENDED_INIT_SHIFT)
+
/* bind/unbind module ipc msg */
#define SOF_IPC4_MOD_EXT_DST_MOD_ID_SHIFT 0
#define SOF_IPC4_MOD_EXT_DST_MOD_ID_MASK GENMASK(15, 0)
@@ -586,6 +590,77 @@ struct sof_ipc4_notify_module_data {
#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL 0xA15A0000
#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK GENMASK(15, 0)
+/*
+ * Macros for creating struct sof_ipc4_module_init_ext_init payload
+ * with its associated data. ext_init payload should be the first
+ * piece of payload following SOF_IPC4_MOD_INIT_INSTANCE msg, and its
+ * existence is indicated with the SOF_IPC4_MOD_EXT_EXTENDED_INIT bit.
+ *
+ * The macros below apply to sof_ipc4_module_init_ext_init.word0
+ */
+#define SOF_IPC4_MOD_INIT_EXT_RTOS_DOMAIN_SHIFT 0
+#define SOF_IPC4_MOD_INIT_EXT_RTOS_DOMAIN_MASK BIT(0)
+#define SOF_IPC4_MOD_INIT_EXT_RTOS_DOMAIN(x) ((x) << SOF_IPC4_MOD_INIT_EXT_RTOS_DOMAIN_SHIFT)
+
+#define SOF_IPC4_MOD_INIT_EXT_GNA_USED_SHIFT 1
+#define SOF_IPC4_MOD_INIT_EXT_GNA_USED_MASK BIT(1)
+#define SOF_IPC4_MOD_INIT_EXT_GNA_USED(x) ((x) << SOF_IPC4_MOD_INIT_EXT_GNA_USED_SHIFT)
+
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_ARRAY_SHIFT 2
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_ARRAY_MASK BIT(2)
+#define SOF_IPC4_MOD_INIT_EXT_DATA_ARRAY(x) ((x) << SOF_IPC4_MOD_INIT_EXT_OBJ_ARRAY_SHIFT)
+
+struct sof_ipc4_module_init_ext_init {
+ u32 word0;
+ u32 rsvd1;
+ u32 rsvd2;
+} __packed __aligned(4);
+
+/*
+ * The SOF_IPC4_MOD_EXT_EXTENDED_INIT payload may be followed by an
+ * arbitrary number of object array objects. The
+ * SOF_IPC4_MOD_INIT_EXT_DATA_ARRAY bit indicates that an array object
+ * follows struct sof_ipc4_module_init_ext_init.
+ *
+ * The object header's SOF_IPC4_MOD_INIT_EXT_OBJ_LAST-bit in struct
+ * sof_ipc4_module_init_ext_object indicates if the array is continued
+ * with another object. The header also has fields to identify the
+ * object, SOF_IPC4_MOD_INIT_EXT_OBJ_ID, and to indicate the object's
+ * size in 32-bit words, SOF_IPC4_MOD_INIT_EXT_OBJ_WORDS, not
+ * including the header itself.
+ *
+ * The macros below apply to sof_ipc4_module_init_ext_object.header
+ */
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_LAST_SHIFT 0
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_LAST_MASK BIT(0)
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_LAST(x) ((x) << SOF_IPC4_MOD_INIT_EXT_OBJ_LAST_SHIFT)
+
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_ID_SHIFT 1
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_ID_MASK GENMASK(15, 1)
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_ID(x) ((x) << SOF_IPC4_MOD_INIT_EXT_OBJ_ID_SHIFT)
+
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_WORDS_SHIFT 16
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_WORDS_MASK GENMASK(31, 16)
+#define SOF_IPC4_MOD_INIT_EXT_OBJ_WORDS(x) ((x) << SOF_IPC4_MOD_INIT_EXT_OBJ_WORDS_SHIFT)
+
+struct sof_ipc4_module_init_ext_object {
+ u32 header;
+ u32 data[];
+} __packed __aligned(4);
+
+enum sof_ipc4_mod_init_ext_obj_id {
+ SOF_IPC4_MOD_INIT_DATA_ID_INVALID = 0,
+ SOF_IPC4_MOD_INIT_DATA_ID_DP_DATA,
+ SOF_IPC4_MOD_INIT_DATA_ID_MAX = SOF_IPC4_MOD_INIT_DATA_ID_DP_DATA,
+};
+
+/* DP module memory configuration data object for ext_init object array */
+struct sof_ipc4_mod_init_ext_dp_memory_data {
+ u32 domain_id; /* userspace domain ID */
+ u32 stack_bytes; /* stack size in bytes, 0 means default size */
+ u32 heap_bytes; /* heap size in bytes, 0 means default size */
+} __packed __aligned(4);
+
/** @}*/
#endif
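A sketch of assembling the extended-init payload described above: word0 flags, one object-array entry, and a DP memory-data object. The helper and its buffer handling are illustrative, and the sizing assumes the structs pack exactly as declared:

static size_t example_build_ext_init(void *buf, u32 domain_id)
{
	struct sof_ipc4_module_init_ext_init *init = buf;
	struct sof_ipc4_module_init_ext_object *obj;
	struct sof_ipc4_mod_init_ext_dp_memory_data *dp;

	init->word0 = SOF_IPC4_MOD_INIT_EXT_RTOS_DOMAIN(1) |
		      SOF_IPC4_MOD_INIT_EXT_DATA_ARRAY(1);

	/* Single object array entry, so mark it as the last one. */
	obj = buf + sizeof(*init);
	obj->header = SOF_IPC4_MOD_INIT_EXT_OBJ_LAST(1) |
		      SOF_IPC4_MOD_INIT_EXT_OBJ_ID(SOF_IPC4_MOD_INIT_DATA_ID_DP_DATA) |
		      SOF_IPC4_MOD_INIT_EXT_OBJ_WORDS(sizeof(*dp) / sizeof(u32));

	dp = (void *)obj->data;
	dp->domain_id = domain_id;
	dp->stack_bytes = 0;	/* 0 selects the default size */
	dp->heap_bytes = 0;	/* 0 selects the default size */

	return sizeof(*init) + sizeof(*obj) + sizeof(*dp);
}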
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 9d3c54cb8223..7c03bdc951bb 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
//
-// Copyright (C) 2022 - 2025 Texas Instruments Incorporated
+// Copyright (C) 2022 - 2026 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2563/TAS2781 driver implements a flexible and configurable
@@ -233,7 +233,6 @@ struct tasdevice_priv {
bool playback_started;
bool isacpi;
bool isspi;
- bool is_user_space_calidata;
unsigned int global_addr;
int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7016d93fa383..b62d5fcce950 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -108,6 +108,9 @@
#define SE_MODE_PAGE_BUF 512
#define SE_SENSE_BUF 96
+/* Peripheral Device Text Identification Information */
+#define PD_TEXT_ID_INFO_LEN 256
+
enum target_submit_type {
/* Use the fabric driver's default submission type */
TARGET_FABRIC_DEFAULT_SUBMIT,
@@ -348,6 +351,7 @@ struct t10_wwn {
struct se_device *t10_dev;
struct config_group t10_wwn_group;
struct list_head t10_vpd_list;
+ char pd_text_id_info[PD_TEXT_ID_INFO_LEN];
};
struct t10_pr_registration {
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index ba9229af9a34..b736da06340a 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -16,7 +16,7 @@ DECLARE_EVENT_CLASS(cgroup_root,
TP_STRUCT__entry(
__field( int, root )
- __field( u16, ss_mask )
+ __field( u32, ss_mask )
__string( name, root->name )
),
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 852d725afea2..24fc402ab3c8 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,6 +9,47 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(damos_stat_after_apply_interval,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ struct damos_stat *stat),
+
+ TP_ARGS(context_idx, scheme_idx, stat),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, nr_tried)
+ __field(unsigned long, sz_tried)
+ __field(unsigned long, nr_applied)
+ __field(unsigned long, sz_applied)
+ __field(unsigned long, sz_ops_filter_passed)
+ __field(unsigned long, qt_exceeds)
+ __field(unsigned long, nr_snapshots)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->nr_tried = stat->nr_tried;
+ __entry->sz_tried = stat->sz_tried;
+ __entry->nr_applied = stat->nr_applied;
+ __entry->sz_applied = stat->sz_applied;
+ __entry->sz_ops_filter_passed = stat->sz_ops_filter_passed;
+ __entry->qt_exceeds = stat->qt_exceeds;
+ __entry->nr_snapshots = stat->nr_snapshots;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u nr_tried=%lu sz_tried=%lu "
+ "nr_applied=%lu sz_tried=%lu sz_ops_filter_passed=%lu "
+ "qt_exceeds=%lu nr_snapshots=%lu",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->nr_tried, __entry->sz_tried,
+ __entry->nr_applied, __entry->sz_applied,
+ __entry->sz_ops_filter_passed, __entry->qt_exceeds,
+ __entry->nr_snapshots)
+);
+
TRACE_EVENT(damos_esz,
TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
diff --git a/include/trace/events/dma_buf.h b/include/trace/events/dma_buf.h
new file mode 100644
index 000000000000..3bb88d05bcc8
--- /dev/null
+++ b/include/trace/events/dma_buf.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma_buf
+
+#if !defined(_TRACE_DMA_BUF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_BUF_H
+
+#include <linux/dma-buf.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(dma_buf,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf),
+
+ TP_STRUCT__entry(
+ __string( exp_name, dmabuf->exp_name)
+ __field( size_t, size)
+ __field( ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(dma_buf_attach_dev,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev),
+
+ TP_STRUCT__entry(
+ __string( dev_name, dev_name(dev))
+ __string( exp_name, dmabuf->exp_name)
+ __field( size_t, size)
+ __field( ino_t, ino)
+ __field( struct dma_buf_attachment *, attach)
+ __field( bool, is_dynamic)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ __entry->is_dynamic = is_dynamic;
+ __entry->attach = attach;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu attachment:%p is_dynamic=%d dev_name=%s",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino,
+ __entry->attach,
+ __entry->is_dynamic,
+ __get_str(dev_name))
+);
+
+DECLARE_EVENT_CLASS(dma_buf_fd,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd),
+
+ TP_STRUCT__entry(
+ __string( exp_name, dmabuf->exp_name)
+ __field( size_t, size)
+ __field( ino_t, ino)
+ __field( int, fd)
+ ),
+
+ TP_fast_assign(
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ __entry->fd = fd;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu fd=%d",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino,
+ __entry->fd)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_export,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_mmap_internal,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_mmap,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_put,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf_attach_dev, dma_buf_dynamic_attach,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev)
+);
+
+DEFINE_EVENT(dma_buf_attach_dev, dma_buf_detach,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev)
+);
+
+DEFINE_EVENT_CONDITION(dma_buf_fd, dma_buf_fd,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd),
+
+ TP_CONDITION(fd >= 0)
+);
+
+DEFINE_EVENT(dma_buf_fd, dma_buf_get,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd)
+);
+
+#endif /* _TRACE_DMA_BUF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
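A sketch of the expected call sites in the dma-buf core; the surrounding functions are assumptions, only the tracepoints come from the header above:

/* In exactly one .c file (e.g. drivers/dma-buf/dma-buf.c): */
#define CREATE_TRACE_POINTS
#include <trace/events/dma_buf.h>

static void example_after_export(struct dma_buf *dmabuf)
{
	trace_dma_buf_export(dmabuf);
}

static void example_fd_installed(struct dma_buf *dmabuf, int fd)
{
	/* The event's TP_CONDITION suppresses it when fd < 0. */
	trace_dma_buf_fd(dmabuf, fd);
}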
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index dad7360f42f9..def20d06507b 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -82,9 +82,9 @@ TRACE_EVENT(erofs_fill_inode,
TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct folio *folio, bool raw),
+ TP_PROTO(struct inode *inode, struct folio *folio, bool raw),
- TP_ARGS(folio, raw),
+ TP_ARGS(inode, folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -96,9 +96,9 @@ TRACE_EVENT(erofs_read_folio,
),
TP_fast_assign(
- __entry->dev = folio->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(folio->mapping->host)->nid;
- __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = EROFS_I(inode)->nid;
+ __entry->dir = S_ISDIR(inode->i_mode);
__entry->index = folio->index;
__entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index fd76d14c2776..a3e8fe414df8 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -102,6 +102,9 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_MIGRATE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_VERITY);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_MOVE_EXT);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
#define show_fc_reason(reason) \
@@ -115,7 +118,10 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
{ EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \
{ EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \
{ EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \
- { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"})
+ { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"}, \
+ { EXT4_FC_REASON_MIGRATE, "MIGRATE"}, \
+ { EXT4_FC_REASON_VERITY, "VERITY"}, \
+ { EXT4_FC_REASON_MOVE_EXT, "MOVE_EXT"})
TRACE_DEFINE_ENUM(CR_POWER2_ALIGNED);
TRACE_DEFINE_ENUM(CR_GOAL_LEN_FAST);
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4cde53b45a85..4e41bff31888 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -37,7 +37,8 @@
EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
EM( SCAN_STORE_FAILED, "store_failed") \
EM( SCAN_COPY_MC, "copy_poisoned_page") \
- EMe(SCAN_PAGE_FILLED, "page_filled")
+ EM( SCAN_PAGE_FILLED, "page_filled") \
+ EMe(SCAN_PAGE_DIRTY_OR_WRITEBACK, "page_dirty_or_writeback")
#undef EM
#undef EMe
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 085b749cdd97..269d949b2025 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -5,7 +5,13 @@
#if !defined(_TRACE_MPTCP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MPTCP_H
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
#include <linux/tracepoint.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/sock_diag.h>
+#include <net/rstreason.h>
#define show_mapping_status(status) \
__print_symbolic(status, \
@@ -178,6 +184,80 @@ TRACE_EVENT(subflow_check_data_avail,
__entry->skb)
);
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(mptcp_rcvbuf_grow,
+
+ TP_PROTO(struct sock *sk, int time),
+
+ TP_ARGS(sk, time),
+
+ TP_STRUCT__entry(
+ __field(int, time)
+ __field(__u32, rtt_us)
+ __field(__u32, copied)
+ __field(__u32, inq)
+ __field(__u32, space)
+ __field(__u32, ooo_space)
+ __field(__u32, rcvbuf)
+ __field(__u32, rcv_wnd)
+ __field(__u8, scaling_ratio)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ __field(const void *, skaddr)
+ ),
+
+ TP_fast_assign(
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ bool ofo_empty;
+ __be32 *p32;
+
+ __entry->time = time;
+ __entry->rtt_us = msk->rcvq_space.rtt_us >> 3;
+ __entry->copied = msk->rcvq_space.copied;
+ __entry->inq = mptcp_inq_hint(sk);
+ __entry->space = msk->rcvq_space.space;
+ ofo_empty = RB_EMPTY_ROOT(&msk->out_of_order_queue);
+ __entry->ooo_space = ofo_empty ? 0 :
+ MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq -
+ msk->ack_seq;
+
+ __entry->rcvbuf = sk->sk_rcvbuf;
+ __entry->rcv_wnd = atomic64_read(&msk->rcv_wnd_sent) -
+ msk->ack_seq;
+ __entry->scaling_ratio = msk->scaling_ratio;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ p32 = (__be32 *)__entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *)__entry->daddr;
+ *p32 = inet->inet_daddr;
+
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->skaddr = sk;
+ ),
+
+ TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u "
+ "rcvbuf=%u rcv_wnd=%u family=%d sport=%hu dport=%hu saddr=%pI4 "
+ "daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c skaddr=%p",
+ __entry->time, __entry->rtt_us, __entry->copied,
+ __entry->inq, __entry->space, __entry->ooo_space,
+ __entry->scaling_ratio, __entry->rcvbuf, __entry->rcv_wnd,
+ __entry->family, __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr, __entry->saddr_v6,
+ __entry->daddr_v6, __entry->skaddr)
+);
#endif /* _TRACE_MPTCP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/pci.h b/include/trace/events/pci.h
new file mode 100644
index 000000000000..9a9122f62fd3
--- /dev/null
+++ b/include/trace/events/pci.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pci
+
+#if !defined(_TRACE_HW_EVENT_PCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HW_EVENT_PCI_H
+
+#include <uapi/linux/pci_regs.h>
+#include <linux/tracepoint.h>
+
+#define PCI_HOTPLUG_EVENT \
+ EM(PCI_HOTPLUG_LINK_UP, "LINK_UP") \
+ EM(PCI_HOTPLUG_LINK_DOWN, "LINK_DOWN") \
+ EM(PCI_HOTPLUG_CARD_PRESENT, "CARD_PRESENT") \
+ EMe(PCI_HOTPLUG_CARD_NOT_PRESENT, "CARD_NOT_PRESENT")
+
+/* Enums require being exported to userspace, for user tool parsing */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+PCI_HOTPLUG_EVENT
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+/*
+ * Note: For generic PCI hotplug events, we pass already-resolved strings
+ * (port_name, slot) instead of driver-specific structures like 'struct
+ * controller'. This is because different PCI hotplug drivers (pciehp, cpqphp,
+ * ibmphp, shpchp) define their own versions of 'struct controller' with
+ * different fields and helper functions. Using driver-specific structures would
+ * make the tracepoint interface non-generic and cause compatibility issues
+ * across different drivers.
+ */
+TRACE_EVENT(pci_hp_event,
+
+ TP_PROTO(const char *port_name,
+ const char *slot,
+ const int event),
+
+ TP_ARGS(port_name, slot, event),
+
+ TP_STRUCT__entry(
+ __string( port_name, port_name )
+ __string( slot, slot )
+ __field( int, event )
+ ),
+
+ TP_fast_assign(
+ __assign_str(port_name);
+ __assign_str(slot);
+ __entry->event = event;
+ ),
+
+ TP_printk("%s slot:%s, event:%s\n",
+ __get_str(port_name),
+ __get_str(slot),
+ __print_symbolic(__entry->event, PCI_HOTPLUG_EVENT)
+ )
+);
+
+#define PCI_EXP_LNKSTA_LINK_STATUS_MASK (PCI_EXP_LNKSTA_LBMS | \
+ PCI_EXP_LNKSTA_LABS | \
+ PCI_EXP_LNKSTA_LT | \
+ PCI_EXP_LNKSTA_DLLLA)
+
+#define LNKSTA_FLAGS \
+ { PCI_EXP_LNKSTA_LT, "LT"}, \
+ { PCI_EXP_LNKSTA_DLLLA, "DLLLA"}, \
+ { PCI_EXP_LNKSTA_LBMS, "LBMS"}, \
+ { PCI_EXP_LNKSTA_LABS, "LABS"}
+
+TRACE_EVENT(pcie_link_event,
+
+ TP_PROTO(struct pci_bus *bus,
+ unsigned int reason,
+ unsigned int width,
+ unsigned int status
+ ),
+
+ TP_ARGS(bus, reason, width, status),
+
+ TP_STRUCT__entry(
+ __string( port_name, pci_name(bus->self))
+ __field( unsigned int, type )
+ __field( unsigned int, reason )
+ __field( unsigned int, cur_bus_speed )
+ __field( unsigned int, max_bus_speed )
+ __field( unsigned int, width )
+ __field( unsigned int, flit_mode )
+ __field( unsigned int, link_status )
+ ),
+
+ TP_fast_assign(
+ __assign_str(port_name);
+ __entry->type = pci_pcie_type(bus->self);
+ __entry->reason = reason;
+ __entry->cur_bus_speed = bus->cur_bus_speed;
+ __entry->max_bus_speed = bus->max_bus_speed;
+ __entry->width = width;
+ __entry->flit_mode = bus->flit_mode;
+ __entry->link_status = status;
+ ),
+
+ TP_printk("%s type:%d, reason:%d, cur_bus_speed:%d, max_bus_speed:%d, width:%u, flit_mode:%u, status:%s\n",
+ __get_str(port_name),
+ __entry->type,
+ __entry->reason,
+ __entry->cur_bus_speed,
+ __entry->max_bus_speed,
+ __entry->width,
+ __entry->flit_mode,
+ __print_flags((unsigned long)__entry->link_status, "|",
+ LNKSTA_FLAGS)
+ )
+);
+
+#endif /* _TRACE_HW_EVENT_PCI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
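A hotplug driver would emit the generic event with pre-resolved strings, per the note above; a sketch assuming the PCI_HOTPLUG_* enum values are provided elsewhere in this series:

#include <linux/pci.h>

/* In exactly one .c file of the consuming driver: */
#define CREATE_TRACE_POINTS
#include <trace/events/pci.h>

static void example_report_link_up(struct pci_dev *port, const char *slot_name)
{
	trace_pci_hp_event(pci_name(port), slot_name, PCI_HOTPLUG_LINK_UP);
}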
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 6757233bd064..f155f95cdb6e 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -305,7 +305,7 @@ TRACE_EVENT(tcp_retransmit_synack,
),
TP_fast_assign(
- struct inet_request_sock *ireq = inet_rsk(req);
+ const struct inet_request_sock *ireq = inet_rsk(req);
__be32 *p32;
__entry->skaddr = sk;
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 490958fa10de..ea58e4656abf 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -40,6 +40,16 @@
{_VMSCAN_THROTTLE_CONGESTED, "VMSCAN_THROTTLE_CONGESTED"} \
) : "VMSCAN_THROTTLE_NONE"
+TRACE_DEFINE_ENUM(KSWAPD_CLEAR_HOPELESS_OTHER);
+TRACE_DEFINE_ENUM(KSWAPD_CLEAR_HOPELESS_KSWAPD);
+TRACE_DEFINE_ENUM(KSWAPD_CLEAR_HOPELESS_DIRECT);
+TRACE_DEFINE_ENUM(KSWAPD_CLEAR_HOPELESS_PCP);
+
+#define kswapd_clear_hopeless_reason_ops \
+ {KSWAPD_CLEAR_HOPELESS_KSWAPD, "KSWAPD"}, \
+ {KSWAPD_CLEAR_HOPELESS_DIRECT, "DIRECT"}, \
+ {KSWAPD_CLEAR_HOPELESS_PCP, "PCP"}, \
+ {KSWAPD_CLEAR_HOPELESS_OTHER, "OTHER"}
#define trace_reclaim_flags(file) ( \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
@@ -535,6 +545,47 @@ TRACE_EVENT(mm_vmscan_throttled,
__entry->usec_delayed,
show_throttle_flags(__entry->reason))
);
+
+TRACE_EVENT(mm_vmscan_kswapd_reclaim_fail,
+
+ TP_PROTO(int nid, int failures),
+
+ TP_ARGS(nid, failures),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, failures)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->failures = failures;
+ ),
+
+ TP_printk("nid=%d failures=%d",
+ __entry->nid, __entry->failures)
+);
+
+TRACE_EVENT(mm_vmscan_kswapd_clear_hopeless,
+
+ TP_PROTO(int nid, int reason),
+
+ TP_ARGS(nid, reason),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("nid=%d reason=%s",
+ __entry->nid,
+ __print_symbolic(__entry->reason, kswapd_clear_hopeless_reason_ops))
+);
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 311a341e6fe4..4d3d8c8f3a1b 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -42,7 +42,6 @@
EM( WB_REASON_VMSCAN, "vmscan") \
EM( WB_REASON_SYNC, "sync") \
EM( WB_REASON_PERIODIC, "periodic") \
- EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
EM( WB_REASON_FORKER_THREAD, "forker_thread") \
EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush")
@@ -856,12 +855,6 @@ DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
TP_ARGS(inode)
);
-DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode)
-);
-
DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,
TP_PROTO(struct inode *inode),
diff --git a/include/uapi/asm-generic/errno.h b/include/uapi/asm-generic/errno.h
index cf9c51ac49f9..92e7ae493ee3 100644
--- a/include/uapi/asm-generic/errno.h
+++ b/include/uapi/asm-generic/errno.h
@@ -55,6 +55,7 @@
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
+#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
@@ -98,6 +99,7 @@
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 942370b3f5d2..a627acc8fb5f 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -860,8 +860,11 @@ __SYSCALL(__NR_file_setattr, sys_file_setattr)
#define __NR_listns 470
__SYSCALL(__NR_listns, sys_listns)
+#define __NR_rseq_slice_yield 471
+__SYSCALL(__NR_rseq_slice_yield, sys_rseq_slice_yield)
+
#undef __NR_syscalls
-#define __NR_syscalls 471
+#define __NR_syscalls 472
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index f80aa4c9d88f..1d34daa0ebcd 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -105,8 +105,6 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
- *
- * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing).
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -115,15 +113,13 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
-#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
- AMDGPU_GEM_DOMAIN_DOORBELL | \
- AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -883,7 +879,7 @@ struct drm_amdgpu_gem_list_handles_entry {
#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
-/* partially resident texture */
+/* unmapped page of partially resident textures */
#define AMDGPU_VM_PAGE_PRT (1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
@@ -1427,6 +1423,7 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
#define AMDGPU_VRAM_TYPE_HBM3E 13
+#define AMDGPU_VRAM_TYPE_HBM4 14
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1629,9 +1626,25 @@ struct drm_amdgpu_info_uq_metadata_gfx {
__u32 csa_alignment;
};
+struct drm_amdgpu_info_uq_metadata_compute {
+ /* EOP size for gfx11 */
+ __u32 eop_size;
+ /* EOP base virtual alignment for gfx11 */
+ __u32 eop_alignment;
+};
+
+struct drm_amdgpu_info_uq_metadata_sdma {
+ /* context save area size for sdma6 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for sdma6 */
+ __u32 csa_alignment;
+};
+
struct drm_amdgpu_info_uq_metadata {
union {
struct drm_amdgpu_info_uq_metadata_gfx gfx;
+ struct drm_amdgpu_info_uq_metadata_compute compute;
+ struct drm_amdgpu_info_uq_metadata_sdma sdma;
};
};
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index 62c917fd4f7b..9c44db2b3dcd 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -19,6 +19,14 @@ extern "C" {
#define AMDXDNA_INVALID_BO_HANDLE 0
#define AMDXDNA_INVALID_FENCE_HANDLE 0
+/*
+ * Define hardware context priority
+ */
+#define AMDXDNA_QOS_REALTIME_PRIORITY 0x100
+#define AMDXDNA_QOS_HIGH_PRIORITY 0x180
+#define AMDXDNA_QOS_NORMAL_PRIORITY 0x200
+#define AMDXDNA_QOS_LOW_PRIORITY 0x280
+
enum amdxdna_device_type {
AMDXDNA_DEV_TYPE_UNKNOWN = -1,
AMDXDNA_DEV_TYPE_KMQ,
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 1956431bb391..50d5337f35ef 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -24,6 +24,8 @@ extern "C" {
#define DRM_PANFROST_SET_LABEL_BO 0x09
#define DRM_PANFROST_JM_CTX_CREATE 0x0a
#define DRM_PANFROST_JM_CTX_DESTROY 0x0b
+#define DRM_PANFROST_SYNC_BO 0x0c
+#define DRM_PANFROST_QUERY_BO_INFO 0x0d
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -35,6 +37,8 @@ extern "C" {
#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
#define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create)
#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy)
+#define DRM_IOCTL_PANFROST_SYNC_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SYNC_BO, struct drm_panfrost_sync_bo)
+#define DRM_IOCTL_PANFROST_QUERY_BO_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_QUERY_BO_INFO, struct drm_panfrost_query_bo_info)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -120,9 +124,12 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns;
};
-/* Valid flags to pass to drm_panfrost_create_bo */
+/* Valid flags to pass to drm_panfrost_create_bo.
+ * PANFROST_BO_WB_MMAP can't be set if PANFROST_BO_HEAP is.
+ */
#define PANFROST_BO_NOEXEC 1
#define PANFROST_BO_HEAP 2
+#define PANFROST_BO_WB_MMAP 4
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
@@ -228,6 +235,13 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
+ DRM_PANFROST_PARAM_SELECTED_COHERENCY,
+};
+
+enum drm_panfrost_gpu_coherency {
+ DRM_PANFROST_GPU_COHERENCY_ACE_LITE = 0,
+ DRM_PANFROST_GPU_COHERENCY_ACE = 1,
+ DRM_PANFROST_GPU_COHERENCY_NONE = 31,
};
struct drm_panfrost_get_param {
@@ -301,6 +315,66 @@ struct drm_panfrost_set_label_bo {
__u64 label;
};
+/* Valid flags to pass to drm_panfrost_bo_sync_op */
+#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH 0
+#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE 1
+
+/**
+ * struct drm_panthor_bo_flush_map_op - BO map sync op
+ */
+struct drm_panfrost_bo_sync_op {
+ /** @handle: Handle of the buffer object to sync. */
+ __u32 handle;
+
+ /** @type: Type of sync operation. */
+ __u32 type;
+
+ /**
+ * @offset: Offset into the BO at which the sync range starts.
+ *
+ * This will be rounded down to the nearest cache line as needed.
+ */
+ __u32 offset;
+
+ /**
+ * @size: Size of the range to sync
+ *
+ * @size + @offset will be rounded up to the nearest cache line as
+ * needed.
+ */
+ __u32 size;
+};
+
+/**
+ * struct drm_panfrost_sync_bo - ioctl argument for syncing BO maps
+ */
+struct drm_panfrost_sync_bo {
+ /** Array of struct drm_panfrost_bo_sync_op */
+ __u64 ops;
+
+ /** Number of BO sync ops */
+ __u32 op_count;
+
+ __u32 pad;
+};
+
+/** BO comes from a different subsystem. */
+#define DRM_PANFROST_BO_IS_IMPORTED (1 << 0)
+
+struct drm_panfrost_query_bo_info {
+ /** Handle of the object being queried. */
+ __u32 handle;
+
+ /** Extra flags that do not come from the BO_CREATE ioctl(). */
+ __u32 extra_flags;
+
+ /** Flags passed at creation time. */
+ __u32 create_flags;
+
+ /** Will be zero on return. */
+ __u32 pad;
+};
+
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0
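A userspace sketch of flushing a whole BO with the new SYNC_BO ioctl; the helper is illustrative and error handling is minimal:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

static int example_flush_bo(int fd, __u32 handle, __u32 size)
{
	struct drm_panfrost_bo_sync_op op = {
		.handle = handle,
		.type = PANFROST_BO_SYNC_CPU_CACHE_FLUSH,
		.offset = 0,
		.size = size,	/* rounded up to a cache line by the kernel */
	};
	struct drm_panfrost_sync_bo args = {
		.ops = (uintptr_t)&op,
		.op_count = 1,
	};

	return ioctl(fd, DRM_IOCTL_PANFROST_SYNC_BO, &args);
}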
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 467d365ed7ba..b401ac585d6a 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -144,6 +144,16 @@ enum drm_panthor_ioctl_id {
* pgoff_t size.
*/
DRM_PANTHOR_SET_USER_MMIO_OFFSET,
+
+ /** @DRM_PANTHOR_BO_SYNC: Sync BO data to/from the device */
+ DRM_PANTHOR_BO_SYNC,
+
+ /**
+ * @DRM_PANTHOR_BO_QUERY_INFO: Query information about a BO.
+ *
+ * This is useful for imported BOs.
+ */
+ DRM_PANTHOR_BO_QUERY_INFO,
};
/**
@@ -246,6 +256,26 @@ enum drm_panthor_dev_query_type {
};
/**
+ * enum drm_panthor_gpu_coherency: Type of GPU coherency
+ */
+enum drm_panthor_gpu_coherency {
+ /**
+ * @DRM_PANTHOR_GPU_COHERENCY_ACE_LITE: ACE Lite coherency.
+ */
+ DRM_PANTHOR_GPU_COHERENCY_ACE_LITE = 0,
+
+ /**
+ * @DRM_PANTHOR_GPU_COHERENCY_ACE: ACE coherency.
+ */
+ DRM_PANTHOR_GPU_COHERENCY_ACE = 1,
+
+ /**
+ * @DRM_PANTHOR_GPU_COHERENCY_NONE: No coherency.
+ */
+ DRM_PANTHOR_GPU_COHERENCY_NONE = 31,
+};
+
+/**
* struct drm_panthor_gpu_info - GPU information
*
* Structure grouping all queryable information relating to the GPU.
@@ -301,7 +331,16 @@ struct drm_panthor_gpu_info {
*/
__u32 thread_max_barrier_size;
- /** @coherency_features: Coherency features. */
+ /**
+ * @coherency_features: Coherency features.
+ *
+ * Combination of drm_panthor_gpu_coherency flags.
+ *
+ * Note that this only lists the coherency protocols supported by the
+ * GPU; the actual coherency in place depends on the SoC
+ * integration and is reflected by
+ * drm_panthor_gpu_info::selected_coherency.
+ */
__u32 coherency_features;
/** @texture_features: Texture features. */
@@ -310,8 +349,12 @@ struct drm_panthor_gpu_info {
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
- /** @pad0: MBZ. */
- __u32 pad0;
+ /**
+ * @selected_coherency: Coherency selected for this device.
+ *
+ * One of drm_panthor_gpu_coherency.
+ */
+ __u32 selected_coherency;
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
@@ -638,6 +681,15 @@ struct drm_panthor_vm_get_state {
enum drm_panthor_bo_flags {
/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
+
+ /**
+ * @DRM_PANTHOR_BO_WB_MMAP: Force "Write-Back Cacheable" CPU mapping.
+ *
+ * CPU map the buffer object in userspace by forcing the "Write-Back
+ * Cacheable" cacheability attribute. The mapping otherwise uses the
+ * "Non-Cacheable" attribute if the GPU is not IO coherent.
+ */
+ DRM_PANTHOR_BO_WB_MMAP = (1 << 1),
};
/**
@@ -1041,6 +1093,101 @@ struct drm_panthor_set_user_mmio_offset {
};
/**
+ * enum drm_panthor_bo_sync_op_type - BO sync type
+ */
+enum drm_panthor_bo_sync_op_type {
+ /** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH: Flush CPU caches. */
+ DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH = 0,
+
+ /** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE: Flush and invalidate CPU caches. */
+ DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE = 1,
+};
+
+/**
+ * struct drm_panthor_bo_sync_op - BO map sync op
+ */
+struct drm_panthor_bo_sync_op {
+ /** @handle: Handle of the buffer object to sync. */
+ __u32 handle;
+
+ /** @type: Type of operation. */
+ __u32 type;
+
+ /**
+ * @offset: Offset into the BO at which the sync range starts.
+ *
+ * This will be rounded down to the nearest cache line as needed.
+ */
+ __u64 offset;
+
+ /**
+ * @size: Size of the range to sync
+ *
+ * @size + @offset will be rounded up to the nearest cache line as
+ * needed.
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_panthor_bo_sync - BO map sync request
+ */
+struct drm_panthor_bo_sync {
+ /**
+ * @ops: Array of struct drm_panthor_bo_sync_op sync operations.
+ */
+ struct drm_panthor_obj_array ops;
+};
+
+/**
+ * enum drm_panthor_bo_extra_flags - Set of flags returned on a BO_QUERY_INFO request
+ *
+ * These flags reflect BO properties that do not come directly from the flags
+ * passed at creation time, or carry information on BOs imported from other drivers.
+ */
+enum drm_panthor_bo_extra_flags {
+ /**
+ * @DRM_PANTHOR_BO_IS_IMPORTED: BO has been imported from an external driver.
+ *
+ * Note that imported dma-buf handles are not flagged as imported if they
+ * were exported by panthor; only buffers coming from other drivers
+ * (dma heaps, other GPUs, display controllers, V4L, ...) are flagged.
+ *
+ * It's also important to note that all imported BOs are mapped cached and can't
+ * be considered IO-coherent even if the GPU is. This means they require explicit
+ * syncs that must go through the DRM_PANTHOR_BO_SYNC ioctl (userland cache
+ * maintenance is not allowed in that case, because extra operations might be
+ * needed to make changes visible to the CPU/device, like buffer migration when the
+ * exporter is a GPU with its own VRAM).
+ */
+ DRM_PANTHOR_BO_IS_IMPORTED = (1 << 0),
+};
+
+/**
+ * struct drm_panthor_bo_query_info - Query BO info
+ */
+struct drm_panthor_bo_query_info {
+ /** @handle: Handle of the buffer object to query flags on. */
+ __u32 handle;
+
+ /**
+ * @extra_flags: Combination of enum drm_panthor_bo_extra_flags flags.
+ */
+ __u32 extra_flags;
+
+ /**
+ * @create_flags: Flags passed at creation time.
+ *
+ * Combination of enum drm_panthor_bo_flags flags.
+ * Will be zero if the buffer comes from a different driver.
+ */
+ __u32 create_flags;
+
+ /** @pad: Will be zero on return. */
+ __u32 pad;
+};
+
+/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
@@ -1086,6 +1233,10 @@ enum {
DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
+ DRM_IOCTL_PANTHOR_BO_SYNC =
+ DRM_IOCTL_PANTHOR(WR, BO_SYNC, bo_sync),
+ DRM_IOCTL_PANTHOR_BO_QUERY_INFO =
+ DRM_IOCTL_PANTHOR(WR, BO_QUERY_INFO, bo_query_info),
};
#if defined(__cplusplus)
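A userspace sketch tying BO_QUERY_INFO and BO_SYNC together for an imported buffer, as the DRM_PANTHOR_BO_IS_IMPORTED note above suggests; it assumes the header's existing DRM_PANTHOR_OBJ_ARRAY() initializer, and the helper itself is illustrative:

#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

static int example_sync_imported(int fd, __u32 handle, __u64 size)
{
	struct drm_panthor_bo_query_info info = { .handle = handle };
	struct drm_panthor_bo_sync_op op = {
		.handle = handle,
		.type = DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE,
		.offset = 0,
		.size = size,
	};
	struct drm_panthor_bo_sync sync = {
		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
	};
	int ret;

	ret = ioctl(fd, DRM_IOCTL_PANTHOR_BO_QUERY_INFO, &info);
	if (ret)
		return ret;

	/* Imported BOs are mapped cacheable and need explicit syncs. */
	if (info.extra_flags & DRM_PANTHOR_BO_IS_IMPORTED)
		ret = ioctl(fd, DRM_IOCTL_PANTHOR_BO_SYNC, &sync);

	return ret;
}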
diff --git a/include/uapi/drm/rocket_accel.h b/include/uapi/drm/rocket_accel.h
index 14b2e12b7c49..d0685e372b79 100644
--- a/include/uapi/drm/rocket_accel.h
+++ b/include/uapi/drm/rocket_accel.h
@@ -26,20 +26,27 @@ extern "C" {
*
*/
struct drm_rocket_create_bo {
- /** Input: Size of the requested BO. */
+ /**
+ * @size: Input: Size of the requested BO.
+ */
__u32 size;
- /** Output: GEM handle for the BO. */
+ /**
+ * @handle: Output: GEM handle for the BO.
+ */
__u32 handle;
/**
- * Output: DMA address for the BO in the NPU address space. This address
- * is private to the DRM fd and is valid for the lifetime of the GEM
- * handle.
+ * @dma_address: Output: DMA address for the BO in the NPU address
+ * space. This address is private to the DRM fd and is valid for
+ * the lifetime of the GEM handle.
*/
__u64 dma_address;
- /** Output: Offset into the drm node to use for subsequent mmap call. */
+ /**
+ * @offset: Output: Offset into the drm node to use for subsequent
+ * mmap call.
+ */
__u64 offset;
};
@@ -50,13 +57,19 @@ struct drm_rocket_create_bo {
* synchronization.
*/
struct drm_rocket_prep_bo {
- /** Input: GEM handle of the buffer object. */
+ /**
+ * @handle: Input: GEM handle of the buffer object.
+ */
__u32 handle;
- /** Reserved, must be zero. */
+ /**
+ * @reserved: Reserved, must be zero.
+ */
__u32 reserved;
- /** Input: Amount of time to wait for NPU jobs. */
+ /**
+ * @timeout_ns: Input: Amount of time to wait for NPU jobs.
+ */
__s64 timeout_ns;
};
@@ -66,10 +79,14 @@ struct drm_rocket_prep_bo {
* Synchronize caches for NPU access.
*/
struct drm_rocket_fini_bo {
- /** Input: GEM handle of the buffer object. */
+ /**
+ * @handle: Input: GEM handle of the buffer object.
+ */
__u32 handle;
- /** Reserved, must be zero. */
+ /**
+ * @reserved: Reserved, must be zero.
+ */
__u32 reserved;
};
@@ -79,10 +96,15 @@ struct drm_rocket_fini_bo {
* A task is the smallest unit of work that can be run on the NPU.
*/
struct drm_rocket_task {
- /** Input: DMA address to NPU mapping of register command buffer */
+ /**
+ * @regcmd: Input: DMA address to NPU mapping of register command buffer
+ */
__u32 regcmd;
- /** Input: Number of commands in the register command buffer */
+ /**
+ * @regcmd_count: Input: Number of commands in the register command
+ * buffer
+ */
__u32 regcmd_count;
};
@@ -94,25 +116,44 @@ struct drm_rocket_task {
* sequentially on the same core, to benefit from memory residency in SRAM.
*/
struct drm_rocket_job {
- /** Input: Pointer to an array of struct drm_rocket_task. */
+ /**
+ * @tasks: Input: Pointer to an array of struct drm_rocket_task.
+ */
__u64 tasks;
- /** Input: Pointer to a u32 array of the BOs that are read by the job. */
+ /**
+ * @in_bo_handles: Input: Pointer to a u32 array of the BOs that
+ * are read by the job.
+ */
__u64 in_bo_handles;
- /** Input: Pointer to a u32 array of the BOs that are written to by the job. */
+ /**
+ * @out_bo_handles: Input: Pointer to a u32 array of the BOs that
+ * are written to by the job.
+ */
__u64 out_bo_handles;
- /** Input: Number of tasks passed in. */
+ /**
+ * @task_count: Input: Number of tasks passed in.
+ */
__u32 task_count;
- /** Input: Size in bytes of the structs in the @tasks field. */
+ /**
+ * @task_struct_size: Input: Size in bytes of the structs in the
+ * @tasks field.
+ */
__u32 task_struct_size;
- /** Input: Number of input BO handles passed in (size is that times 4). */
+ /**
+ * @in_bo_handle_count: Input: Number of input BO handles passed in
+ * (size is that times 4).
+ */
__u32 in_bo_handle_count;
- /** Input: Number of output BO handles passed in (size is that times 4). */
+ /**
+ * @out_bo_handle_count: Input: Number of output BO handles passed in
+ * (size is that times 4).
+ */
__u32 out_bo_handle_count;
};
@@ -122,16 +163,25 @@ struct drm_rocket_job {
* The kernel will schedule the execution of these jobs in dependency order.
*/
struct drm_rocket_submit {
- /** Input: Pointer to an array of struct drm_rocket_job. */
+ /**
+ * @jobs: Input: Pointer to an array of struct drm_rocket_job.
+ */
__u64 jobs;
- /** Input: Number of jobs passed in. */
+ /**
+ * @job_count: Input: Number of jobs passed in.
+ */
__u32 job_count;
- /** Input: Size in bytes of the structs in the @jobs field. */
+ /**
+ * @job_struct_size: Input: Size in bytes of the structs in the
+ * @jobs field.
+ */
__u32 job_struct_size;
- /** Reserved, must be zero. */
+ /**
+ * @reserved: Reserved, must be zero.
+ */
__u64 reserved;
};
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index f64dc0eff0e6..077e66a682e2 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -106,6 +106,7 @@ extern "C" {
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_MADVISE 0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0e
/* Must be kept compact -- no holes */
@@ -123,6 +124,7 @@ extern "C" {
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
+#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
/**
* DOC: Xe IOCTL Extensions
@@ -210,8 +212,12 @@ struct drm_xe_ext_set_property {
/** @pad: MBZ */
__u32 pad;
- /** @value: property value */
- __u64 value;
+ union {
+ /** @value: property value */
+ __u64 value;
+ /** @ptr: pointer to user value */
+ __u64 ptr;
+ };
/** @reserved: Reserved */
__u64 reserved[2];
@@ -403,6 +409,9 @@ struct drm_xe_query_mem_regions {
* has low latency hint support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
* device has CPU address mirroring support
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT - Flag is set if the
+ * device supports the userspace hint %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION.
+ * This is exposed only on Xe2+.
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -421,6 +430,7 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT (1 << 3)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -791,6 +801,17 @@ struct drm_xe_device_query {
* need to use VRAM for display surfaces, therefore the kernel requires
* setting this flag for such objects, otherwise an error is thrown on
* small-bar systems.
+ * - %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION - Allows userspace to
+ * hint that compression (CCS) should be disabled for the buffer being
+ * created. This can avoid unnecessary memory operations and CCS state
+ * management.
+ * On pre-Xe2 platforms, this flag is currently rejected as compression
+ * control is not supported via PAT index. On Xe2+ platforms, compression
+ * is controlled via PAT entries. If this flag is set, the driver will reject
+ * any VM bind that requests a PAT index enabling compression for this BO.
+ * Note: On dGPU platforms, there is currently no change in behavior with
+ * this flag, but future improvements may leverage it. The current benefit is
+ * primarily applicable to iGPU platforms.
*
* @cpu_caching supports the following values:
* - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
@@ -837,6 +858,7 @@ struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+#define DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION (1 << 3)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
@@ -1252,6 +1274,17 @@ struct drm_xe_vm_bind {
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP - Create a multi-queue group
+ * or add secondary queues to a multi-queue group.
+ * If the extension's 'value' field has %DRM_XE_MULTI_GROUP_CREATE flag set,
+ * then a new multi-queue group is created with this queue as the primary queue
+ * (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
+ * queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
+ * When adding either the primary or a secondary queue, all other bits of
+ * the extension's 'value' field must be set to 0.
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
+ * priority within the multi-queue group. Current valid priority values are 0–2
+ * (default is 1), with higher values indicating higher priority.
*
* The example below shows how to use @drm_xe_exec_queue_create to create
* a simple exec_queue (no parallel submission) of class
@@ -1292,6 +1325,10 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
+#define DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE 3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
+#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -1655,6 +1692,9 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
DRM_XE_OA_UNIT_TYPE_OAM_SAG,
+
+ /** @DRM_XE_OA_UNIT_TYPE_MERT: MERT OA unit */
+ DRM_XE_OA_UNIT_TYPE_MERT,
};
/**
@@ -1677,12 +1717,19 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
#define DRM_XE_OA_CAPS_OAM (1 << 4)
+#define DRM_XE_OA_CAPS_OA_UNIT_GT_ID (1 << 5)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
+ /** @gt_id: gt id for this OA unit */
+ __u16 gt_id;
+
+ /** @reserved1: MBZ */
+ __u16 reserved1[3];
+
/** @reserved: MBZ */
- __u64 reserved[4];
+ __u64 reserved[3];
/** @num_engines: number of engines in @eci array */
__u64 num_engines;
@@ -2072,7 +2119,13 @@ struct drm_xe_madvise {
struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
- /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ /**
+ * @preferred_mem_loc.devmem_fd:
+ * Device file-descriptor of the device where the
+ * preferred memory is located, or one of the
+ * above special values. Please also see
+ * @preferred_mem_loc.region_instance below.
+ */
__u32 devmem_fd;
#define DRM_XE_MIGRATE_ALL_PAGES 0
@@ -2080,8 +2133,14 @@ struct drm_xe_madvise {
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u16 migration_policy;
- /** @preferred_mem_loc.pad : MBZ */
- __u16 pad;
+ /**
+ * @preferred_mem_loc.region_instance : Region instance.
+ * MBZ if @devmem_fd <= %DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE.
+ * Otherwise it should identify the desired VRAM
+ * instance of the device indicated by
+ * @preferred_mem_loc.devmem_fd.
+ */
+ __u16 region_instance;
/** @preferred_mem_loc.reserved : Reserved */
__u64 reserved;
@@ -2274,6 +2333,30 @@ struct drm_xe_vm_query_mem_range_attr {
};
+/**
+ * struct drm_xe_exec_queue_set_property - exec queue set property
+ *
+ * Sets execution queue properties dynamically.
+ * Currently only the %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY
+ * property can be set dynamically.
+ */
+struct drm_xe_exec_queue_set_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
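A minimal usage sketch: raising a secondary queue's priority within its group
through this structure. Only the struct layout above is taken from the header;
the ioctl request name below is hypothetical.

	struct drm_xe_exec_queue_set_property prop = {
		.exec_queue_id = queue_id,	/* a queue in the multi-queue group */
		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY,
		.value = 2,			/* valid range 0..2, default 1 */
	};

	/* DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY is a hypothetical request name */
	if (ioctl(xe_fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &prop))
		perror("exec queue set property");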
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f8d8513eda27..c8d400b7680a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -119,6 +119,14 @@ enum bpf_cgroup_iter_order {
BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
+ /*
+ * Walks the immediate children of the specified parent
+ * cgroup_subsys_state. Unlike BPF_CGROUP_ITER_DESCENDANTS_PRE,
+ * BPF_CGROUP_ITER_DESCENDANTS_POST, and BPF_CGROUP_ITER_ANCESTORS_UP,
+ * the iterator does not include the specified parent as one of the
+ * returned iterator elements.
+ */
+ BPF_CGROUP_ITER_CHILDREN,
};
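As a sketch, attaching a cgroup iterator program that visits only the
immediate children of a parent cgroup might look as follows, assuming the
usual libbpf bpf_link_create() attach path (parent_cgroup_fd and prog_fd are
illustrative):

	union bpf_iter_link_info linfo = {
		.cgroup = {
			.order = BPF_CGROUP_ITER_CHILDREN,
			.cgroup_fd = parent_cgroup_fd,	/* parent itself is not visited */
		},
	};
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		    .iter_info = &linfo,
		    .iter_info_len = sizeof(linfo));
	int link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, &opts);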
union bpf_iter_link_info {
@@ -918,6 +926,16 @@ union bpf_iter_link_info {
* Number of bytes read from the stream on success, or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
+ * BPF_PROG_ASSOC_STRUCT_OPS
+ * Description
+ * Associate a BPF program with a struct_ops map. The struct_ops
+ * map is identified by *map_fd* and the BPF program is
+ * identified by *prog_fd*.
+ *
+ * Return
+ * 0 on success or -1 if an error occurred (in which case,
+ * *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -974,6 +992,7 @@ enum bpf_cmd {
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
BPF_PROG_STREAM_READ_BY_FD,
+ BPF_PROG_ASSOC_STRUCT_OPS,
__MAX_BPF_CMD,
};
@@ -1134,6 +1153,7 @@ enum bpf_attach_type {
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
BPF_TRACE_UPROBE_SESSION,
+ BPF_TRACE_FSESSION,
__MAX_BPF_ATTACH_TYPE
};
@@ -1373,6 +1393,8 @@ enum {
BPF_NOEXIST = 1, /* create new element if it didn't exist */
BPF_EXIST = 2, /* update existing element */
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+ BPF_F_CPU = 8, /* cpu flag for percpu maps, upper 32-bit of flags is a cpu number */
+ BPF_F_ALL_CPUS = 16, /* update value across all CPUs for percpu maps */
};
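A hedged sketch of the new flags from userspace: the CPU number rides in the
upper 32 bits of the flags argument (map_fd, key and value are illustrative,
and kernel/libbpf support for these flags is assumed):

	__u32 key = 0, cpu = 3;
	__u64 value = 42;

	/* update only CPU 3's slot of a per-cpu map */
	bpf_map_update_elem(map_fd, &key, &value, BPF_F_CPU | ((__u64)cpu << 32));

	/* or replicate one value into every CPU's slot */
	bpf_map_update_elem(map_fd, &key, &value, BPF_F_ALL_CPUS);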
/* flags for BPF_MAP_CREATE command */
@@ -1894,6 +1916,12 @@ union bpf_attr {
__u32 prog_fd;
} prog_stream_read;
+ struct {
+ __u32 map_fd;
+ __u32 prog_fd;
+ __u32 flags;
+ } prog_assoc_struct_ops;
+
} __attribute__((aligned(8)));
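A minimal sketch of issuing the new command through the raw bpf(2) syscall
with the prog_assoc_struct_ops fields added above (map_fd and prog_fd are
illustrative):

	union bpf_attr attr = {};

	attr.prog_assoc_struct_ops.map_fd = map_fd;	/* struct_ops map */
	attr.prog_assoc_struct_ops.prog_fd = prog_fd;	/* program to associate */

	if (syscall(__NR_bpf, BPF_PROG_ASSOC_STRUCT_OPS, &attr, sizeof(attr)) < 0)
		perror("BPF_PROG_ASSOC_STRUCT_OPS");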
/* The description below is an attempt at providing documentation to eBPF
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index e8fd92789423..9165154a274d 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -336,6 +336,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
+#define BTRFS_FEATURE_INCOMPAT_REMAP_TREE (1ULL << 17)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index fc29d273845d..f7843e6bb978 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -76,6 +76,9 @@
/* Tracks RAID stripes in block groups. */
#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
+/* Holds details of remapped addresses after relocation. */
+#define BTRFS_REMAP_TREE_OBJECTID 13ULL
+
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@@ -282,6 +285,10 @@
#define BTRFS_RAID_STRIPE_KEY 230
+#define BTRFS_IDENTITY_REMAP_KEY 234
+#define BTRFS_REMAP_KEY 235
+#define BTRFS_REMAP_BACKREF_KEY 236
+
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@@ -714,9 +721,12 @@ struct btrfs_super_block {
__u8 metadata_uuid[BTRFS_FSID_SIZE];
__u64 nr_global_roots;
+ __le64 remap_root;
+ __le64 remap_root_generation;
+ __u8 remap_root_level;
/* Future expansion */
- __le64 reserved[27];
+ __u8 reserved[199];
__u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
@@ -1161,12 +1171,15 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9)
#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10)
+#define BTRFS_BLOCK_GROUP_REMAPPED (1ULL << 11)
+#define BTRFS_BLOCK_GROUP_METADATA_REMAP (1ULL << 12)
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
- BTRFS_BLOCK_GROUP_METADATA)
+ BTRFS_BLOCK_GROUP_METADATA | \
+ BTRFS_BLOCK_GROUP_METADATA_REMAP)
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
@@ -1219,6 +1232,14 @@ struct btrfs_block_group_item {
__le64 flags;
} __attribute__ ((__packed__));
+struct btrfs_block_group_item_v2 {
+ __le64 used;
+ __le64 chunk_objectid;
+ __le64 flags;
+ __le64 remap_bytes;
+ __le32 identity_remap_count;
+} __attribute__ ((__packed__));
+
struct btrfs_free_space_info {
__le32 extent_count;
__le32 flags;
@@ -1323,4 +1344,13 @@ struct btrfs_verity_descriptor_item {
__u8 encryption;
} __attribute__ ((__packed__));
+/*
+ * For a range identified by a BTRFS_REMAP_KEY item in the remap tree, gives
+ * the address that the start of the range will get remapped to. This
+ * structure is also shared by BTRFS_REMAP_BACKREF_KEY.
+ */
+struct btrfs_remap_item {
+ __le64 address;
+} __attribute__ ((__packed__));
+
#endif /* _BTRFS_CTREE_H_ */
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index b7ff9c44f9aa..de0005f28e5c 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -253,6 +253,7 @@ enum dpll_a_pin {
DPLL_A_PIN_ESYNC_PULSE,
DPLL_A_PIN_REFERENCE_SYNC,
DPLL_A_PIN_PHASE_ADJUST_GRAN,
+ DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET_PPT,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 819ded2d39de..ee30dcd80901 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -545,6 +545,8 @@ typedef struct elf64_shdr {
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NN_RISCV_TAGGED_ADDR_CTRL "LINUX"
#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */
+#define NN_RISCV_USER_CFI "LINUX"
+#define NT_RISCV_USER_CFI 0x903 /* RISC-V shadow stack state */
#define NN_LOONGARCH_CPUCFG "LINUX"
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NN_LOONGARCH_CSR "LINUX"
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index eb7ff2602fbb..b74b80508553 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -15,13 +15,10 @@
#define _UAPI_LINUX_ETHTOOL_H
#include <linux/const.h>
+#include <linux/typelimits.h>
#include <linux/types.h>
#include <linux/if_ether.h>
-#ifndef __KERNEL__
-#include <limits.h> /* for INT_MAX */
-#endif
-
/* All structures exposed to userland should be defined such that they
* have the same layout for 32-bit and 64-bit userland.
*/
@@ -603,6 +600,8 @@ enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
ETHTOOL_LINK_EXT_STATE_OVERHEAT,
ETHTOOL_LINK_EXT_STATE_MODULE,
+ ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION,
+ ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN,
};
/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
@@ -1094,13 +1093,20 @@ enum ethtool_module_fw_flash_status {
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
- * @len: On return, the number of strings in the string set
+ * @len: Number of strings in the string set
* @data: Buffer for strings. Each string is null-padded to a size of
* %ETH_GSTRING_LEN.
*
* Users must use %ETHTOOL_GSSET_INFO to find the number of strings in
* the string set. They must allocate a buffer of the appropriate
* size immediately following this structure.
+ *
+ * Setting @len on input is optional (though preferred); if it is not set,
+ * it must be zeroed.
+ * When set, @len returns the requested count if it matches the actual
+ * count; otherwise it is returned as zero.
+ * This prevents problems when the number of strings differs from the
+ * userspace allocation.
*/
struct ethtool_gstrings {
__u32 cmd;
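A sketch of the resulting contract (the same applies to @n_stats below),
assuming the count was first obtained via ETHTOOL_GSSET_INFO as n_strings and
MAX_STRINGS is an illustrative compile-time bound:

	struct {
		struct ethtool_gstrings hdr;
		__u8 data[MAX_STRINGS * ETH_GSTRING_LEN];
	} req = {
		.hdr = {
			.cmd = ETHTOOL_GSTRINGS,
			.string_set = ETH_SS_STATS,
			.len = n_strings,	/* optional on input, else must be 0 */
		},
	};
	struct ifreq ifr = {0};

	ifr.ifr_data = (char *)&req;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (!ioctl(sock_fd, SIOCETHTOOL, &ifr) && req.hdr.len == 0)
		/* string count changed under us; re-query ETHTOOL_GSSET_INFO */;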
@@ -1177,13 +1183,20 @@ struct ethtool_test {
/**
* struct ethtool_stats - device-specific statistics
* @cmd: Command number = %ETHTOOL_GSTATS
- * @n_stats: On return, the number of statistics
+ * @n_stats: Number of statistics
* @data: Array of statistics
*
* Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
* number of statistics that will be returned. They must allocate a
* buffer of the appropriate size (8 * number of statistics)
* immediately following this structure.
+ *
+ * Setting @n_stats on input is optional (though preferred); if it is not
+ * set, it must be zeroed.
+ * When set, @n_stats returns the requested count if it matches the actual
+ * count; otherwise it is returned as zero.
+ * This prevents problems when the number of stats differs from the
+ * userspace allocation.
*/
struct ethtool_stats {
__u32 cmd;
@@ -2190,6 +2203,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_40000 40000
#define SPEED_50000 50000
#define SPEED_56000 56000
+#define SPEED_80000 80000
#define SPEED_100000 100000
#define SPEED_200000 200000
#define SPEED_400000 400000
@@ -2200,7 +2214,7 @@ enum ethtool_link_mode_bit_indices {
static inline int ethtool_validate_speed(__u32 speed)
{
- return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
+ return speed <= __KERNEL_INT_MAX || speed == (__u32)SPEED_UNKNOWN;
}
/* Duplex, half or full. */
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index aaa502a7bff4..1749b35ab2c2 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -362,7 +362,7 @@ struct hv_kvp_exchg_msg_value {
__u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
__u32 value_u32;
__u64 value_u64;
- };
+ } __attribute__((packed));
} __attribute__((packed));
struct hv_kvp_msg_enumerate {
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index b35871cbeed7..4f51e198ac2e 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -42,7 +42,7 @@ struct sockaddr_alg_new {
struct af_alg_iv {
__u32 ivlen;
- __u8 iv[];
+ __u8 iv[] __counted_by(ivlen);
};
/* Socket options */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 3b491d96e52e..e9b5f79e1ee1 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1443,6 +1443,7 @@ enum {
IFLA_GENEVE_DF,
IFLA_GENEVE_INNER_PROTO_INHERIT,
IFLA_GENEVE_PORT_RANGE,
+ IFLA_GENEVE_GRO_HINT,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index b5b23c0d5283..fc473af6feb4 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -237,6 +237,18 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_SQE_MIXED (1U << 19)
+/*
+ * When set, io_uring ignores the SQ head and tail and fetches SQEs to submit
+ * starting from index 0 instead of from the index stored in the head pointer.
+ * IOW, the user should place all SQEs at the beginning of the SQ memory
+ * before issuing a submission syscall.
+ *
+ * It requires IORING_SETUP_NO_SQARRAY and is incompatible with
+ * IORING_SETUP_SQPOLL. The user must also never change the SQ head and tail
+ * values and must keep them set to 0. Any other value is undefined behaviour.
+ */
+#define IORING_SETUP_SQ_REWIND (1U << 20)
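A hedged sketch of the submission pattern this enables; sqes stands for the
mmap'ed SQE array, whose setup is elided:

	struct io_uring_params p = {
		.flags = IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQ_REWIND,
	};
	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);

	/* head/tail stay 0: write SQEs from slot 0 and pass only the count */
	memset(&sqes[0], 0, sizeof(sqes[0]));
	sqes[0].opcode = IORING_OP_NOP;
	syscall(__NR_io_uring_enter, ring_fd, 1 /* to_submit */, 0, 0, NULL, 0);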
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -700,6 +712,9 @@ enum io_uring_register_op {
/* auxiliary zcrx configuration, see enum zcrx_ctrl_op */
IORING_REGISTER_ZCRX_CTRL = 36,
+ /* register bpf filtering programs */
+ IORING_REGISTER_BPF_FILTER = 37,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -805,6 +820,13 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_task_restriction {
+ __u16 flags;
+ __u16 nr_res;
+ __u32 resv[3];
+ __DECLARE_FLEX_ARRAY(struct io_uring_restriction, restrictions);
+};
+
struct io_uring_clock_register {
__u32 clockid;
__u32 __resv[3];
@@ -1082,7 +1104,7 @@ struct io_uring_zcrx_ifq_reg {
struct io_uring_zcrx_offsets offsets;
__u32 zcrx_id;
- __u32 __resv2;
+ __u32 rx_buf_len;
__u64 __resv[3];
};
diff --git a/include/uapi/linux/io_uring/bpf_filter.h b/include/uapi/linux/io_uring/bpf_filter.h
new file mode 100644
index 000000000000..220351b81bc0
--- /dev/null
+++ b/include/uapi/linux/io_uring/bpf_filter.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Header file for the io_uring BPF filters.
+ */
+#ifndef LINUX_IO_URING_BPF_FILTER_H
+#define LINUX_IO_URING_BPF_FILTER_H
+
+#include <linux/types.h>
+
+/*
+ * Struct passed to filters.
+ */
+struct io_uring_bpf_ctx {
+ __u64 user_data;
+ __u8 opcode;
+ __u8 sqe_flags;
+ __u8 pdu_size; /* size of aux data for filter */
+ __u8 pad[5];
+ union {
+ struct {
+ __u32 family;
+ __u32 type;
+ __u32 protocol;
+ } socket;
+ struct {
+ __u64 flags;
+ __u64 mode;
+ __u64 resolve;
+ } open;
+ };
+};
+
+enum {
+ /*
+ * If set, any currently unset opcode will have a deny filter attached
+ */
+ IO_URING_BPF_FILTER_DENY_REST = 1,
+};
+
+struct io_uring_bpf_filter {
+ __u32 opcode; /* io_uring opcode to filter */
+ __u32 flags;
+ __u32 filter_len; /* number of BPF instructions */
+ __u32 resv;
+ __u64 filter_ptr; /* pointer to BPF filter */
+ __u64 resv2[5];
+};
+
+enum {
+ IO_URING_BPF_CMD_FILTER = 1,
+};
+
+struct io_uring_bpf {
+ __u16 cmd_type; /* IO_URING_BPF_* values */
+ __u16 cmd_flags; /* none so far */
+ __u32 resv;
+ union {
+ struct io_uring_bpf_filter filter;
+ };
+};
+
+#endif
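A hedged sketch of registering a filter set that permits IORING_OP_NOP and
denies every opcode without a filter. The filter instruction format (classic
BPF, struct sock_filter) and the nr_args value passed to io_uring_register(2)
are assumptions:

	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET | BPF_K, 1),	/* always return "allow" */
	};
	struct io_uring_bpf cmd = {
		.cmd_type = IO_URING_BPF_CMD_FILTER,
		.filter = {
			.opcode = IORING_OP_NOP,
			.flags = IO_URING_BPF_FILTER_DENY_REST,
			.filter_len = 1,
			.filter_ptr = (__u64)(unsigned long)allow,
		},
	};

	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BPF_FILTER, &cmd, 1);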
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 2c41920b641d..1dafbc552d37 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -466,15 +466,26 @@ struct iommu_hwpt_arm_smmuv3 {
};
/**
+ * struct iommu_hwpt_amd_guest - AMD IOMMU guest I/O page table data
+ * (IOMMU_HWPT_DATA_AMD_GUEST)
+ * @dte: Guest Device Table Entry (DTE)
+ */
+struct iommu_hwpt_amd_guest {
+ __aligned_u64 dte[4];
+};
+
+/**
* enum iommu_hwpt_data_type - IOMMU HWPT Data Type
* @IOMMU_HWPT_DATA_NONE: no data
* @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
* @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table
+ * @IOMMU_HWPT_DATA_AMD_GUEST: AMD IOMMU guest page table
*/
enum iommu_hwpt_data_type {
IOMMU_HWPT_DATA_NONE = 0,
IOMMU_HWPT_DATA_VTD_S1 = 1,
IOMMU_HWPT_DATA_ARM_SMMUV3 = 2,
+ IOMMU_HWPT_DATA_AMD_GUEST = 3,
};
/**
@@ -624,6 +635,32 @@ struct iommu_hw_info_tegra241_cmdqv {
};
/**
+ * struct iommu_hw_info_amd - AMD IOMMU device info
+ *
+ * @efr : Value of AMD IOMMU Extended Feature Register (EFR)
+ * @efr2: Value of AMD IOMMU Extended Feature 2 Register (EFR2)
+ *
+ * Please see the description of these registers in the following sections of
+ * the AMD I/O Virtualization Technology (IOMMU) Specification
+ * (https://docs.amd.com/v/u/en-US/48882_3.10_PUB):
+ *
+ * - MMIO Offset 0030h IOMMU Extended Feature Register
+ * - MMIO Offset 01A0h IOMMU Extended Feature 2 Register
+ *
+ * Note: The EFR and EFR2 are raw values reported by hardware.
+ * The VMM is responsible for determining the appropriate flags to expose to
+ * the VM, since certain features are not currently supported by the kernel
+ * for HW-vIOMMU.
+ *
+ * The current VMM-allowed list of feature flags is:
+ * - EFR[GTSup, GASup, GioSup, PPRSup, EPHSup, GATS, GLX, PASmax]
+ */
+struct iommu_hw_info_amd {
+ __aligned_u64 efr;
+ __aligned_u64 efr2;
+};
+
+/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
* @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware
* info
@@ -632,6 +669,7 @@ struct iommu_hw_info_tegra241_cmdqv {
* @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
* @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
* SMMUv3) info type
+ * @IOMMU_HW_INFO_TYPE_AMD: AMD IOMMU info type
*/
enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_NONE = 0,
@@ -639,6 +677,7 @@ enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3,
+ IOMMU_HW_INFO_TYPE_AMD = 4,
};
/**
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 5d1727a6d040..e72359370857 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -44,9 +44,13 @@
* - 1.16 - Add contiguous VRAM allocation flag
* - 1.17 - Add SDMA queue creation with target SDMA engine ID
* - 1.18 - Rename pad in set_memory_policy_args to misc_process_flag
+ * - 1.19 - Add a new ioctl to create secondary kfd processes
+ * - 1.20 - Trap handler support for expert scheduling mode available
+ * - 1.21 - Debugger support to subscribe to LDS out-of-address exceptions
+ * - 1.22 - Add queue creation with metadata ring base address
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 18
+#define KFD_IOCTL_MINOR_VERSION 22
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -84,7 +88,7 @@ struct kfd_ioctl_create_queue_args {
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
__u32 sdma_engine_id; /* to KFD */
- __u32 pad;
+ __u32 metadata_ring_size; /* to KFD */
};
struct kfd_ioctl_destroy_queue_args {
@@ -145,6 +149,8 @@ struct kfd_dbg_device_info_entry {
__u32 num_xcc;
__u32 capability;
__u32 debug_prop;
+ __u32 capability2;
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -945,6 +951,7 @@ enum kfd_dbg_trap_address_watch_mode {
enum kfd_dbg_trap_flags {
KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
+ KFD_DBG_TRAP_FLAG_LDS_OUT_OF_ADDR_RANGE = 4
};
/* Trap exceptions */
@@ -1671,7 +1678,10 @@ struct kfd_ioctl_dbg_trap_args {
#define AMDKFD_IOC_DBG_TRAP \
AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
+#define AMDKFD_IOC_CREATE_PROCESS \
+ AMDKFD_IO(0x27)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x27
+#define AMDKFD_COMMAND_END 0x28
#endif
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index 1125fe47959f..0b6ce2f3c887 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -64,7 +64,8 @@
#define HSA_CAP_RESERVED 0x000f8000
#define HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED 0x00000001
-#define HSA_CAP2_RESERVED 0xfffffffe
+#define HSA_CAP2_TRAP_DEBUG_LDS_OUT_OF_ADDR_RANGE_SUPPORTED 0x00000002
+#define HSA_CAP2_RESERVED 0xfffffffc
/* debug_prop bits in node properties */
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index dddb781b0507..88cca0e22ece 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -180,6 +180,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
+#define KVM_EXIT_ARM_LDST64B 42
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -402,7 +403,7 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 75fd7f5e6cc3..f88fa1f68b77 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -117,11 +117,24 @@ struct landlock_ruleset_attr {
* future nested domains, not the one being created. It can also be used
* with a @ruleset_fd value of -1 to mute subdomain logs without creating a
* domain.
+ *
+ * The following flag supports policy enforcement in multithreaded processes:
+ *
+ * %LANDLOCK_RESTRICT_SELF_TSYNC
+ * Applies the new Landlock configuration atomically to all threads of the
+ * current process, including the Landlock domain and logging
+ * configuration. This overrides the Landlock configuration of sibling
+ * threads, irrespective of previously established Landlock domains and
+ * logging configurations on these threads.
+ *
+ * If the calling thread is running with no_new_privs, this operation
+ * enables no_new_privs on the sibling threads as well.
*/
/* clang-format off */
#define LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF (1U << 0)
#define LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON (1U << 1)
#define LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF (1U << 2)
+#define LANDLOCK_RESTRICT_SELF_TSYNC (1U << 3)
/* clang-format on */
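A minimal sketch of applying an already-prepared ruleset to every thread of
the process at once (ruleset_fd from landlock_create_ruleset(2)):

	/* TSYNC also propagates no_new_privs to sibling threads */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("prctl");

	if (syscall(SYS_landlock_restrict_self, ruleset_fd,
		    LANDLOCK_RESTRICT_SELF_TSYNC))
		perror("landlock_restrict_self");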
/**
@@ -182,11 +195,13 @@ struct landlock_net_port_attr {
* It should be noted that port 0 passed to :manpage:`bind(2)` will bind
* to an available port from the ephemeral port range. This can be
* configured with the ``/proc/sys/net/ipv4/ip_local_port_range`` sysctl
- * (also used for IPv6).
+ * (also used for IPv6), and within that range, on a per-socket basis
+ * with ``setsockopt(IP_LOCAL_PORT_RANGE)``.
*
- * A Landlock rule with port 0 and the ``LANDLOCK_ACCESS_NET_BIND_TCP``
+ * A Landlock rule with port 0 and the %LANDLOCK_ACCESS_NET_BIND_TCP
* right means that requesting to bind on port 0 is allowed and it will
- * automatically translate to binding on the related port range.
+ * automatically translate to binding on a kernel-assigned ephemeral
+ * port.
*/
__u64 port;
};
@@ -329,13 +344,12 @@ struct landlock_net_port_attr {
* These flags enable to restrict a sandboxed process to a set of network
* actions.
*
- * This is supported since Landlock ABI version 4.
- *
* The following access rights apply to TCP port numbers:
*
- * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port.
- * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to
- * a remote port.
+ * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind TCP sockets to the given local
+ * port. Support added in Landlock ABI version 4.
+ * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect TCP sockets to the given
+ * remote port. Support added in Landlock ABI version 4.
*/
/* clang-format off */
#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 638ca21b7a90..4f2da935a76c 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -104,5 +104,6 @@
#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
#define PID_FS_MAGIC 0x50494446 /* "PIDF" */
#define GUEST_MEMFD_MAGIC 0x474d454d /* "GMEM" */
+#define NULL_FS_MAGIC 0x4E554C4C /* "NULL" */
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 8fbbe613611a..6c962d866e86 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -39,6 +39,9 @@ enum {
#define MPOL_MODE_FLAGS \
(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES | MPOL_F_NUMA_BALANCING)
+/* Whether the nodemask is specified by users */
+#define MPOL_USER_NODEMASK_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+
/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 5d3f8c9e3a62..d9d86598d100 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -61,7 +61,8 @@
/*
* open_tree() flags.
*/
-#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
+#define OPEN_TREE_CLONE (1 << 0) /* Clone the target tree and attach the clone */
+#define OPEN_TREE_NAMESPACE (1 << 1) /* Clone the target tree into a new mount namespace */
#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
/*
@@ -197,7 +198,10 @@ struct statmount {
*/
struct mnt_id_req {
__u32 size;
- __u32 mnt_ns_fd;
+ union {
+ __u32 mnt_ns_fd;
+ __u32 mnt_fd;
+ };
__u64 mnt_id;
__u64 param;
__u64 mnt_ns_id;
@@ -232,4 +236,9 @@ struct mnt_id_req {
#define LSMT_ROOT 0xffffffffffffffff /* root mount */
#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
+/*
+ * @flag bits for statmount(2)
+ */
+#define STATMOUNT_BY_FD 0x00000001U /* want mountinfo for given fd */
+
#endif /* _UAPI_LINUX_MOUNT_H */
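A hedged sketch of querying mount info for a plain file descriptor through
the new union member; the exact calling convention for STATMOUNT_BY_FD
(passed here via the statmount(2) flags argument) is an assumption:

	struct mnt_id_req req = {
		.size = sizeof(req),
		.mnt_fd = dir_fd,		/* fd on the mount of interest */
		.param = STATMOUNT_SB_BASIC,	/* which fields to fill in */
	};
	char buf[4096];

	if (syscall(__NR_statmount, &req, buf, sizeof(buf), STATMOUNT_BY_FD) < 0)
		perror("statmount");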
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
index c97d060ee90b..fe9863d75350 100644
--- a/include/uapi/linux/mptcp_pm.h
+++ b/include/uapi/linux/mptcp_pm.h
@@ -11,7 +11,7 @@
#define MPTCP_PM_VER 1
/**
- * enum mptcp_event_type
+ * enum mptcp_event_type - Netlink MPTCP event types
* @MPTCP_EVENT_UNSPEC: unused event
* @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is the
* good time to allocate memory and send ADD_ADDR if needed. Depending on the
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 1610fdbab98d..f6e8d1e05c97 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -10,10 +10,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
-
-#ifndef __KERNEL__
-#include <limits.h> /* for INT_MIN, INT_MAX */
-#endif
+#include <linux/typelimits.h>
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
@@ -31,14 +28,14 @@
#define NF_BR_NUMHOOKS 6
enum nf_br_hook_priorities {
- NF_BR_PRI_FIRST = INT_MIN,
+ NF_BR_PRI_FIRST = __KERNEL_INT_MIN,
NF_BR_PRI_NAT_DST_BRIDGED = -300,
NF_BR_PRI_FILTER_BRIDGED = -200,
NF_BR_PRI_BRNF = 0,
NF_BR_PRI_NAT_DST_OTHER = 100,
NF_BR_PRI_FILTER_OTHER = 200,
NF_BR_PRI_NAT_SRC = 300,
- NF_BR_PRI_LAST = INT_MAX,
+ NF_BR_PRI_LAST = __KERNEL_INT_MAX,
};
#endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index 155e77d6a42d..439d3c59862b 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -7,12 +7,11 @@
#include <linux/netfilter.h>
+#include <linux/typelimits.h>
/* only for userspace compatibility */
#ifndef __KERNEL__
-#include <limits.h> /* for INT_MIN, INT_MAX */
-
/* IP Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP_PRE_ROUTING 0
@@ -28,7 +27,7 @@
#endif /* ! __KERNEL__ */
enum nf_ip_hook_priorities {
- NF_IP_PRI_FIRST = INT_MIN,
+ NF_IP_PRI_FIRST = __KERNEL_INT_MIN,
NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP_PRI_CONNTRACK_DEFRAG = -400,
NF_IP_PRI_RAW = -300,
@@ -41,8 +40,8 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_HELPER = 300,
- NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
- NF_IP_PRI_LAST = INT_MAX,
+ NF_IP_PRI_CONNTRACK_CONFIRM = __KERNEL_INT_MAX,
+ NF_IP_PRI_LAST = __KERNEL_INT_MAX,
};
/* Arguments for setsockopt SOL_IP: */
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index 80aa9b0799af..0e40d00b37fa 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -10,12 +10,11 @@
#include <linux/netfilter.h>
+#include <linux/typelimits.h>
/* only for userspace compatibility */
#ifndef __KERNEL__
-#include <limits.h> /* for INT_MIN, INT_MAX */
-
/* IP6 Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP6_PRE_ROUTING 0
@@ -32,7 +31,7 @@
enum nf_ip6_hook_priorities {
- NF_IP6_PRI_FIRST = INT_MIN,
+ NF_IP6_PRI_FIRST = __KERNEL_INT_MIN,
NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
NF_IP6_PRI_RAW = -300,
@@ -45,7 +44,7 @@ enum nf_ip6_hook_priorities {
NF_IP6_PRI_NAT_SRC = 100,
NF_IP6_PRI_SELINUX_LAST = 225,
NF_IP6_PRI_CONNTRACK_HELPER = 300,
- NF_IP6_PRI_LAST = INT_MAX,
+ NF_IP6_PRI_LAST = __KERNEL_INT_MAX,
};
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 71c7196d3281..e629c4953534 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -55,7 +55,7 @@
NFSERR_NODEV = 19, /* v2 v3 v4 */
NFSERR_NOTDIR = 20, /* v2 v3 v4 */
NFSERR_ISDIR = 21, /* v2 v3 v4 */
- NFSERR_INVAL = 22, /* v2 v3 v4 */
+ NFSERR_INVAL = 22, /* v3 v4 */
NFSERR_FBIG = 27, /* v2 v3 v4 */
NFSERR_NOSPC = 28, /* v2 v3 v4 */
NFSERR_ROFS = 30, /* v2 v3 v4 */
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
index e157e2009ea8..e9efbc9e63d8 100644
--- a/include/uapi/linux/nfsd_netlink.h
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -35,6 +35,7 @@ enum {
NFSD_A_SERVER_GRACETIME,
NFSD_A_SERVER_LEASETIME,
NFSD_A_SERVER_SCOPE,
+ NFSD_A_SERVER_MIN_THREADS,
__NFSD_A_SERVER_MAX,
NFSD_A_SERVER_MAX = (__NFSD_A_SERVER_MAX - 1)
diff --git a/include/uapi/linux/nilfs2_api.h b/include/uapi/linux/nilfs2_api.h
index 8b9b89104f3d..d1b6fcde2fb8 100644
--- a/include/uapi/linux/nilfs2_api.h
+++ b/include/uapi/linux/nilfs2_api.h
@@ -58,7 +58,7 @@ NILFS_CPINFO_FNS(INVALID, invalid)
NILFS_CPINFO_FNS(MINOR, minor)
/**
- * nilfs_suinfo - segment usage information
+ * struct nilfs_suinfo - segment usage information
* @sui_lastmod: timestamp of last modification
* @sui_nblocks: number of written blocks in segment
* @sui_flags: segment usage flags
@@ -93,7 +93,7 @@ static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
}
/**
- * nilfs_suinfo_update - segment usage information update
+ * struct nilfs_suinfo_update - segment usage information update
* @sup_segnum: segment number
* @sup_flags: flags for which fields are active in sup_sui
* @sup_reserved: reserved necessary for alignment
diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
index 3196cc44a002..b3442b16ff6a 100644
--- a/include/uapi/linux/nilfs2_ondisk.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -133,73 +133,104 @@ struct nilfs_super_root {
/**
* struct nilfs_super_block - structure of super block on disk
+ * @s_rev_level: Revision level
+ * @s_minor_rev_level: minor revision level
+ * @s_magic: Magic signature
+ * @s_bytes: Bytes count of CRC calculation for
+ * this structure. s_reserved is excluded.
+ * @s_flags: flags
+ * @s_crc_seed: Seed value of CRC calculation
+ * @s_sum: Check sum of super block
+ * @s_log_block_size: Block size represented as follows:
+ * blocksize = 1 << (s_log_block_size + 10)
+ * @s_nsegments: Number of segments in filesystem
+ * @s_dev_size: block device size in bytes
+ * @s_first_data_block: 1st seg disk block number
+ * @s_blocks_per_segment: number of blocks per full segment
+ * @s_r_segments_percentage: Reserved segments percentage
+ * @s_last_cno: Last checkpoint number
+ * @s_last_pseg: disk block addr pseg written last
+ * @s_last_seq: seq. number of seg written last
+ * @s_free_blocks_count: Free blocks count
+ * @s_ctime: Creation time (execution time of newfs)
+ * @s_mtime: Mount time
+ * @s_wtime: Write time
+ * @s_mnt_count: Mount count
+ * @s_max_mnt_count: Maximal mount count
+ * @s_state: File system state
+ * @s_errors: Behaviour when detecting errors
+ * @s_lastcheck: time of last check
+ * @s_checkinterval: max. time between checks
+ * @s_creator_os: OS
+ * @s_def_resuid: Default uid for reserved blocks
+ * @s_def_resgid: Default gid for reserved blocks
+ * @s_first_ino: First non-reserved inode
+ * @s_inode_size: Size of an inode
+ * @s_dat_entry_size: Size of a dat entry
+ * @s_checkpoint_size: Size of a checkpoint
+ * @s_segment_usage_size: Size of a segment usage
+ * @s_uuid: 128-bit uuid for volume
+ * @s_volume_name: volume name
+ * @s_c_interval: Commit interval of segment
+ * @s_c_block_max: Threshold of data amount for the
+ * segment construction
+ * @s_feature_compat: Compatible feature set
+ * @s_feature_compat_ro: Read-only compatible feature set
+ * @s_feature_incompat: Incompatible feature set
+ * @s_reserved: padding to the end of the block
*/
struct nilfs_super_block {
-/*00*/ __le32 s_rev_level; /* Revision level */
- __le16 s_minor_rev_level; /* minor revision level */
- __le16 s_magic; /* Magic signature */
-
- __le16 s_bytes; /*
- * Bytes count of CRC calculation
- * for this structure. s_reserved
- * is excluded.
- */
- __le16 s_flags; /* flags */
- __le32 s_crc_seed; /* Seed value of CRC calculation */
-/*10*/ __le32 s_sum; /* Check sum of super block */
-
- __le32 s_log_block_size; /*
- * Block size represented as follows
- * blocksize =
- * 1 << (s_log_block_size + 10)
- */
- __le64 s_nsegments; /* Number of segments in filesystem */
-/*20*/ __le64 s_dev_size; /* block device size in bytes */
- __le64 s_first_data_block; /* 1st seg disk block number */
-/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */
- __le32 s_r_segments_percentage; /* Reserved segments percentage */
-
- __le64 s_last_cno; /* Last checkpoint number */
-/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */
- __le64 s_last_seq; /* seq. number of seg written last */
-/*50*/ __le64 s_free_blocks_count; /* Free blocks count */
-
- __le64 s_ctime; /*
- * Creation time (execution time of
- * newfs)
- */
-/*60*/ __le64 s_mtime; /* Mount time */
- __le64 s_wtime; /* Write time */
-/*70*/ __le16 s_mnt_count; /* Mount count */
- __le16 s_max_mnt_count; /* Maximal mount count */
- __le16 s_state; /* File system state */
- __le16 s_errors; /* Behaviour when detecting errors */
- __le64 s_lastcheck; /* time of last check */
-
-/*80*/ __le32 s_checkinterval; /* max. time between checks */
- __le32 s_creator_os; /* OS */
- __le16 s_def_resuid; /* Default uid for reserved blocks */
- __le16 s_def_resgid; /* Default gid for reserved blocks */
- __le32 s_first_ino; /* First non-reserved inode */
-
-/*90*/ __le16 s_inode_size; /* Size of an inode */
- __le16 s_dat_entry_size; /* Size of a dat entry */
- __le16 s_checkpoint_size; /* Size of a checkpoint */
- __le16 s_segment_usage_size; /* Size of a segment usage */
-
-/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
-/*A8*/ char s_volume_name[80] /* volume name */
- __kernel_nonstring;
-
-/*F8*/ __le32 s_c_interval; /* Commit interval of segment */
- __le32 s_c_block_max; /*
- * Threshold of data amount for
- * the segment construction
- */
-/*100*/ __le64 s_feature_compat; /* Compatible feature set */
- __le64 s_feature_compat_ro; /* Read-only compatible feature set */
- __le64 s_feature_incompat; /* Incompatible feature set */
- __u32 s_reserved[186]; /* padding to the end of the block */
+/*00*/ __le32 s_rev_level;
+ __le16 s_minor_rev_level;
+ __le16 s_magic;
+
+ __le16 s_bytes;
+ __le16 s_flags;
+ __le32 s_crc_seed;
+/*10*/ __le32 s_sum;
+
+ __le32 s_log_block_size;
+ __le64 s_nsegments;
+/*20*/ __le64 s_dev_size;
+ __le64 s_first_data_block;
+/*30*/ __le32 s_blocks_per_segment;
+ __le32 s_r_segments_percentage;
+
+ __le64 s_last_cno;
+/*40*/ __le64 s_last_pseg;
+ __le64 s_last_seq;
+/*50*/ __le64 s_free_blocks_count;
+
+ __le64 s_ctime;
+/*60*/ __le64 s_mtime;
+ __le64 s_wtime;
+/*70*/ __le16 s_mnt_count;
+ __le16 s_max_mnt_count;
+ __le16 s_state;
+ __le16 s_errors;
+ __le64 s_lastcheck;
+
+/*80*/ __le32 s_checkinterval;
+ __le32 s_creator_os;
+ __le16 s_def_resuid;
+ __le16 s_def_resgid;
+ __le32 s_first_ino;
+
+/*90*/ __le16 s_inode_size;
+ __le16 s_dat_entry_size;
+ __le16 s_checkpoint_size;
+ __le16 s_segment_usage_size;
+
+/*98*/ __u8 s_uuid[16];
+/*A8*/ char s_volume_name[80] __kernel_nonstring;
+
+/*F8*/ __le32 s_c_interval;
+ __le32 s_c_block_max;
+
+/*100*/ __le64 s_feature_compat;
+ __le64 s_feature_compat_ro;
+ __le64 s_feature_incompat;
+ __u32 s_reserved[186];
};
/*
@@ -449,7 +480,7 @@ struct nilfs_btree_node {
/**
* struct nilfs_direct_node - header of built-in bmap array
* @dn_flags: flags
- * @dn_pad: padding
+ * @pad: padding
*/
struct nilfs_direct_node {
__u8 dn_flags;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 8433bac48112..b63f71850906 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2025 Intel Corporation
+ * Copyright (C) 2018-2026 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -2974,6 +2974,16 @@ enum nl80211_commands {
* primary channel is 2 MHz wide, and the control channel designates
* the 1 MHz primary subchannel within that 2 MHz primary.
*
+ * @NL80211_ATTR_EPP_PEER: A flag attribute to indicate if the peer is an EPP
+ * STA. Used with %NL80211_CMD_NEW_STA and %NL80211_CMD_ADD_LINK_STA
+ *
+ * @NL80211_ATTR_UHR_CAPABILITY: UHR Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION). Can be set
+ * only if HE/EHT are also available.
+ * @NL80211_ATTR_DISABLE_UHR: Force UHR capable interfaces to disable
+ * this feature during association. This is a flag attribute.
+ * Currently only supported in mac80211 drivers.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3542,6 +3552,11 @@ enum nl80211_attrs {
NL80211_ATTR_S1G_PRIMARY_2MHZ,
+ NL80211_ATTR_EPP_PEER,
+
+ NL80211_ATTR_UHR_CAPABILITY,
+ NL80211_ATTR_DISABLE_UHR,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3894,6 +3909,12 @@ enum nl80211_eht_ru_alloc {
* @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate
* @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate
* @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate
+ * @NL80211_RATE_INFO_UHR_MCS: UHR MCS index (u8, 0-15, 17, 19, 20, 23)
+ * Note that the other EHT attributes (such as @NL80211_RATE_INFO_EHT_NSS)
+ * are used in conjunction with this where applicable
+ * @NL80211_RATE_INFO_UHR_ELR: UHR ELR flag, which restricts NSS to 1,
+ * MCS to 0 or 1, and GI to %NL80211_RATE_INFO_EHT_GI_1_6.
+ * @NL80211_RATE_INFO_UHR_IM: UHR Interference Mitigation flag
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -3927,6 +3948,9 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_4_MHZ_WIDTH,
NL80211_RATE_INFO_8_MHZ_WIDTH,
NL80211_RATE_INFO_16_MHZ_WIDTH,
+ NL80211_RATE_INFO_UHR_MCS,
+ NL80211_RATE_INFO_UHR_ELR,
+ NL80211_RATE_INFO_UHR_IM,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -4249,6 +4273,10 @@ enum nl80211_mpath_info {
* capabilities element
* @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as
* defined in EHT capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_UHR_CAP_MAC: UHR MAC capabilities as in UHR
+ * capabilities element
+ * @NL80211_BAND_IFTYPE_ATTR_UHR_CAP_PHY: UHR PHY capabilities as in UHR
+ * capabilities element
* @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
* @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined
*/
@@ -4266,6 +4294,8 @@ enum nl80211_band_iftype_attr {
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY,
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET,
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE,
+ NL80211_BAND_IFTYPE_ATTR_UHR_CAP_MAC,
+ NL80211_BAND_IFTYPE_ATTR_UHR_CAP_PHY,
/* keep last */
__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -4445,6 +4475,11 @@ enum nl80211_wmm_rule {
* channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_16MHZ: 16 MHz operation is not allowed on this
* channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_S1G_NO_PRIMARY: Channel is not permitted for use
+ * as a primary channel. Does not prevent the channel from existing
+ * as a non-primary subchannel. Only applicable to S1G channels.
+ * @NL80211_FREQUENCY_ATTR_NO_UHR: UHR operation is not allowed on this channel
+ * in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4493,6 +4528,8 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_4MHZ,
NL80211_FREQUENCY_ATTR_NO_8MHZ,
NL80211_FREQUENCY_ATTR_NO_16MHZ,
+ NL80211_FREQUENCY_ATTR_S1G_NO_PRIMARY,
+ NL80211_FREQUENCY_ATTR_NO_UHR,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4706,6 +4743,7 @@ enum nl80211_sched_scan_match_attr {
* despite NO_IR configuration.
* @NL80211_RRF_ALLOW_20MHZ_ACTIVITY: Allow activity in 20 MHz bandwidth,
* despite NO_IR configuration.
+ * @NL80211_RRF_NO_UHR: UHR operation not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1 << 0,
@@ -4732,6 +4770,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1 << 23,
NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1 << 24,
NL80211_RRF_ALLOW_20MHZ_ACTIVITY = 1 << 25,
+ NL80211_RRF_NO_UHR = 1 << 26,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -5426,6 +5465,7 @@ enum nl80211_bss_status {
* @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key
* @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
* @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
+ * @NL80211_AUTHTYPE_EPPKE: Enhanced Privacy Protection Key Exchange
* @__NL80211_AUTHTYPE_NUM: internal
* @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
* @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -5441,6 +5481,7 @@ enum nl80211_auth_type {
NL80211_AUTHTYPE_FILS_SK,
NL80211_AUTHTYPE_FILS_SK_PFS,
NL80211_AUTHTYPE_FILS_PK,
+ NL80211_AUTHTYPE_EPPKE,
/* keep last */
__NL80211_AUTHTYPE_NUM,
@@ -6745,6 +6786,15 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_BEACON_RATE_EHT: Driver supports beacon rate
* configuration (AP/mesh) with EHT rates.
*
+ * @NL80211_EXT_FEATURE_EPPKE: Driver supports Enhanced Privacy Protection
+ * Key Exchange (EPPKE) with user space SME (NL80211_CMD_AUTHENTICATE)
+ * in non-AP STA mode.
+ *
+ * @NL80211_EXT_FEATURE_ASSOC_FRAME_ENCRYPTION: This specifies that the
+ * driver supports encryption of (Re)Association Request and Response
+ * frames in both non-AP STA and AP mode as specified in
+ * "IEEE P802.11bi/D3.0, 12.16.6".
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6821,6 +6871,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_DFS_CONCURRENT,
NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
NL80211_EXT_FEATURE_BEACON_RATE_EHT,
+ NL80211_EXT_FEATURE_EPPKE,
+ NL80211_EXT_FEATURE_ASSOC_FRAME_ENCRYPTION,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -7433,6 +7485,8 @@ enum nl80211_nan_band_conf_attributes {
* address that can take values from 50-6F-9A-01-00-00 to
* 50-6F-9A-01-FF-FF. This attribute is optional. If not present,
* a random Cluster ID will be chosen.
+ * This attribute will be ignored in NL80211_CMD_CHANGE_NAN_CONFIG,
+ * since once NAN has been started the cluster ID can no longer change.
* @NL80211_NAN_CONF_EXTRA_ATTRS: Additional NAN attributes to be
* published in the beacons. This is an optional byte array.
* @NL80211_NAN_CONF_VENDOR_ELEMS: Vendor-specific elements that will
@@ -7767,6 +7821,30 @@ enum nl80211_peer_measurement_attrs {
* trigger based ranging measurement is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
* if non-trigger-based ranging measurement is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_6GHZ_SUPPORT: flag attribute indicating if
+ * ranging on the 6 GHz band is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_LTF_REP: u32 attribute indicating
+ * the maximum number of LTF repetitions the device can transmit in the
+ * preamble of the ranging NDP (zero means only one LTF, no repetitions)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_LTF_REP: u32 attribute indicating
+ * the maximum number of LTF repetitions the device can receive in the
+ * preamble of the ranging NDP (zero means only one LTF, no repetitions)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_STS: u32 attribute indicating
+ * the maximum number of space-time streams supported for ranging NDP TX
+ * (zero-based)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_STS: u32 attribute indicating
+ * the maximum number of space-time streams supported for ranging NDP RX
+ * (zero-based)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_TX: u32 attribute indicating the
+ * maximum total number of LTFs the device can transmit. The total number
+ * of LTFs is (number of LTF repetitions) * (number of space-time streams).
+ * This limits the allowed combinations of LTF repetitions and STS.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_RX: u32 attribute indicating the
+ * maximum total number of LTFs the device can receive. The total number
+ * of LTFs is (number of LTF repetitions) * (number of space-time streams).
+ * This limits the allowed combinations of LTF repetitions and STS.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_RSTA_SUPPORT: flag attribute indicating the
+ * device supports operating as the RSTA in PMSR FTM request
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -7784,6 +7862,14 @@ enum nl80211_peer_measurement_ftm_capa {
NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED,
NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED,
+ NL80211_PMSR_FTM_CAPA_ATTR_6GHZ_SUPPORT,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_LTF_REP,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_LTF_REP,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_STS,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_STS,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_TX,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_RX,
+ NL80211_PMSR_FTM_CAPA_ATTR_RSTA_SUPPORT,
/* keep last */
NUM_NL80211_PMSR_FTM_CAPA_ATTR,
@@ -7799,12 +7885,15 @@ enum nl80211_peer_measurement_ftm_capa {
* &enum nl80211_preamble), optional for DMG (u32)
* @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in
* 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element"
- * (u8, 0-15, optional with default 15 i.e. "no preference")
+ * (u8, 0-15, optional with default 15, i.e. "no preference"; no limit for
+ * non-EDCA ranging)
* @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units
* of 100ms (u16, optional with default 0)
* @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016
* Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with
- * default 15 i.e. "no preference")
+ * default 15 i.e. "no preference"). For non-EDCA ranging, this is the
+ * burst duration in milliseconds (optional with default 0, i.e. let the
+ * device decide).
* @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames
* requested per burst
* (u8, 0-31, optional with default 0 i.e. "no preference")
@@ -7833,6 +7922,14 @@ enum nl80211_peer_measurement_ftm_capa {
* @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the
* responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
* or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
+ * @NL80211_PMSR_FTM_REQ_ATTR_RSTA: optional. Request to perform the measurement
+ * as the RSTA (flag). When set, the device is expected to dwell on the
+ * channel specified in %NL80211_PMSR_PEER_ATTR_CHAN until it receives the
+ * FTM request from the peer or the timeout specified by
+ * %NL80211_ATTR_TIMEOUT has expired.
+ * Only valid if %NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK is set (so the
+ * RSTA will have the measurement results to report back in the FTM
+ * response).
*
* @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
* @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -7853,6 +7950,7 @@ enum nl80211_peer_measurement_ftm_req {
NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK,
NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR,
+ NL80211_PMSR_FTM_REQ_ATTR_RSTA,
/* keep last */
NUM_NL80211_PMSR_FTM_REQ_ATTR,
@@ -7937,6 +8035,8 @@ enum nl80211_peer_measurement_ftm_failure_reasons {
* 9.4.2.22.1) starting with the Measurement Token, with Measurement
* Type 11.
* @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_PERIOD: actual burst period used by
+ * the responder (similar to request, u16)
*
* @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal
* @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number
@@ -7965,6 +8065,7 @@ enum nl80211_peer_measurement_ftm_resp {
NL80211_PMSR_FTM_RESP_ATTR_LCI,
NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
NL80211_PMSR_FTM_RESP_ATTR_PAD,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_PERIOD,
/* keep last */
NUM_NL80211_PMSR_FTM_RESP_ATTR,
diff --git a/include/uapi/linux/pci.h b/include/uapi/linux/pci.h
index a769eefc5139..4f150028965d 100644
--- a/include/uapi/linux/pci.h
+++ b/include/uapi/linux/pci.h
@@ -39,4 +39,11 @@
#define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */
#define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */
+enum pci_hotplug_event {
+ PCI_HOTPLUG_LINK_UP,
+ PCI_HOTPLUG_LINK_DOWN,
+ PCI_HOTPLUG_CARD_PRESENT,
+ PCI_HOTPLUG_CARD_NOT_PRESENT,
+};
+
#endif /* _UAPILINUX_PCI_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 3add74ae2594..ec1c54b5a310 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -132,6 +132,11 @@
#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
+/* Masks for dword-sized processing of Bus Number and Sec Latency Timer fields */
+#define PCI_PRIMARY_BUS_MASK 0x000000ff
+#define PCI_SECONDARY_BUS_MASK 0x0000ff00
+#define PCI_SUBORDINATE_BUS_MASK 0x00ff0000
+#define PCI_SEC_LATENCY_TIMER_MASK 0xff000000
#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
#define PCI_IO_LIMIT 0x1d
#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
@@ -1253,11 +1258,6 @@
#define PCI_DEV3_STA 0x0c /* Device 3 Status Register */
#define PCI_DEV3_STA_SEGMENT 0x8 /* Segment Captured (end-to-end flit-mode detected) */
-/* Compute Express Link (CXL r3.1, sec 8.1.5) */
-#define PCI_DVSEC_CXL_PORT 3
-#define PCI_DVSEC_CXL_PORT_CTL 0x0c
-#define PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR 0x00000001
-
/* Integrity and Data Encryption Extended Capability */
#define PCI_IDE_CAP 0x04
#define PCI_IDE_CAP_LINK 0x1 /* Link IDE Stream Supported */
@@ -1338,4 +1338,63 @@
#define PCI_IDE_SEL_ADDR_3(x) (28 + (x) * PCI_IDE_SEL_ADDR_BLOCK_SIZE)
#define PCI_IDE_SEL_BLOCK_SIZE(nr_assoc) (20 + PCI_IDE_SEL_ADDR_BLOCK_SIZE * (nr_assoc))
+/*
+ * Compute Express Link (CXL r4.0, sec 8.1)
+ *
+ * Note that CXL DVSEC IDs 3 and 7 are to be ignored when the CXL link state
+ * is "disconnected" (CXL r4.0, sec 9.12.3). Re-enumerate these
+ * registers on downstream link-up events.
+ */
+
+/* CXL r4.0, 8.1.3: PCIe DVSEC for CXL Device */
+#define PCI_DVSEC_CXL_DEVICE 0
+#define PCI_DVSEC_CXL_CAP 0xA
+#define PCI_DVSEC_CXL_MEM_CAPABLE _BITUL(2)
+#define PCI_DVSEC_CXL_HDM_COUNT __GENMASK(5, 4)
+#define PCI_DVSEC_CXL_CTRL 0xC
+#define PCI_DVSEC_CXL_MEM_ENABLE _BITUL(2)
+#define PCI_DVSEC_CXL_RANGE_SIZE_HIGH(i) (0x18 + (i * 0x10))
+#define PCI_DVSEC_CXL_RANGE_SIZE_LOW(i) (0x1C + (i * 0x10))
+#define PCI_DVSEC_CXL_MEM_INFO_VALID _BITUL(0)
+#define PCI_DVSEC_CXL_MEM_ACTIVE _BITUL(1)
+#define PCI_DVSEC_CXL_MEM_SIZE_LOW __GENMASK(31, 28)
+#define PCI_DVSEC_CXL_RANGE_BASE_HIGH(i) (0x20 + (i * 0x10))
+#define PCI_DVSEC_CXL_RANGE_BASE_LOW(i) (0x24 + (i * 0x10))
+#define PCI_DVSEC_CXL_MEM_BASE_LOW __GENMASK(31, 28)
+
+#define CXL_DVSEC_RANGE_MAX 2
+
+/* CXL r4.0, 8.1.4: Non-CXL Function Map DVSEC */
+#define PCI_DVSEC_CXL_FUNCTION_MAP 2
+
+/* CXL r4.0, 8.1.5: Extensions DVSEC for Ports */
+#define PCI_DVSEC_CXL_PORT 3
+#define PCI_DVSEC_CXL_PORT_CTL 0x0c
+#define PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR 0x00000001
+
+/* CXL r4.0, 8.1.6: GPF DVSEC for CXL Port */
+#define PCI_DVSEC_CXL_PORT_GPF 4
+#define PCI_DVSEC_CXL_PORT_GPF_PHASE_1_CONTROL 0x0C
+#define PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_BASE __GENMASK(3, 0)
+#define PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_SCALE __GENMASK(11, 8)
+#define PCI_DVSEC_CXL_PORT_GPF_PHASE_2_CONTROL 0xE
+#define PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_BASE __GENMASK(3, 0)
+#define PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_SCALE __GENMASK(11, 8)
+
+/* CXL r4.0, 8.1.7: GPF DVSEC for CXL Device */
+#define PCI_DVSEC_CXL_DEVICE_GPF 5
+
+/* CXL r4.0, 8.1.8: Flex Bus DVSEC */
+#define PCI_DVSEC_CXL_FLEXBUS_PORT 7
+#define PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS 0xE
+#define PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_CACHE _BITUL(0)
+#define PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_MEM _BITUL(2)
+
+/* CXL r4.0, 8.1.9: Register Locator DVSEC */
+#define PCI_DVSEC_CXL_REG_LOCATOR 8
+#define PCI_DVSEC_CXL_REG_LOCATOR_BLOCK1 0xC
+#define PCI_DVSEC_CXL_REG_LOCATOR_BIR __GENMASK(2, 0)
+#define PCI_DVSEC_CXL_REG_LOCATOR_BLOCK_ID __GENMASK(15, 8)
+#define PCI_DVSEC_CXL_REG_LOCATOR_BLOCK_OFF_LOW __GENMASK(31, 16)
+
#endif /* LINUX_PCI_REGS_H */
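As a kernel-side sketch, a driver could probe the Flex Bus port status with
the new macros; the vendor ID macro is assumed to be available as
PCI_VENDOR_ID_CXL:

	u16 status, pos;

	pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
					PCI_DVSEC_CXL_FLEXBUS_PORT);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS,
				     &status);
		if (status & PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_MEM)
			dev_dbg(&pdev->dev, "link trained with CXL.mem\n");
	}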
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index d6023a45a9d0..710f8842223f 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -22,6 +22,7 @@
#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
#define PCITEST_BARS _IO('P', 0xa)
#define PCITEST_DOORBELL _IO('P', 0xb)
+#define PCITEST_BAR_SUBRANGE _IO('P', 0xc)
#define PCITEST_CLEAR_IRQ _IO('P', 0x10)
#define PCITEST_IRQ_TYPE_UNDEFINED -1
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 72f03153dd32..fd10aa8d697f 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1330,14 +1330,16 @@ union perf_mem_data_src {
mem_snoopx : 2, /* Snoop mode, ext */
mem_blk : 3, /* Access blocked */
mem_hops : 3, /* Hop level */
- mem_rsvd : 18;
+ mem_region : 5, /* cache/memory regions */
+ mem_rsvd : 13;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd : 18,
+ __u64 mem_rsvd : 13,
+ mem_region : 5, /* cache/memory regions */
mem_hops : 3, /* Hop level */
mem_blk : 3, /* Access blocked */
mem_snoopx : 2, /* Snoop mode, ext */
@@ -1394,7 +1396,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
-/* 0x007 available */
+#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
@@ -1447,6 +1449,25 @@ union perf_mem_data_src {
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
+/* Cache/Memory region */
+#define PERF_MEM_REGION_NA 0x0 /* Invalid */
+#define PERF_MEM_REGION_RSVD 0x01 /* Reserved */
+#define PERF_MEM_REGION_L_SHARE 0x02 /* Local CA shared cache */
+#define PERF_MEM_REGION_L_NON_SHARE 0x03 /* Local CA non-shared cache */
+#define PERF_MEM_REGION_O_IO 0x04 /* Other CA IO agent */
+#define PERF_MEM_REGION_O_SHARE 0x05 /* Other CA shared cache */
+#define PERF_MEM_REGION_O_NON_SHARE 0x06 /* Other CA non-shared cache */
+#define PERF_MEM_REGION_MMIO 0x07 /* MMIO */
+#define PERF_MEM_REGION_MEM0 0x08 /* Memory region 0 */
+#define PERF_MEM_REGION_MEM1 0x09 /* Memory region 1 */
+#define PERF_MEM_REGION_MEM2 0x0a /* Memory region 2 */
+#define PERF_MEM_REGION_MEM3 0x0b /* Memory region 3 */
+#define PERF_MEM_REGION_MEM4 0x0c /* Memory region 4 */
+#define PERF_MEM_REGION_MEM5 0x0d /* Memory region 5 */
+#define PERF_MEM_REGION_MEM6 0x0e /* Memory region 6 */
+#define PERF_MEM_REGION_MEM7 0x0f /* Memory region 7 */
+#define PERF_MEM_REGION_SHIFT 46
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
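A sketch of encoding and decoding the new 5-bit field; it composes with the
existing PERF_MEM_S() helper, and data_src stands for a
union perf_mem_data_src taken from a PERF_SAMPLE_DATA_SRC sample:

	/* encode: a load that hit the local CA's shared cache */
	__u64 dsrc = PERF_MEM_S(OP, LOAD) | PERF_MEM_S(REGION, L_SHARE);

	/* decode the 5-bit region field from a sampled value */
	unsigned int region = (data_src.val >> PERF_MEM_REGION_SHIFT) & 0x1f;
	if (region == PERF_MEM_REGION_MMIO)
		handle_mmio_access();	/* illustrative handler */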
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index c2da76e78bad..66e8072f44df 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1036,6 +1036,7 @@ enum {
TCA_CAKE_STATS_DROP_NEXT_US,
TCA_CAKE_STATS_P_DROP,
TCA_CAKE_STATS_BLUE_TIMER_US,
+ TCA_CAKE_STATS_ACTIVE_QUEUES,
__TCA_CAKE_STATS_MAX
};
#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 51c4e8c82b1e..55b0446fff9d 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -386,4 +386,41 @@ struct prctl_mm_map {
# define PR_FUTEX_HASH_SET_SLOTS 1
# define PR_FUTEX_HASH_GET_SLOTS 2
+/* RSEQ time slice extensions */
+#define PR_RSEQ_SLICE_EXTENSION 79
+# define PR_RSEQ_SLICE_EXTENSION_GET 1
+# define PR_RSEQ_SLICE_EXTENSION_SET 2
+/*
+ * Bits for PR_RSEQ_SLICE_EXTENSION_GET/SET
+ * PR_RSEQ_SLICE_EXT_ENABLE: Enable time slice extensions
+ */
+# define PR_RSEQ_SLICE_EXT_ENABLE 0x01
+
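
A minimal user-space sketch of opting a thread into slice extensions; the exact return convention of the GET operation is an assumption here:

#include <stdio.h>
#include <sys/prctl.h>

/* Sketch: enable RSEQ time slice extensions for the calling thread. */
if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
	  PR_RSEQ_SLICE_EXT_ENABLE, 0, 0))
	perror("PR_RSEQ_SLICE_EXTENSION");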
+/*
+ * Get the current indirect branch tracking configuration for the current
+ * thread; this is the value configured via PR_SET_INDIR_BR_LP_STATUS.
+ */
+#define PR_GET_INDIR_BR_LP_STATUS 80
+
+/*
+ * Set the indirect branch tracking configuration. PR_INDIR_BR_LP_ENABLE
+ * enables the CPU feature for the user thread, so that all indirect
+ * branches are tracked and must land on an arch-defined landing pad
+ * instruction:
+ * x86 - If enabled, an indirect branch must land on an ENDBRANCH instruction.
+ * arm64 - If enabled, an indirect branch must land on a BTI instruction.
+ * riscv - If enabled, an indirect branch must land on an lpad instruction.
+ * PR_INDIR_BR_LP_DISABLE disables the feature for the user thread; indirect
+ * branches are then no longer required by the CPU to land on an arch-defined
+ * landing pad instruction.
+ */
+#define PR_SET_INDIR_BR_LP_STATUS 81
+# define PR_INDIR_BR_LP_ENABLE (1UL << 0)
+
+/*
+ * Prevent further changes to the specified indirect branch tracking
+ * configuration. All bits may be locked via this call, including
+ * undefined bits.
+ */
+#define PR_LOCK_INDIR_BR_LP_STATUS 82
+
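A hedged sketch of the enable-then-lock sequence; the lock call's bitmask argument is an assumption based on the "all bits may be locked" wording above:

#include <sys/prctl.h>

/* Enable landing-pad enforcement, then lock the configuration so it can
 * no longer be changed by this thread.
 */
if (!prctl(PR_SET_INDIR_BR_LP_STATUS, PR_INDIR_BR_LP_ENABLE, 0, 0, 0))
	prctl(PR_LOCK_INDIR_BR_LP_STATUS, PR_INDIR_BR_LP_ENABLE, 0, 0, 0);
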
#endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index 1b76d508400c..863c4a00a66b 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -19,13 +19,20 @@ enum rseq_cpu_id_state {
};
enum rseq_flags {
- RSEQ_FLAG_UNREGISTER = (1 << 0),
+ RSEQ_FLAG_UNREGISTER = (1 << 0),
+ RSEQ_FLAG_SLICE_EXT_DEFAULT_ON = (1 << 1),
};
enum rseq_cs_flags_bit {
+ /* Historical and unsupported bits */
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
+ /* (3) Intentional gap to put new bits into a separate byte */
+
+ /* User read only feature flags */
+ RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE_BIT = 4,
+ RSEQ_CS_FLAG_SLICE_EXT_ENABLED_BIT = 5,
};
enum rseq_cs_flags {
@@ -35,6 +42,11 @@ enum rseq_cs_flags {
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
+
+ RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE =
+ (1U << RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE_BIT),
+ RSEQ_CS_FLAG_SLICE_EXT_ENABLED =
+ (1U << RSEQ_CS_FLAG_SLICE_EXT_ENABLED_BIT),
};
/*
@@ -53,6 +65,27 @@ struct rseq_cs {
__u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
+/**
+ * struct rseq_slice_ctrl - Time slice extension control structure
+ * @all: Compound value
+ * @request: Request for a time slice extension
+ * @granted: Granted time slice extension
+ *
+ * @request is set by user space and can be cleared by user space or kernel
+ * space. @granted is set and cleared by the kernel and must only be read
+ * by user space.
+ */
+struct rseq_slice_ctrl {
+ union {
+ __u32 all;
+ struct {
+ __u8 request;
+ __u8 granted;
+ __u16 __reserved;
+ };
+ };
+};
+
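An illustrative user-space pattern built on the fields above; 'rs' (the thread's registered struct rseq) and short_critical_section() are hypothetical, and the handshake for a granted extension is kernel-defined and omitted:

/* Sketch: request a time slice extension around a short critical section. */
rs->slice_ctrl.request = 1;
short_critical_section();	/* hypothetical work */
rs->slice_ctrl.request = 0;
if (rs->slice_ctrl.granted) {
	/* The kernel granted an extension; yield per the documented protocol. */
}
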
/*
* struct rseq is aligned on 4 * 8 bytes to ensure it is always
* contained within a single cache-line.
@@ -142,6 +175,12 @@ struct rseq {
__u32 mm_cid;
/*
+ * Time slice extension control structure. CPU local updates from
+ * kernel and user space.
+ */
+ struct rseq_slice_ctrl slice_ctrl;
+
+ /*
* Flexible array member at end of structure, after last feature field.
*/
char end[];
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index 8d1f17a4e08e..7269f9f402e3 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -5,9 +5,6 @@
#include <linux/ipc.h>
#include <linux/errno.h>
#include <asm-generic/hugetlb_encode.h>
-#ifndef __KERNEL__
-#include <unistd.h>
-#endif
/*
* SHMMNI, SHMMAX and SHMALL are default upper limits which can be
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 9a28f7d9a334..111b097ec00b 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -72,6 +72,10 @@
#define __counted_by_be(m)
#endif
+#ifndef __counted_by_ptr
+#define __counted_by_ptr(m)
+#endif
+
#ifdef __KERNEL__
#define __kernel_nonstring __nonstring
#else
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 63d1464cb71c..bda516064174 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -92,7 +92,6 @@ enum
KERN_DOMAINNAME=8, /* string: domainname */
KERN_PANIC=15, /* int: panic timeout */
- KERN_REALROOTDEV=16, /* real root device to mount after initrd */
KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
@@ -183,7 +182,7 @@ enum
VM_LOWMEM_RESERVE_RATIO=20,/* reservation ratio for lower memory zones */
VM_MIN_FREE_KBYTES=21, /* Minimum free kilobytes to maintain */
VM_MAX_MAP_COUNT=22, /* int: Maximum number of mmaps/address-space */
- VM_LAPTOP_MODE=23, /* vm laptop mode */
+
VM_BLOCK_DUMP=24, /* block dump mode */
VM_HUGETLB_GROUP=25, /* permitted hugetlb group */
VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 5929030d4e8b..3ae25f3ce067 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -18,6 +18,7 @@
#define _LINUX_TASKSTATS_H
#include <linux/types.h>
+#include <linux/time_types.h>
/* Format for per-task data returned to userland when
* - a task exits
@@ -34,7 +35,7 @@
*/
-#define TASKSTATS_VERSION 16
+#define TASKSTATS_VERSION 17
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -230,6 +231,16 @@ struct taskstats {
__u64 irq_delay_max;
__u64 irq_delay_min;
+
+ /* v17: timestamps at which the max delays above were recorded */
+ struct __kernel_timespec cpu_delay_max_ts;
+ struct __kernel_timespec blkio_delay_max_ts;
+ struct __kernel_timespec swapin_delay_max_ts;
+ struct __kernel_timespec freepages_delay_max_ts;
+ struct __kernel_timespec thrashing_delay_max_ts;
+ struct __kernel_timespec compact_delay_max_ts;
+ struct __kernel_timespec wpcopy_delay_max_ts;
+ struct __kernel_timespec irq_delay_max_ts;
};
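
Each new *_ts field is a struct __kernel_timespec; a consumer might report when a recorded maximum occurred. A sketch, assuming the struct was received via the usual taskstats netlink interface:

#include <stdio.h>
#include <linux/taskstats.h>

static void show_cpu_delay_max_ts(const struct taskstats *t)
{
	/* Print the timestamp of the maximum CPU delay (clock base is
	 * kernel-defined).
	 */
	printf("max cpu delay at %lld.%09lld\n",
	       (long long)t->cpu_delay_max_ts.tv_sec,
	       (long long)t->cpu_delay_max_ts.tv_nsec);
}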
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index dce3113787a7..03772dd4d399 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -226,6 +226,24 @@ enum tcp_ca_state {
#define TCPF_CA_Loss (1<<TCP_CA_Loss)
};
+/* Values for tcpi_ecn_mode after negotiation */
+#define TCPI_ECN_MODE_DISABLED 0x0
+#define TCPI_ECN_MODE_RFC3168 0x1
+#define TCPI_ECN_MODE_ACCECN 0x2
+#define TCPI_ECN_MODE_PENDING 0x3
+
+/* Values for accecn_opt_seen */
+#define TCP_ACCECN_OPT_NOT_SEEN 0x0
+#define TCP_ACCECN_OPT_EMPTY_SEEN 0x1
+#define TCP_ACCECN_OPT_COUNTER_SEEN 0x2
+#define TCP_ACCECN_OPT_FAIL_SEEN 0x3
+
+/* Values for accecn_fail_mode */
+#define TCP_ACCECN_ACE_FAIL_SEND BIT(0)
+#define TCP_ACCECN_ACE_FAIL_RECV BIT(1)
+#define TCP_ACCECN_OPT_FAIL_SEND BIT(2)
+#define TCP_ACCECN_OPT_FAIL_RECV BIT(3)
+
struct tcp_info {
__u8 tcpi_state;
__u8 tcpi_ca_state;
@@ -316,15 +334,17 @@ struct tcp_info {
* in milliseconds, including any
* unfinished recovery.
*/
- __u32 tcpi_received_ce; /* # of CE marks received */
+ __u32 tcpi_received_ce; /* # of CE marked segments received */
__u32 tcpi_delivered_e1_bytes; /* Accurate ECN byte counters */
__u32 tcpi_delivered_e0_bytes;
__u32 tcpi_delivered_ce_bytes;
__u32 tcpi_received_e1_bytes;
__u32 tcpi_received_e0_bytes;
__u32 tcpi_received_ce_bytes;
- __u16 tcpi_accecn_fail_mode;
- __u16 tcpi_accecn_opt_seen;
+ __u32 tcpi_ecn_mode:2,
+ tcpi_accecn_opt_seen:2,
+ tcpi_accecn_fail_mode:4,
+ tcpi_options2:24;
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
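
The new bitfields are read like the rest of tcp_info, via getsockopt(TCP_INFO). A sketch (inside a function, 'fd' is a connected TCP socket):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

struct tcp_info ti;
socklen_t len = sizeof(ti);

if (!getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) &&
    ti.tcpi_ecn_mode == TCPI_ECN_MODE_ACCECN) {
	/* Accurate ECN negotiated; the e0/e1/ce byte counters are valid. */
}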
diff --git a/include/uapi/linux/typelimits.h b/include/uapi/linux/typelimits.h
new file mode 100644
index 000000000000..8166c639b518
--- /dev/null
+++ b/include/uapi/linux/typelimits.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_TYPELIMITS_H
+#define _UAPI_LINUX_TYPELIMITS_H
+
+#define __KERNEL_INT_MAX ((int)(~0U >> 1))
+#define __KERNEL_INT_MIN (-__KERNEL_INT_MAX - 1)
+
+#endif /* _UAPI_LINUX_TYPELIMITS_H */
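
On the common ILP32/LP64 ABIs (32-bit int) these expressions evaluate to the familiar two's-complement limits, which can be checked at compile time:

#include <linux/typelimits.h>

_Static_assert(__KERNEL_INT_MAX == 2147483647, "INT_MAX");
_Static_assert(__KERNEL_INT_MIN == -2147483647 - 1, "INT_MIN");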
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index ec77dabba45b..a88876756805 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -55,7 +55,8 @@
_IOWR('u', 0x15, struct ublksrv_ctrl_cmd)
#define UBLK_U_CMD_QUIESCE_DEV \
_IOWR('u', 0x16, struct ublksrv_ctrl_cmd)
-
+#define UBLK_U_CMD_TRY_STOP_DEV \
+ _IOWR('u', 0x17, struct ublksrv_ctrl_cmd)
/*
* 64bits are enough now, and it should be easy to extend in case of
* running out of feature flags
@@ -103,6 +104,30 @@
#define UBLK_U_IO_UNREGISTER_IO_BUF \
_IOWR('u', 0x24, struct ublksrv_io_cmd)
+/*
+ * Returns 0 if the command runs successfully; otherwise a failure code
+ * is returned.
+ */
+#define UBLK_U_IO_PREP_IO_CMDS \
+ _IOWR('u', 0x25, struct ublk_batch_io)
+/*
+ * If a failure code is returned, nothing in the command buffer is handled.
+ * Otherwise, the return value is the number of bytes of the command buffer
+ * that were actually handled; dividing by `elem_bytes` gives the number of
+ * handled IOs. IOs in the remaining bytes are not committed, so userspace
+ * has to check the return value to handle partial commits correctly.
+ */
+#define UBLK_U_IO_COMMIT_IO_CMDS \
+ _IOWR('u', 0x26, struct ublk_batch_io)
+
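A sketch of the partial-commit bookkeeping the comment above describes; 'ret' is the completed uring_cmd result and 'batch' the submitted struct ublk_batch_io:

if (ret >= 0) {
	unsigned int nr_done = ret / batch.elem_bytes;

	/* Elements [nr_done, batch.nr_elem) were not committed; resubmit
	 * them in a follow-up UBLK_U_IO_COMMIT_IO_CMDS.
	 */
}
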
+/*
+ * Fetch io commands into the provided buffer in multishot style;
+ * `IORING_URING_CMD_MULTISHOT` is required for this command.
+ */
+#define UBLK_U_IO_FETCH_IO_CMDS \
+ _IOWR('u', 0x27, struct ublk_batch_io)
+
/* only ABORT means that no re-fetch */
#define UBLK_IO_RES_OK 0
#define UBLK_IO_RES_NEED_GET_DATA 1
@@ -134,6 +159,10 @@
#define UBLKSRV_IO_BUF_TOTAL_BITS (UBLK_QID_OFF + UBLK_QID_BITS)
#define UBLKSRV_IO_BUF_TOTAL_SIZE (1ULL << UBLKSRV_IO_BUF_TOTAL_BITS)
+/* Copy to/from request integrity buffer instead of data buffer */
+#define UBLK_INTEGRITY_FLAG_OFF 62
+#define UBLKSRV_IO_INTEGRITY_FLAG (1ULL << UBLK_INTEGRITY_FLAG_OFF)
+
/*
* ublk server can register data buffers for incoming I/O requests with a sparse
* io_uring buffer table. The request buffer can then be used as the data buffer
@@ -311,6 +340,36 @@
*/
#define UBLK_F_BUF_REG_OFF_DAEMON (1ULL << 14)
+/*
+ * Support the following commands for delivering and committing io
+ * commands in batches.
+ *
+ * - UBLK_U_IO_PREP_IO_CMDS
+ * - UBLK_U_IO_COMMIT_IO_CMDS
+ * - UBLK_U_IO_FETCH_IO_CMDS
+ * - UBLK_U_IO_REGISTER_IO_BUF
+ * - UBLK_U_IO_UNREGISTER_IO_BUF
+ *
+ * The existing UBLK_U_IO_FETCH_REQ, UBLK_U_IO_COMMIT_AND_FETCH_REQ and
+ * UBLK_U_IO_NEED_GET_DATA uring_cmd are not supported for this feature.
+ */
+#define UBLK_F_BATCH_IO (1ULL << 15)
+
+/*
+ * ublk device supports requests with integrity/metadata buffer.
+ * Requires UBLK_F_USER_COPY.
+ */
+#define UBLK_F_INTEGRITY (1ULL << 16)
+
+/*
+ * The device supports the UBLK_U_CMD_TRY_STOP_DEV command, which
+ * allows stopping the device only if there are no openers.
+ */
+#define UBLK_F_SAFE_STOP_DEV (1ULL << 17)
+
+/* Disable automatic partition scanning when device is started */
+#define UBLK_F_NO_AUTO_PART_SCAN (1ULL << 18)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -408,6 +467,8 @@ struct ublksrv_ctrl_dev_info {
* passed in.
*/
#define UBLK_IO_F_NEED_REG_BUF (1U << 17)
+/* Request has an integrity data buffer */
+#define UBLK_IO_F_INTEGRITY (1UL << 18)
/*
* io cmd is described by this structure, and stored in share memory, indexed
@@ -525,6 +586,51 @@ struct ublksrv_io_cmd {
};
};
+struct ublk_elem_header {
+ __u16 tag; /* IO tag */
+
+ /*
+ * Buffer index for the incoming io command; only valid iff
+ * UBLK_F_AUTO_BUF_REG is set.
+ */
+ __u16 buf_index;
+ __s32 result; /* I/O completion result (commit only) */
+};
+
+/*
+ * uring_cmd buffer structure for batch commands
+ *
+ * buffer includes multiple elements, which number is specified by
+ * `nr_elem`. Each element buffer is organized in the following order:
+ *
+ * struct ublk_elem_buffer {
+ * // Mandatory fields (8 bytes)
+ * struct ublk_elem_header header;
+ *
+ * // Optional fields (8 bytes each, included based on flags)
+ *
+ * // Buffer address (if UBLK_BATCH_F_HAS_BUF_ADDR) for copying data
+ * // between ublk request and ublk server buffer
+ * __u64 buf_addr;
+ *
+ * // returned Zone append LBA (if UBLK_BATCH_F_HAS_ZONE_LBA)
+ * __u64 zone_lba;
+ * }
+ *
+ * Used for `UBLK_U_IO_PREP_IO_CMDS` and `UBLK_U_IO_COMMIT_IO_CMDS`
+ */
+struct ublk_batch_io {
+ __u16 q_id;
+#define UBLK_BATCH_F_HAS_ZONE_LBA (1 << 0)
+#define UBLK_BATCH_F_HAS_BUF_ADDR (1 << 1)
+#define UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK (1 << 2)
+ __u16 flags;
+ __u16 nr_elem;
+ __u8 elem_bytes;
+ __u8 reserved;
+ __u64 reserved2;
+};
+
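The per-element size follows directly from the flags: an 8-byte header plus 8 bytes per optional field. A minimal sketch of computing elem_bytes under the layout described above:

#include <linux/ublk_cmd.h>

static __u8 ublk_elem_bytes(__u16 flags)
{
	__u8 bytes = sizeof(struct ublk_elem_header);	/* mandatory 8 bytes */

	if (flags & UBLK_BATCH_F_HAS_BUF_ADDR)
		bytes += sizeof(__u64);			/* buf_addr */
	if (flags & UBLK_BATCH_F_HAS_ZONE_LBA)
		bytes += sizeof(__u64);			/* zone_lba */
	return bytes;
}
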
struct ublk_param_basic {
#define UBLK_ATTR_READ_ONLY (1 << 0)
#define UBLK_ATTR_ROTATIONAL (1 << 1)
@@ -600,6 +706,17 @@ struct ublk_param_segment {
__u8 pad[2];
};
+struct ublk_param_integrity {
+ __u32 flags; /* LBMD_PI_CAP_* from linux/fs.h */
+ __u16 max_integrity_segments; /* 0 means no limit */
+ __u8 interval_exp;
+ __u8 metadata_size; /* UBLK_PARAM_TYPE_INTEGRITY requires nonzero */
+ __u8 pi_offset;
+ __u8 csum_type; /* LBMD_PI_CSUM_* from linux/fs.h */
+ __u8 tag_size;
+ __u8 pad[5];
+};
+
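A hedged sketch of how a ublk server might advertise integrity support; the values are illustrative (512-byte protection intervals, 8 bytes of per-interval metadata), and the device must have UBLK_F_INTEGRITY (and thus UBLK_F_USER_COPY) set:

#include <linux/ublk_cmd.h>

struct ublk_params p = {
	.len = sizeof(p),
	.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_INTEGRITY,
	.integrity = {
		.interval_exp = 9,	/* 512-byte protection interval */
		.metadata_size = 8,	/* nonzero, as required */
	},
};
/* ... fill p.basic, then apply the parameters via the SET_PARAMS
 * control command ...
 */
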
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -614,6 +731,7 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_ZONED (1 << 3)
#define UBLK_PARAM_TYPE_DMA_ALIGN (1 << 4)
#define UBLK_PARAM_TYPE_SEGMENT (1 << 5)
+#define UBLK_PARAM_TYPE_INTEGRITY (1 << 6) /* requires UBLK_F_INTEGRITY */
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
@@ -622,6 +740,7 @@ struct ublk_params {
struct ublk_param_zoned zoned;
struct ublk_param_dma_align dma;
struct ublk_param_segment seg;
+ struct ublk_param_integrity integrity;
};
#endif
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f84ed133a6c9..68dd0c4e47b2 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1192,6 +1192,8 @@ enum v4l2_flash_strobe_source {
#define V4L2_CID_FLASH_CHARGE (V4L2_CID_FLASH_CLASS_BASE + 11)
#define V4L2_CID_FLASH_READY (V4L2_CID_FLASH_CLASS_BASE + 12)
+#define V4L2_CID_FLASH_DURATION (V4L2_CID_FLASH_CLASS_BASE + 13)
+#define V4L2_CID_FLASH_STROBE_OE (V4L2_CID_FLASH_CLASS_BASE + 14)
/* JPEG-class control IDs */
@@ -2099,6 +2101,8 @@ struct v4l2_ctrl_mpeg2_quantisation {
#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405)
#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406)
#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407)
+#define V4L2_CID_STATELESS_HEVC_EXT_SPS_ST_RPS (V4L2_CID_CODEC_STATELESS_BASE + 408)
+#define V4L2_CID_STATELESS_HEVC_EXT_SPS_LT_RPS (V4L2_CID_CODEC_STATELESS_BASE + 409)
enum v4l2_stateless_hevc_decode_mode {
V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
@@ -2554,6 +2558,65 @@ struct v4l2_ctrl_hevc_scaling_matrix {
__u8 scaling_list_dc_coef_32x32[2];
};
+#define V4L2_HEVC_EXT_SPS_ST_RPS_FLAG_INTER_REF_PIC_SET_PRED 0x1
+
+/*
+ * struct v4l2_ctrl_hevc_ext_sps_st_rps - HEVC short term RPS parameters
+ *
+ * Dynamically sized one-dimensional array for the short-term RPS. The number
+ * of elements is v4l2_ctrl_hevc_sps::num_short_term_ref_pic_sets, up to 65.
+ *
+ * @delta_idx_minus1: Specifies the delta compared to the index. See details in section 7.4.8
+ * "Short-term reference picture set semantics" of the specification.
+ * @delta_rps_sign: Sign of the delta as specified in section 7.4.8 "Short-term reference picture
+ * set semantics" of the specification.
+ * @abs_delta_rps_minus1: Absolute delta RPS as specified in section 7.4.8 "Short-term reference
+ * picture set semantics" of the specification.
+ * @num_negative_pics: Number of short-term RPS entries that have picture order count values less
+ * than the picture order count value of the current picture.
+ * @num_positive_pics: Number of short-term RPS entries that have picture order count values
+ * greater than the picture order count value of the current picture.
+ * @used_by_curr_pic: Bit j specifies if short-term RPS j is used by the current picture.
+ * @use_delta_flag: Bit j equal to 1 specifies that the j-th entry in the source candidate
+ * short-term RPS is included in this candidate short-term RPS.
+ * @delta_poc_s0_minus1: Specifies the negative picture order count delta for the i-th entry in
+ * the short-term RPS. See details in section 7.4.8 "Short-term reference
+ * picture set semantics" of the specification.
+ * @delta_poc_s1_minus1: Specifies the positive picture order count delta for the i-th entry in
+ * the short-term RPS. See details in section 7.4.8 "Short-term reference
+ * picture set semantics" of the specification.
+ * @flags: See V4L2_HEVC_EXT_SPS_ST_RPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_ext_sps_st_rps {
+ __u8 delta_idx_minus1;
+ __u8 delta_rps_sign;
+ __u8 num_negative_pics;
+ __u8 num_positive_pics;
+ __u32 used_by_curr_pic;
+ __u32 use_delta_flag;
+ __u16 abs_delta_rps_minus1;
+ __u16 delta_poc_s0_minus1[16];
+ __u16 delta_poc_s1_minus1[16];
+ __u16 flags;
+};
+
+#define V4L2_HEVC_EXT_SPS_LT_RPS_FLAG_USED_LT 0x1
+
+/*
+ * struct v4l2_ctrl_hevc_ext_sps_lt_rps - HEVC long term RPS parameters
+ *
+ * Dynamically sized one-dimensional array for the long-term RPS. The number
+ * of elements is v4l2_ctrl_hevc_sps::num_long_term_ref_pics_sps, up to 65.
+ *
+ * @lt_ref_pic_poc_lsb_sps: picture order count modulo MaxPicOrderCntLsb of the i-th candidate
+ * long-term reference picture.
+ * @flags: See V4L2_HEVC_EXT_SPS_LT_RPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_ext_sps_lt_rps {
+ __u16 lt_ref_pic_poc_lsb_sps;
+ __u16 flags;
+};
+
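Both new controls are dynamically sized arrays, so they are passed through the generic ptr/size members of struct v4l2_ext_control like other stateless HEVC controls. A sketch (inside the decoding loop; 'sps' is the parsed SPS control, and media-request binding is elided):

#include <linux/videodev2.h>
#include <linux/v4l2-controls.h>

struct v4l2_ctrl_hevc_ext_sps_st_rps st_rps[65];
unsigned int nr = sps.num_short_term_ref_pic_sets;

struct v4l2_ext_control ctrl = {
	.id = V4L2_CID_STATELESS_HEVC_EXT_SPS_ST_RPS,
	.size = nr * sizeof(st_rps[0]),
	.ptr = st_rps,
};
struct v4l2_ext_controls ctrls = {
	.count = 1,
	.controls = &ctrl,
};
/* ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls); */
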
/* Stateless VP9 controls */
#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 6073858d52a2..11f3627c3729 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -236,7 +236,7 @@ struct vmmdev_hgcm_function_parameter32 {
/** Relative to the request header. */
__u32 offset;
} page_list;
- } u;
+ } __packed u;
} __packed;
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter32, 4 + 8);
@@ -251,7 +251,7 @@ struct vmmdev_hgcm_function_parameter64 {
union {
__u64 phys_addr;
__u64 linear_addr;
- } u;
+ } __packed u;
} __packed pointer;
struct {
/** Size of the buffer described by the page list. */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index ac2329f24141..bb7b89330d35 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -964,6 +964,10 @@ struct vfio_device_bind_iommufd {
* hwpt corresponding to the given pt_id.
*
* Return: 0 on success, -errno on failure.
+ *
+ * While a device is resetting, -EBUSY will be returned to reject any
+ * concurrent attachment to the resetting device itself or to any sibling
+ * device in the same IOMMU group as the resetting device.
*/
struct vfio_device_attach_iommufd_pt {
__u32 argsz;
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index add08188f068..eda4492e40dc 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -775,6 +775,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
+#define V4L2_PIX_FMT_AV1 v4l2_fourcc('A', 'V', '0', '1') /* AV1 */
#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
@@ -1985,6 +1986,8 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
+ V4L2_CTRL_TYPE_HEVC_EXT_SPS_ST_RPS = 0x0275,
+ V4L2_CTRL_TYPE_HEVC_EXT_SPS_LT_RPS = 0x0276,
V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
diff --git a/include/uapi/linux/vmclock-abi.h b/include/uapi/linux/vmclock-abi.h
index 2d99b29ac44a..d320623b0118 100644
--- a/include/uapi/linux/vmclock-abi.h
+++ b/include/uapi/linux/vmclock-abi.h
@@ -115,6 +115,17 @@ struct vmclock_abi {
* bit again after the update, using the about-to-be-valid fields.
*/
#define VMCLOCK_FLAG_TIME_MONOTONIC (1 << 7)
+ /*
+ * If the VM_GEN_COUNTER_PRESENT flag is set, the hypervisor will
+ * bump the vm_generation_counter field every time the guest is
+ * loaded from some save state (restored from a snapshot).
+ */
+#define VMCLOCK_FLAG_VM_GEN_COUNTER_PRESENT (1 << 8)
+ /*
+ * If the NOTIFICATION_PRESENT flag is set, the hypervisor will send
+ * a notification every time it updates seq_count to a new even number.
+ */
+#define VMCLOCK_FLAG_NOTIFICATION_PRESENT (1 << 9)
__u8 pad[2];
__u8 clock_status;
@@ -177,6 +188,15 @@ struct vmclock_abi {
__le64 time_frac_sec; /* Units of 1/2^64 of a second */
__le64 time_esterror_nanosec;
__le64 time_maxerror_nanosec;
+
+ /*
+ * This field changes to another non-repeating value when the guest
+ * has been loaded from a snapshot. In addition to handling a
+ * disruption in time (which will also be signalled through the
+ * disruption_marker field), a guest may wish to discard UUIDs,
+ * reset network connections, reseed entropy, etc.
+ */
+ __le64 vm_generation_counter;
};
#endif /* __VMCLOCK_ABI_H__ */
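
A guest reader is expected to sample the structure under the seq_count rules the header already defines (odd seq_count means an update is in progress). A hedged sketch of reading the generation counter; the memory barriers a real reader needs are omitted for brevity:

#include <endian.h>
#include <stdint.h>
#include <linux/vmclock-abi.h>

/* Sample vm_generation_counter consistently against concurrent updates. */
static uint64_t vmclock_generation(const volatile struct vmclock_abi *vc)
{
	uint32_t seq;
	uint64_t gen;

	do {
		seq = le32toh(vc->seq_count);
		gen = le64toh(vc->vm_generation_counter);
	} while ((seq & 1) || seq != le32toh(vc->seq_count));

	return gen;
}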
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index faa9d62b3b30..f24edf1c75eb 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -56,6 +56,7 @@ enum {
BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED = 0x40,
+ BNXT_RE_UCNTX_CMASK_QP_RATE_LIMIT_ENABLED = 0x80ULL,
};
enum bnxt_re_wqe_mode {
@@ -215,4 +216,19 @@ enum bnxt_re_toggle_mem_methods {
BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT),
BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
};
+
+struct bnxt_re_packet_pacing_caps {
+ __u32 qp_rate_limit_min;
+ __u32 qp_rate_limit_max; /* In kbps */
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_RC
+ */
+ __u32 supported_qpts;
+ __u32 reserved;
+};
+
+struct bnxt_re_query_device_ex_resp {
+ struct bnxt_re_packet_pacing_caps packet_pacing_caps;
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index de6f5a94f1e3..72041c1b0ea5 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -56,6 +56,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_COUNTERS,
UVERBS_OBJECT_ASYNC_EVENT,
UVERBS_OBJECT_DMAH,
+ UVERBS_OBJECT_DMABUF,
};
enum {
@@ -73,6 +74,7 @@ enum uverbs_methods_device {
UVERBS_METHOD_QUERY_CONTEXT,
UVERBS_METHOD_QUERY_GID_TABLE,
UVERBS_METHOD_QUERY_GID_ENTRY,
+ UVERBS_METHOD_QUERY_PORT_SPEED,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -86,6 +88,11 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
UVERBS_ATTR_QUERY_PORT_RESP,
};
+enum uverbs_attrs_query_port_speed_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_PORT_SPEED_PORT_NUM,
+ UVERBS_ATTR_QUERY_PORT_SPEED_RESP,
+};
+
enum uverbs_attrs_get_context_attr_ids {
UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
@@ -257,6 +264,15 @@ enum uverbs_methods_dmah {
UVERBS_METHOD_DMAH_FREE,
};
+enum uverbs_attrs_alloc_dmabuf_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DMABUF_HANDLE,
+ UVERBS_ATTR_ALLOC_DMABUF_PGOFF,
+};
+
+enum uverbs_methods_dmabuf {
+ UVERBS_METHOD_DMABUF_ALLOC,
+};
+
enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
UVERBS_ATTR_REG_DM_MR_HANDLE,
UVERBS_ATTR_REG_DM_MR_OFFSET,
diff --git a/include/uapi/rdma/mana-abi.h b/include/uapi/rdma/mana-abi.h
index 45c2df619f07..a75bf32b8cfb 100644
--- a/include/uapi/rdma/mana-abi.h
+++ b/include/uapi/rdma/mana-abi.h
@@ -17,6 +17,9 @@
#define MANA_IB_UVERBS_ABI_VERSION 1
enum mana_ib_create_cq_flags {
+ /* Reserved for backward compatibility. Legacy
+ * kernel versions use it to create CQs in the RNIC.
+ */
MANA_IB_CREATE_RNIC_CQ = 1 << 0,
};
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 8c29e498ef98..06f88d1b1876 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -94,16 +94,15 @@ struct utp_upiu_header {
};
/**
- * struct utp_upiu_query - upiu request buffer structure for
- * query request.
- * @opcode: command to perform B-0
- * @idn: a value that indicates the particular type of data B-1
- * @index: Index to further identify data B-2
- * @selector: Index to further identify data B-3
+ * struct utp_upiu_query - QUERY REQUEST UPIU structure.
+ * @opcode: query function to perform B-0
+ * @idn: descriptor or attribute identification number B-1
+ * @index: Index that further identifies which data to access B-2
+ * @selector: Index that further identifies which data to access B-3
* @reserved_osf: spec reserved field B-4,5
- * @length: number of descriptor bytes to read/write B-6,7
- * @value: Attribute value to be written DW-5
- * @reserved: spec reserved DW-6,7
+ * @length: number of descriptor bytes to read or write B-6,7
+ * @value: if @opcode == UPIU_QUERY_OPCODE_WRITE_ATTR, the value to be written DW-5
+ * @reserved: reserved for future use DW-6,7
*/
struct utp_upiu_query {
__u8 opcode;
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 9ce72fbd6f11..f4a7baadb44d 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -56,6 +56,9 @@
#define SOF_TKN_SCHED_LP_MODE 207
#define SOF_TKN_SCHED_MEM_USAGE 208
#define SOF_TKN_SCHED_USE_CHAIN_DMA 209
+#define SOF_TKN_SCHED_KCPS 210
+#define SOF_TKN_SCHED_DIRECTION 211
+#define SOF_TKN_SCHED_DIRECTION_VALID 212
/* volume */
#define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250
@@ -107,6 +110,9 @@
#define SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME 417
#define SOF_TKN_COMP_SCHED_DOMAIN 418
+#define SOF_TKN_COMP_DOMAIN_ID 419
+#define SOF_TKN_COMP_HEAP_BYTES_REQUIREMENT 420
+#define SOF_TKN_COMP_STACK_BYTES_REQUIREMENT 421
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index ab8f6c07b5a2..602aa34c9822 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -21,6 +21,7 @@
* in this header file of the size of struct utp_upiu_header.
*/
static_assert(sizeof(struct utp_upiu_header) == 12);
+static_assert(sizeof(struct utp_upiu_query) == 20);
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
@@ -561,7 +562,7 @@ enum ufs_dev_pwr_mode {
#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
/**
- * struct utp_cmd_rsp - Response UPIU structure
+ * struct utp_cmd_rsp - RESPONSE UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
* @reserved: Reserved double words DW-4 to DW-7
* @sense_data_len: Sense data length DW-8 U16
@@ -574,6 +575,8 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
+static_assert(sizeof(struct utp_cmd_rsp) == 40);
+
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 19154228780b..8563b6648976 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -834,6 +834,7 @@ enum ufshcd_mcq_opr {
* @uic_link_state: active state of the link to the UFS device.
* @rpm_lvl: desired UFS power management level during runtime PM.
* @spm_lvl: desired UFS power management level during system PM.
+ * @pm_lvl_min: minimum supported power management level.
* @pm_op_in_progress: whether or not a PM operation is in progress.
* @ahit: value of Auto-Hibernate Idle Timer register.
* @outstanding_tasks: Bits representing outstanding task requests
@@ -972,6 +973,7 @@ struct ufs_hba {
enum ufs_pm_level rpm_lvl;
/* Desired UFS power management level during system PM */
enum ufs_pm_level spm_lvl;
+ enum ufs_pm_level pm_lvl_min;
int pm_op_in_progress;
/* Auto-Hibernate Idle Timer register value */
@@ -1342,17 +1344,13 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba)
return hba->priv;
}
-#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
-#endif
-#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
extern int ufshcd_system_freeze(struct device *dev);
extern int ufshcd_system_thaw(struct device *dev);
extern int ufshcd_system_restore(struct device *dev);
-#endif
extern int ufshcd_dme_reset(struct ufs_hba *hba);
extern int ufshcd_dme_enable(struct ufs_hba *hba);
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index d36df24242a3..806fdaf52bd9 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -288,6 +288,7 @@ enum {
/* REG_UFS_MEM_CFG - Global Config Registers 300h */
#define MCQ_MODE_SELECT BIT(0)
+#define ESI_ENABLE BIT(1)
/* CQISy - CQ y Interrupt Status Register */
#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
index 9ac161866653..16a0a0556b86 100644
--- a/include/vdso/gettime.h
+++ b/include/vdso/gettime.h
@@ -20,5 +20,6 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
__kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_getres_time64(clockid_t clock, struct __kernel_timespec *ts);
#endif
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
index ff0c06b6513e..9076483c9fbb 100644
--- a/include/vdso/unaligned.h
+++ b/include/vdso/unaligned.h
@@ -2,14 +2,43 @@
#ifndef __VDSO_UNALIGNED_H
#define __VDSO_UNALIGNED_H
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr); \
- __get_pptr->x; \
+#include <linux/compiler_types.h>
+
+/**
+ * __get_unaligned_t - read an unaligned value from memory.
+ * @type: the type to load from the pointer.
+ * @ptr: the pointer to load from.
+ *
+ * Use memcpy to effect an unaligned type-sized load, avoiding the undefined
+ * behavior of approaches like type punning that require -fno-strict-aliasing
+ * in order to be correct. As @type may be const, use __unqual_scalar_typeof to
+ * map to a non-const type - you can't memcpy into a const type. The
+ * __get_unaligned_ctrl_type pointer gives __unqual_scalar_typeof its required
+ * expression rather than a type; a pointer is used to avoid warnings about
+ * mixing the use of 0 and NULL. The void * cast silences ubsan warnings.
+ */
+#define __get_unaligned_t(type, ptr) ({ \
+ type *__get_unaligned_ctrl_type __always_unused = NULL; \
+ __unqual_scalar_typeof(*__get_unaligned_ctrl_type) __get_unaligned_val; \
+ __builtin_memcpy(&__get_unaligned_val, (void *)(ptr), \
+ sizeof(__get_unaligned_val)); \
+ __get_unaligned_val; \
})
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr); \
- __put_pptr->x = (val); \
+/**
+ * __put_unaligned_t - write an unaligned value to memory.
+ * @type: the type of the value to store.
+ * @val: the value to store.
+ * @ptr: the pointer to store to.
+ *
+ * Use memcpy to effect an unaligned type-sized store, avoiding the undefined
+ * behavior of approaches like type punning that require -fno-strict-aliasing
+ * in order to be correct. The void * cast silences ubsan warnings.
+ */
+#define __put_unaligned_t(type, val, ptr) do { \
+ type __put_unaligned_val = (val); \
+ __builtin_memcpy((void *)(ptr), &__put_unaligned_val, \
+ sizeof(__put_unaligned_val)); \
} while (0)
#endif /* __VDSO_UNALIGNED_H */
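
Usage is unchanged by the memcpy-based rewrite; for example:

#include <linux/types.h>
#include <vdso/unaligned.h>

static __u32 read_u32(const void *p)
{
	return __get_unaligned_t(__u32, p);	/* alignment-safe load */
}

static void write_u32(void *p, __u32 v)
{
	__put_unaligned_t(__u32, v, p);		/* alignment-safe store */
}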
diff --git a/include/video/edid.h b/include/video/edid.h
index c2b186b1933a..52aabb706032 100644
--- a/include/video/edid.h
+++ b/include/video/edid.h
@@ -4,8 +4,4 @@
#include <uapi/video/edid.h>
-#if defined(CONFIG_FIRMWARE_EDID)
-extern struct edid_info edid_info;
-#endif
-
#endif /* __linux_video_edid_h__ */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 61854e3f2837..f280c5dcf923 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -69,11 +69,13 @@ extern u64 xen_saved_max_mem_size;
#endif
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+extern unsigned long xen_unpopulated_pages;
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
#include <linux/ioport.h>
int arch_xen_unpopulated_init(struct resource **res);
#else
+#define xen_unpopulated_pages 0UL
#include <xen/balloon.h>
static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
struct page **pages)