Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/acpi_iort.h | 11
-rw-r--r--  include/linux/aer.h | 2
-rw-r--r--  include/linux/amd-pmf-io.h | 21
-rw-r--r--  include/linux/array_size.h | 6
-rw-r--r--  include/linux/ata.h | 3
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h | 18
-rw-r--r--  include/linux/atomic/atomic-instrumented.h | 26
-rw-r--r--  include/linux/atomic/atomic-long.h | 10
-rw-r--r--  include/linux/attribute_container.h | 2
-rw-r--r--  include/linux/audit.h | 26
-rw-r--r--  include/linux/audit_arch.h | 7
-rw-r--r--  include/linux/backing-dev-defs.h | 3
-rw-r--r--  include/linux/balloon.h | 77
-rw-r--r--  include/linux/balloon_compaction.h | 160
-rw-r--r--  include/linux/bio.h | 33
-rw-r--r--  include/linux/bit_spinlock.h | 24
-rw-r--r--  include/linux/bitfield.h | 5
-rw-r--r--  include/linux/blk-crypto.h | 32
-rw-r--r--  include/linux/blk-integrity.h | 6
-rw-r--r--  include/linux/blk-mq-dma.h | 2
-rw-r--r--  include/linux/blk-mq.h | 4
-rw-r--r--  include/linux/blk_types.h | 7
-rw-r--r--  include/linux/blkdev.h | 24
-rw-r--r--  include/linux/bnge/hsi.h | 12609
-rw-r--r--  include/linux/bnxt/hsi.h | 167
-rw-r--r--  include/linux/bpf-cgroup.h | 4
-rw-r--r--  include/linux/bpf.h | 178
-rw-r--r--  include/linux/bpf_local_storage.h | 29
-rw-r--r--  include/linux/bpf_mprog.h | 10
-rw-r--r--  include/linux/bpf_verifier.h | 14
-rw-r--r--  include/linux/btf.h | 9
-rw-r--r--  include/linux/buildid.h | 3
-rw-r--r--  include/linux/can/core.h | 1
-rw-r--r--  include/linux/can/skb.h | 38
-rw-r--r--  include/linux/capability.h | 6
-rw-r--r--  include/linux/ceph/ceph_fs.h | 6
-rw-r--r--  include/linux/cgroup-defs.h | 8
-rw-r--r--  include/linux/cleanup.h | 58
-rw-r--r--  include/linux/clk.h | 23
-rw-r--r--  include/linux/cma.h | 24
-rw-r--r--  include/linux/compiler-clang.h | 2
-rw-r--r--  include/linux/compiler-context-analysis.h | 436
-rw-r--r--  include/linux/compiler.h | 12
-rw-r--r--  include/linux/compiler_types.h | 122
-rw-r--r--  include/linux/console.h | 21
-rw-r--r--  include/linux/cper.h | 3
-rw-r--r--  include/linux/cpu.h | 4
-rw-r--r--  include/linux/cpufreq.h | 5
-rw-r--r--  include/linux/cpuhplock.h | 1
-rw-r--r--  include/linux/cpuset.h | 10
-rw-r--r--  include/linux/cred.h | 1
-rw-r--r--  include/linux/damon.h | 65
-rw-r--r--  include/linux/debugfs.h | 12
-rw-r--r--  include/linux/delayacct.h | 8
-rw-r--r--  include/linux/device/bus.h | 4
-rw-r--r--  include/linux/device/devres.h | 4
-rw-r--r--  include/linux/device/driver.h | 9
-rw-r--r--  include/linux/device_cgroup.h | 2
-rw-r--r--  include/linux/dma-buf-mapping.h | 2
-rw-r--r--  include/linux/dma-buf.h | 22
-rw-r--r--  include/linux/dma-fence.h | 35
-rw-r--r--  include/linux/dma-heap.h | 2
-rw-r--r--  include/linux/dma-map-ops.h | 12
-rw-r--r--  include/linux/dma-mapping.h | 20
-rw-r--r--  include/linux/dpll.h | 64
-rw-r--r--  include/linux/efi.h | 9
-rw-r--r--  include/linux/entry-common.h | 167
-rw-r--r--  include/linux/ethtool.h | 36
-rw-r--r--  include/linux/exportfs.h | 33
-rw-r--r--  include/linux/f2fs_fs.h | 73
-rw-r--r--  include/linux/fb.h | 17
-rw-r--r--  include/linux/filelock.h | 18
-rw-r--r--  include/linux/filter.h | 27
-rw-r--r--  include/linux/firewire.h | 36
-rw-r--r--  include/linux/firmware/cirrus/cs_dsp_test_utils.h | 6
-rw-r--r--  include/linux/firmware/cirrus/wmfw.h | 7
-rw-r--r--  include/linux/firmware/imx/sm.h | 2
-rw-r--r--  include/linux/firmware/qcom/qcom_scm.h | 30
-rw-r--r--  include/linux/firmware/xlnx-zynqmp-crypto.h | 119
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h | 14
-rw-r--r--  include/linux/fortify-string.h | 8
-rw-r--r--  include/linux/framer/framer.h | 5
-rw-r--r--  include/linux/fs.h | 88
-rw-r--r--  include/linux/fs/super_types.h | 8
-rw-r--r--  include/linux/fserror.h | 75
-rw-r--r--  include/linux/fsnotify_backend.h | 5
-rw-r--r--  include/linux/fsverity.h | 190
-rw-r--r--  include/linux/ftrace.h | 37
-rw-r--r--  include/linux/ftrace_regs.h | 25
-rw-r--r--  include/linux/getcpu.h | 19
-rw-r--r--  include/linux/gfp.h | 60
-rw-r--r--  include/linux/gfp_types.h | 7
-rw-r--r--  include/linux/gpio/consumer.h | 36
-rw-r--r--  include/linux/highmem.h | 98
-rw-r--r--  include/linux/hippidevice.h | 40
-rw-r--r--  include/linux/hisi_acc_qm.h | 15
-rw-r--r--  include/linux/host1x.h | 2
-rw-r--r--  include/linux/hrtimer.h | 15
-rw-r--r--  include/linux/hrtimer_defs.h | 20
-rw-r--r--  include/linux/hugetlb.h | 32
-rw-r--r--  include/linux/hw_random.h | 2
-rw-r--r--  include/linux/i3c/device.h | 22
-rw-r--r--  include/linux/i3c/master.h | 11
-rw-r--r--  include/linux/ieee80211-eht.h | 13
-rw-r--r--  include/linux/ieee80211-s1g.h | 2
-rw-r--r--  include/linux/ieee80211-uhr.h | 220
-rw-r--r--  include/linux/ieee80211.h | 40
-rw-r--r--  include/linux/if_vlan.h | 51
-rw-r--r--  include/linux/iio/iio-opaque.h | 2
-rw-r--r--  include/linux/ima.h | 1
-rw-r--r--  include/linux/inetdevice.h | 2
-rw-r--r--  include/linux/init_syscalls.h | 1
-rw-r--r--  include/linux/initrd.h | 2
-rw-r--r--  include/linux/instrumented.h | 17
-rw-r--r--  include/linux/interrupt.h | 26
-rw-r--r--  include/linux/io_uring.h | 14
-rw-r--r--  include/linux/io_uring_types.h | 42
-rw-r--r--  include/linux/iomap.h | 17
-rw-r--r--  include/linux/iommu-debug-pagealloc.h | 32
-rw-r--r--  include/linux/iommu.h | 14
-rw-r--r--  include/linux/ioport.h | 14
-rw-r--r--  include/linux/ipv6.h | 35
-rw-r--r--  include/linux/irq.h | 15
-rw-r--r--  include/linux/irqchip/arm-gic-v5.h | 12
-rw-r--r--  include/linux/irqchip/irq-renesas-rzt2h.h | 23
-rw-r--r--  include/linux/irqchip/riscv-imsic.h | 3
-rw-r--r--  include/linux/irqdesc.h | 17
-rw-r--r--  include/linux/irqdomain.h | 30
-rw-r--r--  include/linux/jbd2.h | 3
-rw-r--r--  include/linux/kasan.h | 14
-rw-r--r--  include/linux/kernel.h | 211
-rw-r--r--  include/linux/kexec_handover.h | 33
-rw-r--r--  include/linux/kho/abi/kexec_handover.h | 163
-rw-r--r--  include/linux/kho/abi/luo.h | 89
-rw-r--r--  include/linux/kho/abi/memblock.h | 73
-rw-r--r--  include/linux/kho/abi/memfd.h | 6
-rw-r--r--  include/linux/khugepaged.h | 9
-rw-r--r--  include/linux/kref.h | 2
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/kvm_host.h | 26
-rw-r--r--  include/linux/libata.h | 76
-rw-r--r--  include/linux/linux_logo.h | 8
-rw-r--r--  include/linux/list_bl.h | 2
-rw-r--r--  include/linux/list_private.h | 256
-rw-r--r--  include/linux/livepatch.h | 3
-rw-r--r--  include/linux/liveupdate.h | 147
-rw-r--r--  include/linux/local_lock.h | 59
-rw-r--r--  include/linux/local_lock_internal.h | 72
-rw-r--r--  include/linux/lockdep.h | 12
-rw-r--r--  include/linux/lockref.h | 4
-rw-r--r--  include/linux/log2.h | 2
-rw-r--r--  include/linux/lsm_hooks.h | 4
-rw-r--r--  include/linux/mailbox/mtk-cmdq-mailbox.h | 19
-rw-r--r--  include/linux/mailbox/mtk-vcp-mailbox.h | 32
-rw-r--r--  include/linux/maple_tree.h | 9
-rw-r--r--  include/linux/mdio.h | 14
-rw-r--r--  include/linux/memblock.h | 4
-rw-r--r--  include/linux/memcontrol.h | 73
-rw-r--r--  include/linux/memfd.h | 6
-rw-r--r--  include/linux/memory-failure.h | 13
-rw-r--r--  include/linux/memremap.h | 9
-rw-r--r--  include/linux/mfd/rohm-bd72720.h | 634
-rw-r--r--  include/linux/mfd/rohm-generic.h | 1
-rw-r--r--  include/linux/mfd/samsung/core.h | 2
-rw-r--r--  include/linux/mfd/samsung/irq.h | 105
-rw-r--r--  include/linux/mfd/samsung/s2mpg10.h | 44
-rw-r--r--  include/linux/mfd/samsung/s2mpg11.h | 434
-rw-r--r--  include/linux/mfd/wm8350/core.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 5
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 16
-rw-r--r--  include/linux/mlx5/port.h | 2
-rw-r--r--  include/linux/mlx5/vport.h | 6
-rw-r--r--  include/linux/mm.h | 121
-rw-r--r--  include/linux/mm_types.h | 76
-rw-r--r--  include/linux/mm_types_task.h | 5
-rw-r--r--  include/linux/mmap_lock.h | 279
-rw-r--r--  include/linux/mmc/sdio_func.h | 1
-rw-r--r--  include/linux/mmdebug.h | 10
-rw-r--r--  include/linux/mmu_context.h | 2
-rw-r--r--  include/linux/mmzone.h | 28
-rw-r--r--  include/linux/mod_devicetable.h | 1
-rw-r--r--  include/linux/module.h | 27
-rw-r--r--  include/linux/moduleparam.h | 15
-rw-r--r--  include/linux/msi.h | 16
-rw-r--r--  include/linux/mtd/spinand.h | 162
-rw-r--r--  include/linux/mutex.h | 40
-rw-r--r--  include/linux/mutex_types.h | 4
-rw-r--r--  include/linux/netdevice.h | 6
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h | 3
-rw-r--r--  include/linux/nfs4.h | 4
-rw-r--r--  include/linux/nfs_fs_sb.h | 10
-rw-r--r--  include/linux/nfs_xdr.h | 9
-rw-r--r--  include/linux/nodemask.h | 8
-rw-r--r--  include/linux/ns/ns_common_types.h | 4
-rw-r--r--  include/linux/nubus.h | 3
-rw-r--r--  include/linux/of.h | 7
-rw-r--r--  include/linux/of_irq.h | 41
-rw-r--r--  include/linux/oid_registry.h | 5
-rw-r--r--  include/linux/overflow.h | 52
-rw-r--r--  include/linux/page-isolation.h | 2
-rw-r--r--  include/linux/page_ext.h | 6
-rw-r--r--  include/linux/page_table_check.h | 69
-rw-r--r--  include/linux/pagemap.h | 11
-rw-r--r--  include/linux/panic.h | 8
-rw-r--r--  include/linux/pci-acpi.h | 3
-rw-r--r--  include/linux/pci-epc.h | 9
-rw-r--r--  include/linux/pci-epf.h | 23
-rw-r--r--  include/linux/pci-ide.h | 4
-rw-r--r--  include/linux/pci-p2pdma.h | 2
-rw-r--r--  include/linux/pci-pwrctrl.h | 16
-rw-r--r--  include/linux/pci.h | 31
-rw-r--r--  include/linux/pci_ids.h | 9
-rw-r--r--  include/linux/pcs/pcs-mtk-lynxi.h | 5
-rw-r--r--  include/linux/percpu-rwsem.h | 1
-rw-r--r--  include/linux/perf_event.h | 35
-rw-r--r--  include/linux/pgtable.h | 168
-rw-r--r--  include/linux/phy.h | 72
-rw-r--r--  include/linux/phy/phy-common-props.h | 32
-rw-r--r--  include/linux/phy_port.h | 99
-rw-r--r--  include/linux/phylink.h | 12
-rw-r--r--  include/linux/platform_data/cros_ec_commands.h | 24
-rw-r--r--  include/linux/platform_data/davinci_asp.h | 3
-rw-r--r--  include/linux/platform_data/hwmon-s3c.h | 36
-rw-r--r--  include/linux/platform_data/mipi-i3c-hci.h | 15
-rw-r--r--  include/linux/platform_data/x86/asus-wmi-leds-ids.h | 50
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h | 29
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/posix_acl_xattr.h | 5
-rw-r--r--  include/linux/psp.h | 1
-rw-r--r--  include/linux/rcupdate.h | 121
-rw-r--r--  include/linux/rcupdate_trace.h | 166
-rw-r--r--  include/linux/refcount.h | 6
-rw-r--r--  include/linux/regmap.h | 14
-rw-r--r--  include/linux/regulator/driver.h | 7
-rw-r--r--  include/linux/resctrl.h | 57
-rw-r--r--  include/linux/resctrl_types.h | 11
-rw-r--r--  include/linux/restart_block.h | 4
-rw-r--r--  include/linux/rhashtable.h | 16
-rw-r--r--  include/linux/rmap.h | 76
-rw-r--r--  include/linux/rseq.h | 11
-rw-r--r--  include/linux/rseq_entry.h | 192
-rw-r--r--  include/linux/rseq_types.h | 38
-rw-r--r--  include/linux/rv.h | 4
-rw-r--r--  include/linux/rwlock.h | 19
-rw-r--r--  include/linux/rwlock_api_smp.h | 43
-rw-r--r--  include/linux/rwlock_rt.h | 43
-rw-r--r--  include/linux/rwlock_types.h | 10
-rw-r--r--  include/linux/rwsem.h | 78
-rw-r--r--  include/linux/sched.h | 82
-rw-r--r--  include/linux/sched/cputime.h | 18
-rw-r--r--  include/linux/sched/isolation.h | 16
-rw-r--r--  include/linux/sched/signal.h | 16
-rw-r--r--  include/linux/sched/task.h | 6
-rw-r--r--  include/linux/sched/wake_q.h | 3
-rw-r--r--  include/linux/scmi_imx_protocol.h | 2
-rw-r--r--  include/linux/screen_info.h | 2
-rw-r--r--  include/linux/seq_file.h | 1
-rw-r--r--  include/linux/seqlock.h | 57
-rw-r--r--  include/linux/seqlock_types.h | 5
-rw-r--r--  include/linux/skbuff.h | 18
-rw-r--r--  include/linux/skbuff_ref.h | 10
-rw-r--r--  include/linux/skmsg.h | 70
-rw-r--r--  include/linux/slab.h | 146
-rw-r--r--  include/linux/smp.h | 1
-rw-r--r--  include/linux/soc/apple/rtkit.h | 7
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 93
-rw-r--r--  include/linux/soc/qcom/apr.h | 6
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h | 4
-rw-r--r--  include/linux/soc/qcom/mdt_loader.h | 22
-rw-r--r--  include/linux/soc/qcom/ubwc.h | 1
-rw-r--r--  include/linux/spi/spi-mem.h | 22
-rw-r--r--  include/linux/spi/spi.h | 39
-rw-r--r--  include/linux/spinlock.h | 119
-rw-r--r--  include/linux/spinlock_api_smp.h | 34
-rw-r--r--  include/linux/spinlock_api_up.h | 112
-rw-r--r--  include/linux/spinlock_rt.h | 36
-rw-r--r--  include/linux/spinlock_types.h | 10
-rw-r--r--  include/linux/spinlock_types_raw.h | 5
-rw-r--r--  include/linux/srcu.h | 73
-rw-r--r--  include/linux/srcutiny.h | 6
-rw-r--r--  include/linux/srcutree.h | 10
-rw-r--r--  include/linux/stmmac.h | 20
-rw-r--r--  include/linux/string.h | 4
-rw-r--r--  include/linux/sunrpc/debug.h | 2
-rw-r--r--  include/linux/sunrpc/svc.h | 13
-rw-r--r--  include/linux/sunrpc/svcsock.h | 2
-rw-r--r--  include/linux/sunrpc/xdrgen/_builtins.h | 80
-rw-r--r--  include/linux/sunrpc/xdrgen/nfs4_1.h | 112
-rw-r--r--  include/linux/swap.h | 71
-rw-r--r--  include/linux/syscalls.h | 4
-rw-r--r--  include/linux/sysfb.h | 23
-rw-r--r--  include/linux/sysfs.h | 13
-rw-r--r--  include/linux/tcp.h | 9
-rw-r--r--  include/linux/tee_core.h | 9
-rw-r--r--  include/linux/tee_drv.h | 12
-rw-r--r--  include/linux/thread_info.h | 16
-rw-r--r--  include/linux/tick.h | 2
-rw-r--r--  include/linux/timecounter.h | 31
-rw-r--r--  include/linux/tnum.h | 5
-rw-r--r--  include/linux/trace_events.h | 8
-rw-r--r--  include/linux/trace_printk.h | 204
-rw-r--r--  include/linux/trace_seq.h | 12
-rw-r--r--  include/linux/tracepoint.h | 9
-rw-r--r--  include/linux/transport_class.h | 7
-rw-r--r--  include/linux/tsm.h | 3
-rw-r--r--  include/linux/types.h | 8
-rw-r--r--  include/linux/u64_stats_sync.h | 25
-rw-r--r--  include/linux/udp.h | 2
-rw-r--r--  include/linux/uio.h | 3
-rw-r--r--  include/linux/unwind_user.h | 18
-rw-r--r--  include/linux/uprobes.h | 1
-rw-r--r--  include/linux/usb/usbnet.h | 1
-rw-r--r--  include/linux/usb/uvc.h | 8
-rw-r--r--  include/linux/util_macros.h | 2
-rw-r--r--  include/linux/vdpa.h | 4
-rw-r--r--  include/linux/vfio_pci_core.h | 13
-rw-r--r--  include/linux/virtio.h | 11
-rw-r--r--  include/linux/virtio_vsock.h | 9
-rw-r--r--  include/linux/vm_event_item.h | 8
-rw-r--r--  include/linux/vmstat.h | 8
-rw-r--r--  include/linux/wmi.h | 45
-rw-r--r--  include/linux/workqueue.h | 2
-rw-r--r--  include/linux/writeback.h | 4
-rw-r--r--  include/linux/ww_mutex.h | 22
-rw-r--r--  include/linux/wwan.h | 2
-rw-r--r--  include/linux/xattr.h | 2
-rw-r--r--  include/linux/zsmalloc.h | 8
329 files changed, 21493 insertions, 2413 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fbf0c3a65f59..3a412dcebc29 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -107,6 +107,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_GIC_V5,
ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index d4ed5622cf2b..17bb3374f4ca 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -27,12 +27,15 @@ int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+struct fwnode_handle *iort_iwb_handle(u32 iwb_id);
#ifdef CONFIG_ACPI_IORT
u32 iort_msi_map_id(struct device *dev, u32 id);
+u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node);
+int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
+int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa);
void acpi_configure_pmsi_domain(struct device *dev);
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head);
@@ -46,9 +49,15 @@ phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
+static inline u32 iort_msi_xlate(struct device *dev, u32 id, struct fwnode_handle **node)
+{ return id; }
+static inline int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
+{ return -ENODEV; }
static inline struct irq_domain *iort_get_device_domain(
struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
+static inline int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa)
+{ return -ENODEV; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
static inline
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 02940be66324..df0f5c382286 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -56,12 +56,14 @@ struct aer_capability_regs {
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
int pcie_aer_is_native(struct pci_dev *dev);
+void pci_aer_unmask_internal_errors(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
+static inline void pci_aer_unmask_internal_errors(struct pci_dev *dev) { }
#endif
void pci_print_aer(struct pci_dev *dev, int aer_severity,
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
index 6fa510f419c0..55198d2875cc 100644
--- a/include/linux/amd-pmf-io.h
+++ b/include/linux/amd-pmf-io.h
@@ -61,5 +61,26 @@ enum laptop_placement {
LP_UNDEFINED,
};
+/**
+ * struct amd_pmf_npu_metrics: Get NPU metrics data from PMF driver
+ * @npuclk_freq: NPU clock frequency [MHz]
+ * @npu_busy: NPU busy % [0-100]
+ * @npu_power: NPU power [mW]
+ * @mpnpuclk_freq: MPNPU [MHz]
+ * @npu_reads: NPU read bandwidth [MB/sec]
+ * @npu_writes: NPU write bandwidth [MB/sec]
+ */
+struct amd_pmf_npu_metrics {
+ u16 npuclk_freq;
+ u16 npu_busy[8];
+ u16 npu_power;
+ u16 mpnpuclk_freq;
+ u16 npu_reads;
+ u16 npu_writes;
+};
+
int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
+
+/* AMD PMF and NPU interface */
+int amd_pmf_get_npu_data(struct amd_pmf_npu_metrics *info);
#endif
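
For orientation, a minimal consumer sketch of the new NPU interface (the caller below is hypothetical, and it assumes the usual 0-on-success convention for amd_pmf_get_npu_data()):

#include <linux/amd-pmf-io.h>
#include <linux/printk.h>

/* Hypothetical consumer: log one NPU telemetry sample. */
static int report_npu_sample(void)
{
        struct amd_pmf_npu_metrics m = {};
        int ret;

        ret = amd_pmf_get_npu_data(&m);
        if (ret)
                return ret;

        pr_info("NPU %u MHz, %u mW, rd %u MB/s, wr %u MB/s\n",
                m.npuclk_freq, m.npu_power, m.npu_reads, m.npu_writes);
        return 0;
}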
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
index 06d7d83196ca..0c4fec98822e 100644
--- a/include/linux/array_size.h
+++ b/include/linux/array_size.h
@@ -10,4 +10,10 @@
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+/**
+ * ARRAY_END - get a pointer to one past the last element in array @arr
+ * @arr: array
+ */
+#define ARRAY_END(arr) (&(arr)[ARRAY_SIZE(arr)])
+
#endif /* _LINUX_ARRAY_SIZE_H */
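
ARRAY_END() gives the canonical one-past-the-end bound for pointer-style iteration; a brief sketch (the lookup table is illustrative):

#include <linux/array_size.h>
#include <linux/types.h>

static const u16 divisors[] = { 1, 2, 4, 8, 16 };

/* Walk the table with pointers instead of an index. */
static bool divisor_supported(u16 d)
{
        const u16 *p;

        for (p = divisors; p != ARRAY_END(divisors); p++)
                if (*p == d)
                        return true;
        return false;
}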
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 54b416e26995..8fd48bcb2a46 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -26,10 +26,7 @@ enum {
ATA_MAX_DEVICES = 2, /* per bus/port */
ATA_MAX_PRD = 256, /* we could make these 256/256 */
ATA_SECT_SIZE = 512,
- ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
- ATA_MAX_SECTORS_1024 = 1024,
- ATA_MAX_SECTORS_8191 = 8191,
ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 2f9d36b72bd8..cdc25f8979f7 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2121,7 +2121,7 @@ raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -2155,7 +2155,7 @@ raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -2189,7 +2189,7 @@ raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -2222,7 +2222,7 @@ raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -4247,7 +4247,7 @@ raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -4281,7 +4281,7 @@ raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -4315,7 +4315,7 @@ raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -4348,7 +4348,7 @@ raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// b565db590afeeff0d7c9485ccbca5bb6e155749f
+// 206314f82b8b73a5c3aa69cf7f35ac9e7b5d6b58
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 37ab6314a9f7..feb3b5dc3e96 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1269,7 +1269,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -1292,7 +1292,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -1314,7 +1314,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -1337,7 +1337,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -2847,7 +2847,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -2870,7 +2870,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -2892,7 +2892,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -2915,7 +2915,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4425,7 +4425,7 @@ atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -4448,7 +4448,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -4470,7 +4470,7 @@ atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -4493,7 +4493,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// f618ac667f868941a84ce0ab2242f1786e049ed4
+// 9dd948d3012b22c4e75933a5172983f912e46439
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index f86b29d90877..6a4e47d2db35 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1449,7 +1449,7 @@ raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -1473,7 +1473,7 @@ raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -1497,7 +1497,7 @@ raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -1521,7 +1521,7 @@ raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// eadf183c3600b8b92b91839dd3be6bcc560c752d
+// 4b882bf19018602c10816c52f8b4ae280adc887b
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index b3643de9931d..fa6520e192be 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -36,7 +36,7 @@ attribute_container_set_no_classdevs(struct attribute_container *atc)
atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
}
-int attribute_container_register(struct attribute_container *cont);
+void attribute_container_register(struct attribute_container *cont);
int __must_check attribute_container_unregister(struct attribute_container *cont);
void attribute_container_create_device(struct device *dev,
int (*fn)(struct attribute_container *,
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 536f8ee8da81..b642b5faca65 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -13,7 +13,6 @@
#include <linux/ptrace.h>
#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
-#include <uapi/linux/netfilter/nf_tables.h>
#include <uapi/linux/fanotify.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
@@ -128,12 +127,6 @@ enum audit_nfcfgop {
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
-/* only for compat system calls */
-extern unsigned compat_write_class[];
-extern unsigned compat_read_class[];
-extern unsigned compat_dir_class[];
-extern unsigned compat_chattr_class[];
-extern unsigned compat_signal_class[];
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -195,6 +188,8 @@ extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
+extern int audit_log_nf_skb(struct audit_buffer *ab,
+ const struct sk_buff *skb, u8 nfproto);
extern int audit_update_lsm_rules(void);
@@ -272,6 +267,12 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
+static inline int audit_log_nf_skb(struct audit_buffer *ab,
+ const struct sk_buff *skb, u8 nfproto)
+{
+ return 0;
+}
+
static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
{
return INVALID_UID;
@@ -316,7 +317,6 @@ extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
-extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
@@ -380,12 +380,6 @@ static inline void audit_syscall_exit(void *pt_regs)
__audit_syscall_exit(success, return_code);
}
}
-static inline struct filename *audit_reusename(const __user char *name)
-{
- if (unlikely(!audit_dummy_context()))
- return __audit_reusename(name);
- return NULL;
-}
static inline void audit_getname(struct filename *name)
{
if (unlikely(!audit_dummy_context()))
@@ -624,10 +618,6 @@ static inline struct audit_context *audit_context(void)
{
return NULL;
}
-static inline struct filename *audit_reusename(const __user char *name)
-{
- return NULL;
-}
static inline void audit_getname(struct filename *name)
{ }
static inline void audit_inode(struct filename *name,
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
index 0e34d673ef17..2b8153791e6a 100644
--- a/include/linux/audit_arch.h
+++ b/include/linux/audit_arch.h
@@ -23,4 +23,11 @@ enum auditsc_class_t {
extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
#endif
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0217c1073735..c88fd4d37d1f 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -46,7 +46,6 @@ enum wb_reason {
WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
- WB_REASON_LAPTOP_TIMER,
WB_REASON_FS_FREE_SPACE,
/*
* There is no bdi forker thread any more and works are done
@@ -204,8 +203,6 @@ struct backing_dev_info {
char dev_name[64];
struct device *owner;
- struct timer_list laptop_mode_wb_timer;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
#endif
diff --git a/include/linux/balloon.h b/include/linux/balloon.h
new file mode 100644
index 000000000000..ca5b15150f42
--- /dev/null
+++ b/include/linux/balloon.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common interface for implementing a memory balloon, including support
+ * for migration of pages inflated in a memory balloon.
+ *
+ * Balloon page migration makes use of the general "movable_ops page migration"
+ * feature.
+ *
+ * page->private is used to reference the responsible balloon device.
+ * That these pages have movable_ops, and which movable_ops apply,
+ * is derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
+ *
+ * Once the page type and the PG_movable_ops are set, migration code
+ * can initiate page isolation by invoking the
+ * movable_operations()->isolate_page() callback
+ *
+ * As long as page->private is set, the page is either on the balloon list
+ * or isolated for migration. If page->private is not set, the page is
+ * either still getting inflated, or was deflated to be freed by the balloon
+ * driver soon. Isolation is impossible in both cases.
+ *
+ * As the page isolation scanning step a compaction thread does is a lockless
+ * procedure (from a page standpoint), it might bring some racy situations while
+ * performing balloon page migration. In order to sort out these racy scenarios
+ * and safely perform balloon's page migration we must, always, ensure following
+ * these simple rules:
+ *
+ * i. Inflation/deflation must set/clear page->private under the
+ * balloon_pages_lock
+ *
+ * ii. isolation or dequeueing procedure must remove the page from balloon
+ * device page list under balloon_pages_lock
+ *
+ * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
+ */
+#ifndef _LINUX_BALLOON_H
+#define _LINUX_BALLOON_H
+#include <linux/pagemap.h>
+#include <linux/page-flags.h>
+#include <linux/migrate.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/list.h>
+
+/*
+ * Balloon device information descriptor.
+ * This struct is used to allow the common balloon page migration interface
+ * procedures to find the proper balloon device holding memory pages they'll
+ * have to cope for page migration, as well as it serves the balloon driver as
+ * a page book-keeper for its registered balloon devices.
+ */
+struct balloon_dev_info {
+ unsigned long isolated_pages; /* # of isolated pages for migration */
+ struct list_head pages; /* Pages enqueued & handled to Host */
+ int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+ bool adjust_managed_page_count;
+};
+
+struct page *balloon_page_alloc(void);
+void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
+ struct page *page);
+struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
+size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages);
+size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages, size_t n_req_pages);
+
+static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
+{
+ balloon->isolated_pages = 0;
+ INIT_LIST_HEAD(&balloon->pages);
+ balloon->migratepage = NULL;
+ balloon->adjust_managed_page_count = false;
+}
+#endif /* _LINUX_BALLOON_H */
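
For orientation, a driver-side sketch of this interface (the driver is hypothetical, error paths trimmed, and the deflate side is shown naively with any driver-specific finalize step elided; per the header comment, list updates are serialized inside the enqueue/dequeue helpers):

#include <linux/balloon.h>

static struct balloon_dev_info my_balloon;

static int my_balloon_init(void)
{
        struct page *page;

        balloon_devinfo_init(&my_balloon);

        /* Inflate by one page: allocate, then hand it to the core. */
        page = balloon_page_alloc();
        if (!page)
                return -ENOMEM;
        balloon_page_enqueue(&my_balloon, page);
        return 0;
}

static void my_balloon_deflate_one(void)
{
        struct page *page = balloon_page_dequeue(&my_balloon);

        if (page)
                __free_page(page);
}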
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
deleted file mode 100644
index 7cfe48769239..000000000000
--- a/include/linux/balloon_compaction.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/linux/balloon_compaction.h
- *
- * Common interface definitions for making balloon pages movable by compaction.
- *
- * Balloon page migration makes use of the general "movable_ops page migration"
- * feature.
- *
- * page->private is used to reference the responsible balloon device.
- * That these pages have movable_ops, and which movable_ops apply,
- * is derived from the page type (PageOffline()) combined with the
- * PG_movable_ops flag (PageMovableOps()).
- *
- * As the page isolation scanning step a compaction thread does is a lockless
- * procedure (from a page standpoint), it might bring some racy situations while
- * performing balloon page compaction. In order to sort out these racy scenarios
- * and safely perform balloon's page compaction and migration we must, always,
- * ensure following these simple rules:
- *
- * i. Setting the PG_movable_ops flag and page->private with the following
- * lock order
- * +-page_lock(page);
- * +--spin_lock_irq(&b_dev_info->pages_lock);
- *
- * ii. isolation or dequeueing procedure must remove the page from balloon
- * device page list under b_dev_info->pages_lock.
- *
- * The functions provided by this interface are placed to help on coping with
- * the aforementioned balloon page corner case, as well as to ensure the simple
- * set of exposed rules are satisfied while we are dealing with balloon pages
- * compaction / migration.
- *
- * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
- */
-#ifndef _LINUX_BALLOON_COMPACTION_H
-#define _LINUX_BALLOON_COMPACTION_H
-#include <linux/pagemap.h>
-#include <linux/page-flags.h>
-#include <linux/migrate.h>
-#include <linux/gfp.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-
-/*
- * Balloon device information descriptor.
- * This struct is used to allow the common balloon compaction interface
- * procedures to find the proper balloon device holding memory pages they'll
- * have to cope for page compaction / migration, as well as it serves the
- * balloon driver as a page book-keeper for its registered balloon devices.
- */
-struct balloon_dev_info {
- unsigned long isolated_pages; /* # of isolated pages for migration */
- spinlock_t pages_lock; /* Protection to pages list */
- struct list_head pages; /* Pages enqueued & handled to Host */
- int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
- struct page *page, enum migrate_mode mode);
-};
-
-extern struct page *balloon_page_alloc(void);
-extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
- struct page *page);
-extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
-extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
- struct list_head *pages);
-extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
- struct list_head *pages, size_t n_req_pages);
-
-static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
-{
- balloon->isolated_pages = 0;
- spin_lock_init(&balloon->pages_lock);
- INIT_LIST_HEAD(&balloon->pages);
- balloon->migratepage = NULL;
-}
-
-#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct movable_operations balloon_mops;
-/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
- */
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
-{
- return (struct balloon_dev_info *)page_private(page);
-}
-#endif /* CONFIG_BALLOON_COMPACTION */
-
-/*
- * balloon_page_insert - insert a page into the balloon's page list and make
- * the page->private assignment accordingly.
- * @balloon : pointer to balloon device
- * @page : page to be assigned as a 'balloon page'
- *
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before inserting a page into the balloon device.
- */
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageOffline(page);
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
- SetPageMovableOps(page);
- set_page_private(page, (unsigned long)balloon);
- }
- list_add(&page->lru, &balloon->pages);
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
- return GFP_HIGHUSER_MOVABLE;
- return GFP_HIGHUSER;
-}
-
-/*
- * balloon_page_finalize - prepare a balloon page that was removed from the
- * balloon list for release to the page allocator
- * @page: page to be released to the page allocator
- *
- * Caller must ensure that the page is locked.
- */
-static inline void balloon_page_finalize(struct page *page)
-{
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
- set_page_private(page, 0);
- /* PageOffline is sticky until the page is freed to the buddy. */
-}
-
-/*
- * balloon_page_push - insert a page into a page list.
- * @head : pointer to list
- * @page : page to be added
- *
- * Caller must ensure the page is private and protect the list.
- */
-static inline void balloon_page_push(struct list_head *pages, struct page *page)
-{
- list_add(&page->lru, pages);
-}
-
-/*
- * balloon_page_pop - remove a page from a page list.
- * @head : pointer to list
- * @page : page to be added
- *
- * Caller must ensure the page is private and protect the list.
- */
-static inline struct page *balloon_page_pop(struct list_head *pages)
-{
- struct page *page = list_first_entry_or_null(pages, struct page, lru);
-
- if (!page)
- return NULL;
-
- list_del(&page->lru);
- return page;
-}
-#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index c75a9b3672aa..36a3f2275ecd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -256,12 +256,6 @@ static inline struct folio *bio_first_folio_all(struct bio *bio)
return page_folio(bio_first_page_all(bio));
}
-static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
-{
- WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
- return &bio->bi_io_vec[bio->bi_vcnt - 1];
-}
-
/**
* struct folio_iter - State for iterating all folios in a bio.
* @folio: The current folio we're iterating. NULL after the last folio.
@@ -403,6 +397,29 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
return iov_iter_npages(iter, max_segs);
}
+/**
+ * bio_iov_bounce_nr_vecs - calculate number of bvecs for a bounce bio
+ * @iter: iter to bounce from
+ * @op: REQ_OP_* for the bio
+ *
+ * Calculates how many bvecs are needed for the next bio to bounce from/to
+ * @iter.
+ */
+static inline unsigned short
+bio_iov_bounce_nr_vecs(struct iov_iter *iter, blk_opf_t op)
+{
+ /*
+ * We still need to bounce bvec iters, so don't special case them
+ * here unlike in bio_iov_vecs_to_alloc.
+ *
+ * For reads we need to use a vector for the bounce buffer, account
+ * for that here.
+ */
+ if (op_is_write(op))
+ return iov_iter_npages(iter, BIO_MAX_VECS);
+ return iov_iter_npages(iter, BIO_MAX_VECS - 1) + 1;
+}
+
struct request_queue;
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
@@ -414,6 +431,7 @@ static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
}
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
+void bio_reuse(struct bio *bio, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
@@ -456,6 +474,9 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
+int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
+void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty);
+
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
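
A sketch of how the new bounce helpers compose (the submitter is hypothetical; it assumes bio_iov_iter_bounce() returns 0 on success and that the completion path pairs it with bio_iov_iter_unbounce()):

#include <linux/bio.h>

static int submit_bounced(struct block_device *bdev, struct iov_iter *iter,
                          blk_opf_t opf)
{
        /* Size the bio for the bounce buffer, not just the iter pages. */
        unsigned short nr_vecs = bio_iov_bounce_nr_vecs(iter, opf);
        struct bio *bio;
        int ret;

        bio = bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
        if (!bio)
                return -ENOMEM;

        ret = bio_iov_iter_bounce(bio, iter);
        if (ret) {
                bio_put(bio);
                return ret;
        }

        /* Completion would call bio_iov_iter_unbounce() on this bio. */
        submit_bio(bio);
        return 0;
}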
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index c0989b5b0407..7869a6e59b6a 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -7,6 +7,18 @@
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <asm/processor.h> /* for cpu_relax() */
+
+/*
+ * For static context analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake context that we can cast to with the __bitlock(bitnum, addr) macro
+ * below, which will give us unique instances for each (bit, addr) pair that the
+ * static analysis can use.
+ */
+context_lock_struct(__context_bitlock) { };
+#define __bitlock(bitnum, addr) (struct __context_bitlock *)(bitnum + (addr))
+
/*
* bit-based spin_lock()
*
@@ -14,6 +26,7 @@
* are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+ __acquires(__bitlock(bitnum, addr))
{
/*
* Assuming the lock is uncontended, this never enters
@@ -32,13 +45,14 @@ static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
}
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+ __cond_acquires(true, __bitlock(bitnum, addr))
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -47,7 +61,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
return 0;
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
return 1;
}
@@ -55,6 +69,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
* bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -63,7 +78,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
@@ -72,6 +87,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -80,7 +96,7 @@ static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
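
To make the per-(bit, address) lock tokens concrete, a caller sketch (the structure and flag bit are hypothetical; context analysis now tracks each __bitlock(bit, addr) instance across these annotations):

#include <linux/bit_spinlock.h>

#define NODE_LOCK_BIT 0

struct my_node {
        unsigned long flags;
        int value;
};

static void my_node_set(struct my_node *n, int v)
{
        bit_spin_lock(NODE_LOCK_BIT, &n->flags);
        /* Held token: __bitlock(NODE_LOCK_BIT, &n->flags). */
        n->value = v;
        bit_spin_unlock(NODE_LOCK_BIT, &n->flags);
}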
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 126dc5b380af..54aeeef1f0ec 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -8,6 +8,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/typecheck.h>
#include <asm/byteorder.h>
@@ -243,7 +244,7 @@ __MAKE_OP(64)
#define __field_prep(mask, val) \
({ \
- __auto_type __mask = (mask); \
+ auto __mask = (mask); \
typeof(__mask) __val = (val); \
unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
__ffs(__mask) : __ffs64(__mask); \
@@ -252,7 +253,7 @@ __MAKE_OP(64)
#define __field_get(mask, reg) \
({ \
- __auto_type __mask = (mask); \
+ auto __mask = (mask); \
typeof(__mask) __reg = (reg); \
unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
__ffs(__mask) : __ffs64(__mask); \
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 58b0c5254a67..f7c3cb4a342f 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -132,6 +132,11 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
return bio->bi_crypt_context;
}
+static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
+{
+ return bio->bi_crypt_context;
+}
+
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
gfp_t gfp_mask);
@@ -169,8 +174,35 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
return false;
}
+static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
+{
+ return NULL;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+bool __blk_crypto_submit_bio(struct bio *bio);
+
+/**
+ * blk_crypto_submit_bio - Submit a bio that may have a crypto context
+ * @bio: bio to submit
+ *
+ * If @bio has no crypto context, or the crypt context attached to @bio is
+ * supported by the underlying device's inline encryption hardware, just submit
+ * @bio.
+ *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. For encryption this means submitting newly allocated
+ * bios for the encrypted payload while keeping back the source bio until they
+ * complete, while for reads the decryption happens in-place by a hooked in
+ * completion handler.
+ */
+static inline void blk_crypto_submit_bio(struct bio *bio)
+{
+ if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
+ submit_bio(bio);
+}
+
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
/**
* bio_crypt_clone - clone bio encryption context
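
A call-site sketch for the new wrapper (the submitter is hypothetical): since blk_crypto_submit_bio() degrades to a plain submit_bio() when no crypto context is attached, it can replace submit_bio() wholesale on paths that may carry one:

#include <linux/bio.h>
#include <linux/blk-crypto.h>

static void fs_submit_bio(struct bio *bio)
{
        /*
         * Hardware-capable or context-free bios go straight down;
         * everything else takes the kernel-crypto fallback inside
         * __blk_crypto_submit_bio().
         */
        blk_crypto_submit_bio(bio);
}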
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index a6b84206eb94..c15b1ac62765 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -91,7 +91,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
}
-static inline bool blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(const struct request *rq)
{
return rq->cmd_flags & REQ_INTEGRITY;
}
@@ -168,9 +168,9 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
{
return 0;
}
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(const struct request *rq)
{
- return 0;
+ return false;
}
static inline struct bio_vec rq_integrity_vec(struct request *rq)
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index cb88fc791fbd..214c181ff2c9 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -28,7 +28,7 @@ struct blk_dma_iter {
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter);
+ struct blk_dma_iter *iter);
/**
* blk_rq_dma_map_coalesce - were all segments coalesced?
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index cae9e857aea4..18a2388ba581 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,6 +13,7 @@
struct blk_mq_tags;
struct blk_flush_queue;
+struct io_comp_batch;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128
@@ -22,7 +23,8 @@ enum rq_end_io_ret {
RQ_END_IO_FREE,
};
-typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
+ const struct io_comp_batch *);
/*
* request flags */
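
An end_io callback adapted to the widened signature might look like this (hypothetical driver; the batch argument is ignored here):

#include <linux/blk-mq.h>
#include <linux/completion.h>

static enum rq_end_io_ret my_end_io(struct request *rq, blk_status_t error,
                                    const struct io_comp_batch *iob)
{
        struct completion *waiter = rq->end_io_data;

        complete(waiter);
        return RQ_END_IO_NONE;  /* request is freed by its owner */
}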
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5dc061d318a4..d59553324a84 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -232,6 +232,8 @@ struct bio {
atomic_t __bi_remaining;
+ /* The actual vec list, preserved by bio_reset() */
+ struct bio_vec *bi_io_vec;
struct bvec_iter bi_iter;
union {
@@ -275,13 +277,12 @@ struct bio {
atomic_t __bi_cnt; /* pin count */
- struct bio_vec *bi_io_vec; /* the actual vec list */
-
struct bio_set *bi_pool;
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
-#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+#define BIO_MAX_SIZE UINT_MAX /* max value of bi_iter.bi_size */
+#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> SECTOR_SHIFT)
static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 72e34acd439c..99ef8cd7673c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -340,14 +340,13 @@ typedef unsigned int __bitwise blk_features_t;
/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES ((__force blk_features_t)(1u << 14))
+
/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
((__force blk_features_t)(1u << 15))
-/* atomic writes enabled */
-#define BLK_FEAT_ATOMIC_WRITES \
- ((__force blk_features_t)(1u << 16))
-
/*
* Flags automatically inherited when stacking limits.
*/
@@ -551,7 +550,8 @@ struct request_queue {
/*
* queue settings
*/
- unsigned long nr_requests; /* Max # of requests */
+ unsigned int nr_requests; /* Max # of requests */
+ unsigned int async_depth; /* Max # of async requests */
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
@@ -681,7 +681,7 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q) \
((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
@@ -1026,7 +1026,7 @@ extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-/* Helper to convert REQ_OP_XXX to its string format XXX */
+/* Convert a request operation REQ_OP_name into the string "name" */
extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
@@ -1044,7 +1044,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
return bdev->bd_queue; /* this is never NULL */
}
-/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
+/* Convert a zone condition BLK_ZONE_COND_name into the string "name" */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
static inline unsigned int bio_zone_no(struct bio *bio)
@@ -1462,9 +1462,14 @@ bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}
+static inline bool bdev_rot(struct block_device *bdev)
+{
+ return blk_queue_rot(bdev_get_queue(bdev));
+}
+
static inline bool bdev_nonrot(struct block_device *bdev)
{
- return blk_queue_nonrot(bdev_get_queue(bdev));
+ return !bdev_rot(bdev);
}
static inline bool bdev_synchronous(struct block_device *bdev)
@@ -1822,6 +1827,7 @@ struct io_comp_batch {
struct rq_list req_list;
bool need_ts;
void (*complete)(struct io_comp_batch *);
+ void *poll_ctx;
};
static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
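
With the polarity flip from blk_queue_nonrot() to blk_queue_rot(), rotational checks read positively, and bdev_nonrot() survives as the negation (illustrative caller):

#include <linux/blkdev.h>

static bool use_seek_friendly_layout(struct block_device *bdev)
{
        /* Spinning media: prefer sequential placement. */
        return bdev_rot(bdev);
}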
diff --git a/include/linux/bnge/hsi.h b/include/linux/bnge/hsi.h
new file mode 100644
index 000000000000..8ea13d5407ee
--- /dev/null
+++ b/include/linux/bnge/hsi.h
@@ -0,0 +1,12609 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2026 Broadcom */
+
+/* DO NOT MODIFY!!! This file is automatically generated. */
+
+#ifndef _BNGE_HSI_H_
+#define _BNGE_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
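/*
 * Sketch (helpers hypothetical): decoding the tlv flags byte with the
 * masks above. TLV_FLAGS_MORE clear marks the last TLV in a chain;
 * TLV_FLAGS_REQUIRED marks a TLV the receiver must understand,
 * presumably failing otherwise with HWRM_ERR_CODE_UNSUPPORTED_TLV.
 */
static inline bool tlv_is_last(const struct tlv *t)
{
	return !(t->flags & TLV_FLAGS_MORE);
}

static inline bool tlv_is_required(const struct tlv *t)
{
	return t->flags & TLV_FLAGS_REQUIRED;
}
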
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
+ __le16 size;
+ __le64 req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_PSP_CFG 0x72UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
+ #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
+ #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_ADV 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QUIESCE 0xc5UL
+ #define HWRM_FW_STATE_BACKUP 0xc6UL
+ #define HWRM_FW_STATE_RESTORE 0xc7UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
+ #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
+ #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_START 0xe4UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
+ #define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
+ #define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_PROF_QUERY 0x1c3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_FUNC_PTP_QCFG 0x1c5UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_PORT_PHY_FDRSTAT 0x232UL
+ #define HWRM_PORT_PHY_DBG 0x23aUL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_QCFG 0x264UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_QCFG 0x265UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
+ #define HWRM_TFC_SPR_BA_SET 0x3a1UL
+ #define HWRM_TFC_SPR_BA_GET 0x3a2UL
+ #define HWRM_MGMT_FILTER_ALLOC 0x3e8UL
+ #define HWRM_MGMT_FILTER_FREE 0x3e9UL
+ #define HWRM_MGMT_FILTER_CFG 0x3eaUL
+ #define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_DBG_TOKEN_QUERY_AUTH_IDS 0xff2fUL
+ #define HWRM_DBG_TOKEN_CFG 0xff30UL
+ #define HWRM_DBG_TRACE_TRIGGER 0xff31UL
+ #define HWRM_DBG_TRACE_TRIGGER_STATUS 0xff32UL
+ #define HWRM_NVM_SET_PROFILE 0xffe9UL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
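/*
 * Sketch: every request carries one of the opcodes above in its
 * little-endian req_type field. bnge_hwrm_set_opcode is a hypothetical
 * helper shown only to illustrate the encoding.
 */
static inline void bnge_hwrm_set_opcode(struct input *req, u16 opcode)
{
	req->req_type = cpu_to_le16(opcode);	/* e.g. HWRM_VER_GET */
}
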
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
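/*
 * Sketch: collapsing a HWRM status into an errno (constants from
 * <linux/errno.h>). The mapping shown is an illustrative choice, not
 * one mandated by this header; callers would le16_to_cpu() the
 * error_code from the response header first.
 */
static inline int bnge_hwrm_to_errno(u16 error_code)
{
	switch (error_code) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_BUSY:
		return -EBUSY;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
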
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 704
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 15
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 1
+#define HWRM_VERSION_STR "1.15.1.1"
+
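/*
 * Sketch: in the similar bnxt interface the last byte of every
 * response is written last by firmware, so the driver polls it to
 * detect completion; comparing against HWRM_RESP_VALID_KEY here is an
 * assumption carried over from that convention.
 */
static inline bool bnge_hwrm_resp_valid(const struct hwrm_err_output *resp)
{
	/* READ_ONCE() keeps the compiler from caching the DMA'd byte */
	return READ_ONCE(resp->valid) == HWRM_RESP_VALID_KEY;
}
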
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
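/*
 * Sketch (helper name hypothetical): a VER_GET request advertises the
 * interface version the driver was built against, i.e. the
 * HWRM_VERSION_MAJOR.MINOR.UPDATE components behind HWRM_VERSION_STR.
 */
static inline void bnge_hwrm_ver_get_prep(struct hwrm_ver_get_input *req)
{
	req->req_type	   = cpu_to_le16(HWRM_VER_GET);
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
}
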
+/* hwrm_ver_get_output (size:1472b/184B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_DEBUG_TOKEN_SUPPORTED 0x20000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_PSP_SUPPORTED 0x40000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ROCE_COUNTERSET_SUPPORTED 0x80000UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ char active_pkg_name[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ __le16 max_req_timeout;
+ __le16 max_psp_supported_pfs;
+ __le16 max_psp_supported_vfs;
+ __le16 max_roce_countersets;
+ __le16 max_ext_req_timeout;
+ u8 unused_1[3];
+ u8 valid;
+};
+
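/*
 * Sketch: dev_caps_cfg is a little-endian capability bitmap; for
 * example, firmware may demand the short command format:
 */
static inline bool bnge_short_cmd_required(const struct hwrm_ver_get_output *resp)
{
	return le32_to_cpu(resp->dev_caps_cfg) &
	       VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED;
}
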
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
+ __le16 len;
+ __le32 opaque;
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
+ __le32 unused_2;
+};
+
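/*
 * Sketch of the MASK/SFT convention used throughout this file:
 * (word & FIELD_MASK) >> FIELD_SFT extracts the raw field, while the
 * named values (already shifted into position) compare directly
 * against the masked word:
 */
static inline bool eject_cmpl_buf_did_not_fit(const struct eject_cmpl *ej)
{
	return (le16_to_cpu(ej->v) & EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK) ==
	       EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT;
}
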
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
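/*
 * Sketch: req_buf_addr_v[] packs a 64-bit request buffer address with
 * the valid flag in bit 0. This decode assumes, as in the similar bnxt
 * interface, that the buffer is aligned so bit 0 of the real address
 * is always zero:
 */
static inline u64 fwd_req_cmpl_buf_addr(const struct hwrm_fwd_req_cmpl *c)
{
	u64 addr = ((u64)le32_to_cpu(c->req_buf_addr_v[1]) << 32) |
		   le32_to_cpu(c->req_buf_addr_v[0]);

	return addr & ~(u64)FWD_REQ_CMPL_V;	/* strip the valid bit */
}
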
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PNO_HOST_DMA_COMPLETE 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_GID_UPDATE 0x52UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ADPTV_QOS 0x53UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FABRIC_NEXT_HOP_IP_UPDATED 0x54UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x55UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PSP_SM_KEY_ROTATE_NOTIFY 0x56UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
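/*
 * Sketch (skeleton only, handler hypothetical): dispatching an async
 * event completion. The per-event layout of event_data1/event_data2 is
 * given by the specialized structures that follow.
 */
static inline void bnge_async_event(const struct hwrm_async_event_cmpl *ev)
{
	if (!(ev->opaque_v & ASYNC_EVENT_CMPL_V))
		return;		/* completion record not yet valid */

	switch (le16_to_cpu(ev->event_id)) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		/* decoded via hwrm_async_event_cmpl_link_status_change */
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
		/* decoded via hwrm_async_event_cmpl_reset_notify */
		break;
	default:
		break;
	}
}
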
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
+};
+
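/*
 * Sketch: bit 0 of event_data1 carries the new link state
 * (1 = up, 0 = down), per the LINK_CHANGE values above:
 */
static inline bool link_status_is_up(const struct hwrm_async_event_cmpl_link_status_change *ev)
{
	return le32_to_cpu(ev->event_data1) &
	       ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;
}
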
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
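/*
 * Sketch: decoding the reset notification. The delay field counts
 * 100 ms ticks the driver has before firmware proceeds with the reset:
 */
static inline u32 reset_notify_delay_ms(const struct hwrm_async_event_cmpl_reset_notify *ev)
{
	u32 d1 = le32_to_cpu(ev->event_data1);

	return 100 * ((d1 & ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK) >>
		      ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT);
}
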
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
+};
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
+};
+
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
+
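/*
 * Sketch: the 48-bit PPS timestamp is split across the two event data
 * words: the upper 16 bits sit in event_data2 at the MASK/SFT position
 * above, the lower 32 bits fill event_data1:
 */
static inline u64 pps_event_timestamp(const struct hwrm_async_event_cmpl_pps_timestamp *ev)
{
	u64 upper = (le32_to_cpu(ev->event_data2) &
		     ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
		    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT;

	return (upper << 32) | le32_to_cpu(ev->event_data1);
}
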
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE 0xcUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPRT_TRACE 0xdUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RERT_TRACE 0xeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPC_MSG_TRACE 0xfUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPC_CMPL_TRACE 0x10UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MPC_CMPL_TRACE
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUP_UDCC_SES 0x7UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+};
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
+
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
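+/*
+ * Editorial aside, not part of this header: every error_report
+ * variant above shares the event_data1 ERROR_TYPE layout of the
+ * _base structure, so a handler can extract the 8-bit type once and
+ * then treat the completion as the matching variant (PAUSE_STORM,
+ * NVM, THERMAL, ...). Hypothetical helper:
+ */
+static inline u8 bnge_error_report_type(const struct hwrm_async_event_cmpl_error_report_base *ev)
+{
+	return (le32_to_cpu(ev->event_data1) &
+		ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>
+	       ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT;
+}
+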
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
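+/*
+ * Editorial aside, not part of this header: hwrm_*_input structures
+ * open with a common header (req_type, cmpl_ring, seq_id, target_id,
+ * resp_addr) and hwrm_*_output structures end with a 'valid' byte
+ * that firmware is expected to write after the rest of the DMA'd
+ * response, so callers poll it for completion. Hedged sketch with a
+ * hypothetical helper name:
+ */
+static inline bool bnge_func_reset_resp_ready(const struct hwrm_func_reset_output *resp)
+{
+	return READ_ONCE(resp->valid) != 0;
+}
+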
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcaps_output (size:1152b/144B) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ __le16 max_tso_segs;
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_CHANGE_UDP_SRCPORT_SUPPORT 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_DATA_SUPPORTED 0x800UL
+ __le16 max_roce_vfs;
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
+ u8 valid;
+};
+
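+/*
+ * Editorial aside, not part of this header: flags/flags_ext/flags_ext2/
+ * flags_ext3 above are little-endian bitmaps, so capability checks
+ * reduce to a masked test after byte-swapping. Hypothetical example
+ * probing PTP support from a completed query:
+ */
+static inline bool bnge_ptp_capable(const struct hwrm_func_qcaps_output *resp)
+{
+	return !!(le32_to_cpu(resp->flags) & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED);
+}
+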
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcfg_output (size:1408b/176B) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ __le16 alloc_msix;
+ __le16 registered_vfs;
+ __le16 l2_doorbell_bar_size_kb;
+ u8 active_endpoints;
+ u8 always_1;
+ __le32 reset_addr_poll;
+ __le16 legacy_l2_db_size_kb;
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ __le16 roce_vnic_id;
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
+ __le16 stag_vid;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 mirror_vnic_id;
+ u8 max_link_width;
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_LAST FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16
+ u8 max_link_speed;
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_LAST FUNC_QCFG_RESP_MAX_LINK_SPEED_G5
+ u8 negotiated_link_width;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16
+ u8 negotiated_link_speed;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5
+ u8 unused_7[2];
+ u8 pcie_compliance;
+ u8 unused_8;
+ __le16 l2_db_multi_page_size_kb;
+ u8 unused_9[5];
+ u8 valid;
+};
+
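+/*
+ * Editorial aside, not part of this header: the min_bw/max_bw words
+ * above pack a 28-bit value, a one-bit bits-vs-bytes scale and a
+ * three-bit unit into a single __le32. Hypothetical decode helpers:
+ */
+static inline u32 bnge_bw_value(__le32 bw)
+{
+	return (le32_to_cpu(bw) & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
+	       FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
+}
+
+static inline u32 bnge_bw_unit(__le32 bw)
+{
+	/* compare against e.g. FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA */
+	return le32_to_cpu(bw) & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK;
+}
+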
+/* hwrm_func_cfg_input (size:1280b/160B) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_msix;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
+ __le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ #define FUNC_CFG_REQ_ENABLES2_PCIE_COMPLIANCE 0x1000UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ __le16 physical_slot_number;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
+ u8 pcie_compliance;
+ u8 unused_2;
+};
+
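+/*
+ * Editorial aside, not part of this header: request fields are only
+ * honoured when the matching bit is set in 'enables', so callers set
+ * the value and its enable together. Hypothetical sketch changing
+ * only the MRU of a function:
+ */
+static inline void bnge_func_cfg_set_mru(struct hwrm_func_cfg_input *req, u16 mru)
+{
+	req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MRU);
+	req->mru = cpu_to_le16(mru);
+}
+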
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_NPAR_PARTITION_DOWN_FAILED 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_RES_ARRAY_ALLOC_FAILED 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED 0x5UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_RES_UPDATE_FAILED 0x6UL
+ #define FUNC_CFG_CMD_ERR_CODE_APPLY_MAX_BW_FAILED 0x7UL
+ #define FUNC_CFG_CMD_ERR_CODE_ENABLE_EVB_FAILED 0x8UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_ASSET_TEST_FAILED 0x9UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_RES_UPDATE_FAILED 0xaUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_ASSET_TEST_FAILED 0xbUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_RES_UPDATE_FAILED 0xcUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_ASSET_TEST_FAILED 0xdUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_RES_UPDATE_FAILED 0xeUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_ASSET_TEST_FAILED 0xfUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_RES_UPDATE_FAILED 0x10UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_ASSET_TEST_FAILED 0x11UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_RES_UPDATE_FAILED 0x12UL
+ #define FUNC_CFG_CMD_ERR_CODE_FAILED_TO_START_STATS_THREAD 0x13UL
+ #define FUNC_CFG_CMD_ERR_CODE_RDMA_SRIOV_DISABLED 0x14UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_DISABLED 0x15UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_ASSET_TEST_FAILED 0x16UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_RES_UPDATE_FAILED 0x17UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_DISABLED 0x18UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_ASSET_TEST_FAILED 0x19UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_RES_UPDATE_FAILED 0x1aUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_DISABLED 0x1bUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_ASSET_TEST_FAILED 0x1cUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_RES_UPDATE_FAILED 0x1dUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_DISABLED 0x1eUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_ASSET_TEST_FAILED 0x1fUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_RES_UPDATE_FAILED 0x20UL
+ #define FUNC_CFG_CMD_ERR_CODE_INVALID_KDNET_MODE 0x21UL
+ #define FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL 0x22UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL
+ u8 unused_0[7];
+};
+
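+/* HWRM_FUNC_QSTATS: query the per-function TX/RX packet, byte, drop and
+ * RX aggregation counters for the function identified by fid.
+ */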
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 clear_seq;
+ u8 unused_0[6];
+ u8 valid;
+};
+
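+/* HWRM_FUNC_QSTATS_EXT: extended per-function statistics, including TPA
+ * counters, optionally scoped to one scheduler queue (schq_id) and
+ * traffic class when the SCHQ_ID enable bit is set.
+ */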
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
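+/* HWRM_FUNC_CLR_STATS: clear the statistics counters of the function
+ * identified by fid.
+ */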
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
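+/* HWRM_FUNC_VF_RESC_FREE: PF-issued command to free the resources
+ * allocated to the VF identified by vf_id.
+ */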
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
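+/* HWRM_FUNC_DRV_RGTR: register the host driver with the firmware. The
+ * vf_req_fwd and async_event_fwd bitmaps select which forwarded VF
+ * commands and which async events the driver wants to receive.
+ */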
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
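+/* HWRM_FUNC_BUF_RGTR: register host buffers used to forward HWRM
+ * requests from VFs to the PF driver.
+ */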
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
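+/* HWRM_FUNC_DRV_QVER: query the driver type and version registered on
+ * the function identified by fid.
+ */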
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
+};
+
+/* hwrm_func_drv_qver_output (size:256b/32B) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
+};
+
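+/* HWRM_FUNC_RESOURCE_QCAPS: query the minimum and maximum resource
+ * ranges available to the function, along with the VF reservation
+ * strategy used by the firmware.
+ */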
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 max_tx_scheduler_inputs;
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
+ u8 valid;
+};
+
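+/* HWRM_FUNC_VF_RESOURCE_CFG: PF-issued command to assign minimum and
+ * maximum resource ranges to the VF identified by vf_id; the response
+ * reports what the firmware actually reserved.
+ */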
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
+ u8 unused_0[7];
+ u8 valid;
+};
+
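+/* HWRM_FUNC_BACKING_STORE_QCAPS: query the entry sizes and entry-count
+ * limits of the context backing store the host must allocate for the
+ * device (QP, SRQ, CQ, VNIC, STAT, TQM, MRAV, TIM and key contexts).
+ */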
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ __le16 mrav_num_entries_units;
+ u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
+ u8 valid;
+};
+
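+/* tqm_fp_ring_cfg describes the backing store of one TQM fast-path
+ * ring: packed page level/size, entry count and page directory base.
+ */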
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
+};
+
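+/* HWRM_FUNC_BACKING_STORE_CFG: configure the host memory backing the
+ * device context areas. Each *_pg_size_*_lvl byte packs the page-table
+ * indirection level in bits [3:0] and the page size in bits [7:4],
+ * e.g. a two-level table of 4K pages is encoded as
+ * (FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 |
+ *  FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K).
+ */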
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ __le16 qp_num_fast_qpmd_entries;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
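+/* HWRM_ERROR_RECOVERY_QCFG: query the firmware error recovery
+ * parameters: polling frequency, wait periods, the address spaces and
+ * offsets of the health, heartbeat and reset-count registers, and the
+ * register write sequence used to reset the device.
+ */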
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
+
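+/* HWRM_FUNC_ECHO_RESPONSE: return the event_data words received in a
+ * firmware echo_request async event back to the firmware.
+ */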
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
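+/* HWRM_FUNC_PTP_PIN_QCFG: query the number of time sync I/O (TSIO)
+ * pins and the enable state and usage (PPS in/out, sync in/out) of
+ * each pin.
+ */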
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
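+/* HWRM_FUNC_PTP_CFG: configure PTP on the function: PPS event source,
+ * frequency adjustment DLL source and phase, external frequency
+ * adjustment, and setting the hardware clock time.
+ */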
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
+ u8 unused_0[3];
+ u8 valid;
+};
+
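+/*
+ * Editorial usage sketch, not part of the generated interface: fetching the
+ * most recent PPS event timestamp.  The hwrm_req_*() helpers, the
+ * HWRM_FUNC_PTP_TS_QUERY request ID and the 'bp' context follow the bnxt_en
+ * HWRM API and are assumptions here; only the flag and field names are taken
+ * from the structures above.
+ *
+ *	struct hwrm_func_ptp_ts_query_output *resp;
+ *	struct hwrm_func_ptp_ts_query_input *req;
+ *	u64 ns;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_TS_QUERY);
+ *	if (rc)
+ *		return rc;
+ *	req->flags = cpu_to_le32(FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME);
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		ns = le64_to_cpu(resp->pps_event_ts);
+ *	hwrm_req_drop(bp, req);
+ */
+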
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ttx_pacing_rate_prof_query_input (size:192b/24B) */
+struct hwrm_func_ttx_pacing_rate_prof_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ttx_pacing_rate_prof_query_output (size:128b/16B) */
+struct hwrm_func_ttx_pacing_rate_prof_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 start_prof_id;
+ u8 end_prof_id;
+ u8 active_prof_id;
+ #define FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_60M 0x0UL
+ #define FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_50G 0x1UL
+ #define FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_LAST FUNC_TTX_PACING_RATE_PROF_QUERY_RESP_50G
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_func_ttx_pacing_rate_query_input (size:192b/24B) */
+struct hwrm_func_ttx_pacing_rate_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 profile_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_func_ttx_pacing_rate_query_output (size:4224b/528B) */
+struct hwrm_func_ttx_pacing_rate_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 rates[128];
+ u8 profile_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
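+/*
+ * Editorial note, not part of the generated interface: the two queries above
+ * pair up.  hwrm_func_ttx_pacing_rate_prof_query reports the valid profile ID
+ * range (start_prof_id..end_prof_id) plus the active profile, and each
+ * profile in that range can then be read back with
+ * hwrm_func_ttx_pacing_rate_query, whose response carries a table of 128
+ * little-endian rate words:
+ *
+ *	for (i = 0; i < 128; i++)
+ *		rate = le32_to_cpu(resp->rates[i]);
+ */
+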
+/* hwrm_func_key_ctx_alloc_input (size:384b/48B) */
+struct hwrm_func_key_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_key_ctxs;
+ __le32 dma_bufr_size_bytes;
+ u8 key_ctx_type;
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_TX 0x0UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_RX 0x1UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_TX 0x2UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_RX 0x3UL
+ #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_LAST FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_RX
+ u8 unused_0[7];
+ __le64 host_dma_addr;
+ __le32 partition_start_xid;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_key_ctx_alloc_output (size:192b/24B) */
+struct hwrm_func_key_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 num_key_ctxs_allocated;
+ u8 flags;
+ #define FUNC_KEY_CTX_ALLOC_RESP_FLAGS_KEY_CTXS_CONTIGUOUS 0x1UL
+ u8 unused_0;
+ __le32 partition_start_xid;
+ u8 unused_1[7];
+ u8 valid;
+};
+
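+/*
+ * Editorial note, not part of the generated interface: judging by the field
+ * names, firmware may allocate fewer key contexts than requested, so callers
+ * should honour num_key_ctxs_allocated rather than the num_key_ctxs they
+ * asked for, and should only assume the XIDs form one consecutive range
+ * starting at partition_start_xid when the contiguous flag is set:
+ *
+ *	if (resp->flags & FUNC_KEY_CTX_ALLOC_RESP_FLAGS_KEY_CTXS_CONTIGUOUS)
+ *		first_xid = le32_to_cpu(resp->partition_start_xid);
+ */
+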
+/* hwrm_func_key_ctx_free_input (size:256b/32B) */
+struct hwrm_func_key_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 key_ctx_type;
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_TX 0x0UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_RX 0x1UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_TX 0x2UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_RX 0x3UL
+ #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_LAST FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_RX
+ u8 unused_0;
+ __le32 partition_start_xid;
+ __le16 num_entries;
+ u8 unused_1[6];
+};
+
+/* hwrm_func_key_ctx_free_output (size:128b/16B) */
+struct hwrm_func_key_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_cmd_err (size:64b/8B) */
+struct hwrm_func_backing_store_cfg_v2_cmd_err {
+ u8 code;
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_QP_FAIL 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SRQ_FAIL 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_CQ_FAIL 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_VNIC_FAIL 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_STAT_FAIL 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TQM_SPR_FAIL 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TQM_FPR_FAIL 0x7UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_MRAV_FAIL 0x8UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TIM_FAIL 0x9UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_V2TRACE_FAIL 0xaUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TXCK_FAIL 0xbUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_RXCK_FAIL 0xcUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_MPC_FAIL 0xdUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SHADOW_DB_FAIL 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_XID_FAIL 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TFC_FAIL 0x10UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_TTXPACE_FAIL 0x11UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_CDU_ENABLE_FAIL 0x12UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SCHQ_ALLOC_FAIL 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_LAST FUNC_BACKING_STORE_CFG_V2_CMD_ERR_CODE_SCHQ_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
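+/*
+ * Editorial usage sketch, not part of the generated interface:
+ * page_size_pbl_level packs two subfields -- the PBL indirection level in
+ * bits 3:0 and the backing page size in bits 7:4 -- so one value from each
+ * group of defines above is OR-ed into the same byte:
+ *
+ *	req->page_size_pbl_level =
+ *		FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 |
+ *		FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K;
+ */
+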
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ERR_QPC_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 locked;
+ __le32 rsvd2[2];
+};
+
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mr_split_entries (size:128b/16B) */
+struct mr_split_entries {
+ __le32 mr_num_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* av_split_entries (size:128b/16B) */
+struct av_split_entries {
+ __le32 av_num_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* sq_split_entries (size:128b/16B) */
+struct sq_split_entries {
+ __le32 sq_num_l2_entries;
+ __le32 rsvd2;
+ __le32 rsvd3[2];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_cmd_err (size:64b/8B) */
+struct hwrm_func_backing_store_qcfg_v2_cmd_err {
+ u8 code;
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_SHDDB_FAIL 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_XID_FAIL 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_TXPAC_RING_FAIL 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_INVALID_FIELD 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_LAST FUNC_BACKING_STORE_QCFG_V2_CMD_ERR_CODE_INVALID_FIELD
+ u8 unused_0[7];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_PNO_TQM_RING 0x2cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MPRT_TRACE 0x2dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RERT_TRACE 0x2eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MR 0x2fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AV 0x30UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ 0x31UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_IQM 0x32UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MPC_MSG_TRACE 0x33UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MPC_CMPL_TRACE 0x34UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_PHYSICAL_PBL_PREFERRED 0x80UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le16 max_instance_count;
+ u8 rsvd3;
+ u8 valid;
+};
+
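+/*
+ * Editorial usage sketch, not part of the generated interface:
+ * next_valid_type lets a driver walk the supported backing-store types
+ * instead of probing every value up to TYPE_INVALID.  A sketch assuming
+ * bnxt_en-style hwrm_req_*() helpers with 'req' and 'resp' already set up:
+ *
+ *	u16 type = FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP;
+ *
+ *	do {
+ *		req->type = cpu_to_le16(type);
+ *		rc = hwrm_req_send(bp, req);
+ *		if (rc)
+ *			break;
+ *		(record entry_size, flags, min/max_num_entries for 'type')
+ *		type = le16_to_cpu(resp->next_valid_type);
+ *	} while (type != FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID);
+ */
+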
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
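+/*
+ * Editorial usage sketch, not part of the generated interface: the
+ * dbr_stat_db_fifo_reg word packs an address-space selector in bits 1:0 and
+ * a 4-byte-aligned register offset in bits 31:2, so a driver decodes it
+ * before mapping the doorbell FIFO status register:
+ *
+ *	u32 reg = le32_to_cpu(resp->dbr_stat_db_fifo_reg);
+ *	u32 space = reg & FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+ *	u32 offset = reg & FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK;
+ *
+ * dbr_throttling_aeq_arm_reg is encoded the same way.
+ */
+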
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
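+/*
+ * Editorial note, not part of the generated interface: in the bnxt_en driver
+ * this command is sent with FLAGS_UP when the interface comes up and with
+ * flags cleared when it goes down; the response flags then tell the driver
+ * whether resources or capabilities changed underneath it (for example after
+ * a hot firmware reset) and must be re-queried:
+ *
+ *	req->flags = cpu_to_le32(up ? FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP : 0);
+ *	...
+ *	if (le32_to_cpu(resp->flags) &
+ *	    FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ *		(re-initialize firmware resources before resuming traffic)
+ */
+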
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_ENABLE 0x800000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_DISABLE 0x1000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_ENABLE 0x2000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_DISABLE 0x4000000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
+ u8 unused_2[6];
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
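+/*
+ * Editorial usage sketch, not part of the generated interface: forcing a
+ * legacy (NRZ) link speed combines the FORCE flag with force_link_speed,
+ * while the optional fields gated by 'enables' are only consumed when their
+ * bits are set.  The RETRY error code above hints that callers should be
+ * ready to reissue the command:
+ *
+ *	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ *	req->force_link_speed =
+ *		cpu_to_le16(PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB);
+ */
+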
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_LINK_TRAINING 0x8UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_PRECODING 0x10UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_TX_LASER_DISABLED 0x20UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
+ u8 valid;
+};
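+
+/*
+ * Decoding sketch (editor note, not emitted by the HSI generator):
+ * packed fields carry paired _MASK/_SFT defines, and enumerated values
+ * that live in a sub-field are pre-shifted, so a hypothetical `resp'
+ * pointer to this response would be decoded as:
+ *
+ *   u8 phy_addr = resp->eee_config_phy_addr &
+ *                 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+ *   u32 w = le32_to_cpu(resp->xcvr_identifier_type_tx_lpi_timer);
+ *   u32 tx_lpi_timer = w & PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
+ *   bool is_sfp = (w & PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK) ==
+ *                 PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP;
+ *
+ * The *_LAST defines name the highest enumerated value for range checks.
+ * force_pam4_link_speed and force_link_speeds2 values appear to encode
+ * the speed in units of 100 Mb/s (0x1f4 == 500 -> 50 Gb/s), with the
+ * speeds2 variants adding +1 for PAM4-56 and +2 for PAM4-112 signaling
+ * -- an observation from the values, not a spec quote.
+ */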
+
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+ __s32 ptp_freq_adj_ppb;
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+ __s64 ptp_adj_phase;
+};
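+
+/*
+ * Usage note (editor sketch): HWRM requests share the leading header
+ * (req_type, cmpl_ring, seq_id, target_id, resp_addr); resp_addr is the
+ * DMA address where firmware writes the response.  Bits in `enables'
+ * declare which optional request fields firmware should consume, so
+ * e.g. ptp_freq_adj_ppb is only applied when
+ * PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB is set.  The paired
+ * _ENABLE/_DISABLE flag bits (PTP, WoL, pri2cos) let a caller flip one
+ * setting while leaving the rest alone; setting neither bit of a pair
+ * should leave that setting unchanged -- our reading of the convention,
+ * not a spec quote.
+ */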
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
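+
+/*
+ * Response note (editor sketch): every HWRM response echoes req_type and
+ * seq_id and reports error_code/resp_len; the trailing `valid' byte is
+ * written by firmware after the rest of the response has been DMA'd, so
+ * the host polls it to detect completion before trusting the other
+ * fields.
+ */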
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
+ u8 unused_1[7];
+ u8 valid;
+};
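+
+/*
+ * PTP access note (editor sketch): the flags advertise how timestamps
+ * can be read -- DIRECT_ACCESS/PARTIAL_DIRECT_ACCESS_REF_CLOCK suggest
+ * the *_reg_off_* fields are device register offsets the host can read
+ * directly, while HWRM_ACCESS means timestamps are fetched through HWRM
+ * commands instead.  This mirrors how the bnxt driver consumes the
+ * equivalent structure; treat it as an assumption for bnge.
+ */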
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
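+
+/*
+ * Statistics flow (editor sketch): the host allocates DMA-coherent
+ * buffers of at least sizeof(struct tx_port_stats) and
+ * sizeof(struct rx_port_stats) bytes and passes their addresses in
+ * tx_stat_host_addr/rx_stat_host_addr; firmware writes the counters and
+ * reports how many bytes it actually produced in
+ * tx_stat_size/rx_stat_size, which keeps older hosts compatible with
+ * firmware that knows more counters.  FLAGS_COUNTER_MASK is understood
+ * to return each counter's maximum value (its width mask) instead of
+ * live statistics, for wraparound handling -- hedged, per the bnxt
+ * usage of the same flag.
+ */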
+
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ __le16 total_active_cos_queues;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+ u8 valid;
+};
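+
+/*
+ * Extended stats note (editor sketch): unlike plain qstats, the request
+ * itself carries tx_stat_size/rx_stat_size so the host can state how
+ * much buffer it provided; the response returns the sizes firmware
+ * honored plus total_active_cos_queues, telling the host how much of
+ * the per-CoS arrays in tx_port_stats_ext/rx_port_stats_ext to trust.
+ */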
+
+/* hwrm_port_qstats_ext_pfc_wd_input (size:256b/32B) */
+struct hwrm_port_qstats_ext_pfc_wd_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 pfc_wd_stat_size;
+ u8 unused_0[4];
+ __le64 pfc_wd_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_pfc_wd_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_pfc_wd_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfc_wd_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
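+
+/*
+ * ECN note (editor sketch): mark_cnt_cos0..7 read as counts of packets
+ * ECN-CE marked per CoS queue, and mark_en in the qstats response as
+ * whether marking is currently enabled -- an interpretation from the
+ * field names, not a spec quote.
+ */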
+
+/* port_stats_ext_pfc_adv (size:1536b/192B) */
+struct port_stats_ext_pfc_adv {
+ __le64 pfc_min_duration_time[8];
+ __le64 pfc_max_duration_time[8];
+ __le64 pfc_weighted_duration_time[8];
+};
+
+/* hwrm_port_qstats_ext_pfc_adv_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_pfc_adv_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 pfc_adv_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_PFC_ADV_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 tx_pfc_adv_stat_host_addr;
+ __le64 rx_pfc_adv_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_pfc_adv_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_pfc_adv_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfc_adv_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
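+
+/*
+ * Timestamp query note (editor sketch): FLAGS_PATH selects the TX or RX
+ * timestamp FIFO, while FLAGS_CURRENT_TIME instead returns the current
+ * PHC time in ptp_msg_ts.  The enables-gated ptp_seq_id/ptp_hdr_offset
+ * fields let the host ask for the timestamp matching a specific PTP
+ * sequence id, as the bnxt PTP code does with the equivalent command
+ * (assumed to carry over here).
+ */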
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_ADV_STATS_SUPPORTED 0x20UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
+};
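+
+/*
+ * Capability note (editor sketch): the supported_speeds*_force_mode and
+ * supported_speeds*_auto_mode bitmaps advertise which entries of the
+ * corresponding PORT_PHY_CFG/QCFG speed enums this port accepts in
+ * forced vs. autoneg configuration, and FLAGS2_SPEEDS2_SUPPORTED
+ * indicates the newer speeds2 scheme should be used.  tx_lpi_timer_low
+ * and tx_lpi_timer_high bound the programmable EEE TX LPI timer (24-bit
+ * values, per the _MASK defines).
+ */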
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
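+
+/*
+ * I2C note (editor sketch): these commands are typically used to access
+ * transceiver module EEPROM/DDM pages over the module's I2C bus;
+ * page_number/page_offset and the optional bank_number select the
+ * region, and data_length is in bytes (at most 64, matching the
+ * __le32 data[16] payload).
+ */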
+
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
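+
+/*
+ * MDIO note (editor sketch): cl45_mdio selects Clause 45 addressing, in
+ * which case dev_addr is the MMD device address and reg_addr the 16-bit
+ * register within it; with Clause 22, phy_addr/reg_addr alone identify
+ * the register (the usual meaning of these fields, assumed here).
+ */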
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
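+
+/*
+ * LED note (editor sketch): each ledN_* field is only consumed when its
+ * ENABLES_LEDN_* bit is set, and num_leds caps how many of the four
+ * per-LED blocks firmware looks at.  ledN_blink_on/ledN_blink_off hold
+ * the on/off intervals for the BLINK states (units per the HWRM spec;
+ * the bnxt driver appears to program them in milliseconds).
+ */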
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_port_phy_fdrstat_input (size:192b/24B) */
+struct hwrm_port_phy_fdrstat_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 rsvd[2];
+ __le16 ops;
+ #define PORT_PHY_FDRSTAT_REQ_OPS_START 0x0UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_STOP 0x1UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_CLEAR 0x2UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_COUNTER 0x3UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_LAST PORT_PHY_FDRSTAT_REQ_OPS_COUNTER
+};
+
+/* hwrm_port_phy_fdrstat_output (size:3072b/384B) */
+struct hwrm_port_phy_fdrstat_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 start_time;
+ __le64 end_time;
+ __le64 cmic_start_time;
+ __le64 cmic_end_time;
+ __le64 accumulated_uncorrected_codewords;
+ __le64 accumulated_corrected_codewords;
+ __le64 accumulated_total_codewords;
+ __le64 accumulated_symbol_errors;
+ __le64 accumulated_codewords_err_s0;
+ __le64 accumulated_codewords_err_s1;
+ __le64 accumulated_codewords_err_s2;
+ __le64 accumulated_codewords_err_s3;
+ __le64 accumulated_codewords_err_s4;
+ __le64 accumulated_codewords_err_s5;
+ __le64 accumulated_codewords_err_s6;
+ __le64 accumulated_codewords_err_s7;
+ __le64 accumulated_codewords_err_s8;
+ __le64 accumulated_codewords_err_s9;
+ __le64 accumulated_codewords_err_s10;
+ __le64 accumulated_codewords_err_s11;
+ __le64 accumulated_codewords_err_s12;
+ __le64 accumulated_codewords_err_s13;
+ __le64 accumulated_codewords_err_s14;
+ __le64 accumulated_codewords_err_s15;
+ __le64 accumulated_codewords_err_s16;
+ __le64 uncorrected_codewords;
+ __le64 corrected_codewords;
+ __le64 total_codewords;
+ __le64 symbol_errors;
+ __le64 codewords_err_s0;
+ __le64 codewords_err_s1;
+ __le64 codewords_err_s2;
+ __le64 codewords_err_s3;
+ __le64 codewords_err_s4;
+ __le64 codewords_err_s5;
+ __le64 codewords_err_s6;
+ __le64 codewords_err_s7;
+ __le64 codewords_err_s8;
+ __le64 codewords_err_s9;
+ __le64 codewords_err_s10;
+ __le64 codewords_err_s11;
+ __le64 codewords_err_s12;
+ __le64 codewords_err_s13;
+ __le64 codewords_err_s14;
+ __le64 codewords_err_s15;
+ __le64 codewords_err_s16;
+ __le32 window_size;
+ __le16 unused_0[1];
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_fdrstat_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_fdrstat_cmd_err {
+ u8 code;
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED 0x1UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_LAST PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED
+ u8 unused_0[7];
+};
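+
+/*
+ * FDR stats note (editor sketch): OPS_START/STOP/CLEAR/COUNTER drive a
+ * FEC measurement window; the accumulated_* fields run since START (or
+ * the last CLEAR) while the unprefixed set covers the most recent
+ * window_size interval.  codewords_err_s0..s16 read as a histogram of
+ * codewords by number of errored symbols -- an inference from the
+ * names, not a spec quote.
+ */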
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
+};
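+
+/*
+ * Queue config note (editor sketch): FLAGS_PATH selects whether the TX
+ * or RX CoS queue set is reported; when the response sets
+ * QUEUE_CFG_INFO_ASYM_CFG the two directions differ and the host is
+ * expected to query both paths, as the bnxt driver does for the
+ * equivalent command.
+ */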
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
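+
+/*
+ * Editor's sketch (illustrative, not part of the generated header): a
+ * response is complete only once the trailing 'valid' byte is non-zero,
+ * and the queue_idN/queue_idN_service_profile pairs are laid out
+ * back-to-back, so a caller (resp assumed to point at the mapped
+ * response buffer) might walk them as a flat array:
+ *
+ *	u8 *qptr = &resp->queue_id0;
+ *	u8 i, id, profile;
+ *
+ *	for (i = 0; i < resp->max_configurable_queues; i++) {
+ *		id = *qptr++;
+ *		profile = *qptr++;
+ *		... consume the (id, profile) pair ...
+ *	}
+ */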
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
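+
+/*
+ * Illustrative only (editor's note, not generated content): PATH is a
+ * one-bit selector inside flags, so querying the RX side of a queue
+ * (req assumed to point at the request buffer) might look like:
+ *
+ *	req->flags = cpu_to_le32(QUEUE_QCFG_REQ_FLAGS_PATH_RX);
+ *	req->queue_id = cpu_to_le32(id);
+ */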
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
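+
+/*
+ * Editor's sketch (illustrative): a field in this request takes effect
+ * only when its ENABLES bit is set, so switching a queue to lossless on
+ * both paths without touching dflt_len might be (req assumed to point
+ * at the request buffer):
+ *
+ *	req->flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
+ *	req->enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+ *	req->queue_id = cpu_to_le32(id);
+ *	req->service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ */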
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ u8 unused_0[3];
+ u8 valid;
+};
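+
+/*
+ * Editor's sketch (illustrative): the low eight flag bits form a
+ * per-priority PFC bitmap (bit N <=> priority N) and the watchdog bits
+ * repeat the pattern shifted up by eight, so a decode (resp assumed to
+ * point at the response buffer) might be:
+ *
+ *	u32 flags = le32_to_cpu(resp->flags);
+ *	int pri;
+ *
+ *	for (pri = 0; pri < 8; pri++) {
+ *		bool pfc = flags &
+ *			(QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED << pri);
+ *		bool wd = flags &
+ *			(QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED << pri);
+ *		... record (pfc, wd) for this priority ...
+ *	}
+ */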
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
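+
+/*
+ * Editor's sketch (illustrative): the eight priN_cos_queue_id bytes are
+ * contiguous, so the priority-to-CoS map can be read as an array (resp
+ * assumed to point at the response buffer, map at eight u8 slots):
+ *
+ *	u8 *pri2cos = &resp->pri0_cos_queue_id;
+ *	int i;
+ *
+ *	for (i = 0; i < 8; i++)
+ *		map[i] = pri2cos[i];
+ */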
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
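+
+/*
+ * Illustrative sketch (editor's note): each priN_cos_queue_id byte is
+ * honoured only when the matching ENABLES bit is set; remapping
+ * priority 0 to CoS queue 2 on both paths (req assumed to point at the
+ * request buffer) might be:
+ *
+ *	req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
+ *	req->enables = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID);
+ *	req->pri0_cos_queue_id = 2;
+ */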
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2[4];
+ u8 valid;
+};
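+
+/*
+ * Editor's sketch (illustrative): each min/max bandwidth word packs a
+ * 28-bit magnitude in its low bits, a bits-vs-bytes scale flag at bit
+ * 28 and a 3-bit unit at bits 29-31, so a decode under these
+ * definitions (resp assumed to point at the response buffer) might be:
+ *
+ *	u32 bw = le32_to_cpu(resp->queue_id0_min_bw);
+ *	u32 val = (bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
+ *		  QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
+ *	u32 unit = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;
+ *	bool bytes = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES;
+ *
+ * with unit then compared against the ..._UNIT_MEGA/KILO/BASE/GIGA/
+ * PERCENT1_100/INVALID values above.
+ */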
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
+};
+
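+/*
+ * Illustrative sketch (not part of the generated interface): every
+ * *_min_bw/*_max_bw word above packs a 28-bit bandwidth magnitude, one
+ * scale bit and a 3-bit unit code into a single little-endian u32.  A
+ * minimal, hypothetical helper composing the queue 7 minimum-bandwidth
+ * word; cpu_to_le32() is the usual kernel byte-order helper:
+ */
+static inline __le32 bnge_cos2bw_min_bw_word(u32 bw_val)
+{
+	u32 word;
+
+	/* 28-bit magnitude, shifted into place and masked */
+	word = (bw_val << QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT) &
+	       QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK;
+	/* scale and unit selectors (both happen to be 0 for BITS/MEGA) */
+	word |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS;
+	word |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA;
+	return cpu_to_le32(word);
+}
+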
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
+ __le16 virtio_net_fid;
+ __le16 vnic_id;
+};
+
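+/*
+ * Illustrative sketch: allocating a default VNIC only needs the DEFAULT
+ * flag; virtio_net_fid and vnic_id are presumably consulted by firmware
+ * only when their *_VALID flag bits are set.  The common request-header
+ * fields (req_type, resp_addr, ...) are assumed to be filled by the
+ * driver's request machinery and are left out of this hypothetical helper:
+ */
+static inline void bnge_fill_vnic_alloc(struct hwrm_vnic_alloc_input *req)
+{
+	req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+}
+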
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
+ #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
+ __le16 queue_id;
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ __le32 raw_qp_id;
+};
+
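+/*
+ * Illustrative sketch: as in other HWRM _cfg requests, each bit in
+ * 'enables' presumably marks which of the following fields carries valid
+ * data.  A hypothetical helper that updates only the MRU of an existing
+ * VNIC, leaving every other attribute untouched:
+ */
+static inline void bnge_vnic_cfg_set_mru(struct hwrm_vnic_cfg_input *req,
+					 u16 vnic, u16 mru)
+{
+	req->vnic_id = cpu_to_le16(vnic);
+	req->mru = cpu_to_le16(mru);
+	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_MRU);
+}
+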
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
+};
+
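+/*
+ * Illustrative sketch (field semantics beyond the bit layout above are not
+ * spelled out in this header): tnl_tpa_en_bitmap is presumably consulted
+ * only when ENABLES_TNL_TPA_EN is set.  A hypothetical helper enabling
+ * TPA/GRO with tunnel aggregation for VXLAN and Geneve:
+ */
+static inline void bnge_tpa_cfg_vxlan_geneve(struct hwrm_vnic_tpa_cfg_input *req,
+					     u16 vnic)
+{
+	req->vnic_id = cpu_to_le16(vnic);
+	req->flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
+				 VNIC_TPA_CFG_REQ_FLAGS_GRO);
+	req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+	req->tnl_tpa_en_bitmap =
+		cpu_to_le32(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN |
+			    VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE);
+}
+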
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
+};
+
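+/*
+ * Illustrative sketch: hash_type is a plain OR of the HASH_TYPE_* bits,
+ * while ring_select_mode is an enumeration, not a bitmask.  A hypothetical
+ * 4-tuple-TCP / 2-tuple-IP Toeplitz configuration:
+ */
+static inline void bnge_rss_cfg_default(struct hwrm_vnic_rss_cfg_input *req,
+					u16 vnic, u16 rss_ctx)
+{
+	req->vnic_id = cpu_to_le16(vnic);
+	req->rss_ctx_idx = cpu_to_le16(rss_ctx);
+	req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+				     VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
+	req->ring_select_mode = VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ;
+}
+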
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNABLE_TO_GET_RSS_CFG 0x2UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED 0x3UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_ERR 0x4UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_MODE_FAIL 0x5UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RING_GRP_TABLE_ALLOC_ERR 0x6UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_KEY_ALLOC_ERR 0x7UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_DMA_FAILED 0x8UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RX_RING_ALLOC_ERR 0x9UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CMPL_RING_ALLOC_ERR 0xaUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HW_SET_RSS_FAILED 0xbUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CTX_INVALID 0xcUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID 0xdUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID 0xeUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 max_bds;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:768b/96B) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ #define RING_ALLOC_REQ_ENABLES_DPI_VALID 0x2000UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ __le16 schq_id;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 steering_tag;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
+ __le64 cq_handle;
+ __le16 dpi;
+ __le16 unused_5[3];
+};
+
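+/*
+ * Illustrative sketch: ring_type selects which fields are meaningful, and
+ * the optional ones (stat_ctx_id, nq_ring_id, rx_buf_size, ...) must be
+ * matched by their *_VALID bits in 'enables'.  A hypothetical RX ring
+ * request; page_tbl_addr, length, etc. are assumed to be filled elsewhere:
+ */
+static inline void bnge_fill_rx_ring_alloc(struct hwrm_ring_alloc_input *req,
+					   u32 stat_ctx, u16 nq_ring,
+					   u16 buf_size)
+{
+	req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+	req->stat_ctx_id = cpu_to_le32(stat_ctx);
+	req->nq_ring_id = cpu_to_le16(nq_ring);
+	req->rx_buf_size = cpu_to_le16(buf_size);
+	req->enables = cpu_to_le32(RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID |
+				   RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID |
+				   RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+}
+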
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:256b/32B) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
+ __le16 ring_id;
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
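+/*
+ * Illustrative sketch: the qcaps response reports a [min, max] range for
+ * each coalescing parameter, so a requested value should presumably be
+ * clamped against it before being sent.  clamp_t() is the usual kernel
+ * helper from <linux/minmax.h>; the helper name here is hypothetical:
+ */
+static inline u16
+bnge_clamp_int_lat_tmr_min(const struct hwrm_ring_aggint_qcaps_output *qcaps,
+			   u16 want)
+{
+	return clamp_t(u16, want,
+		       le16_to_cpu(qcaps->int_lat_tmr_min_min),
+		       le16_to_cpu(qcaps->int_lat_tmr_min_max));
+}
+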
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
+ u8 l2_addr[6];
+ u8 num_vlans;
+ u8 t_num_vlans;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
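+/*
+ * Illustrative sketch: an exact-match RX MAC filter sets both the address
+ * and an all-ones mask, flagging each with its 'enables' bit.
+ * ether_addr_copy() and eth_broadcast_addr() are the usual
+ * <linux/etherdevice.h> helpers; this helper itself is hypothetical:
+ */
+static inline void
+bnge_fill_l2_filter(struct hwrm_cfa_l2_filter_alloc_input *req, const u8 *mac)
+{
+	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+	req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+				   CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	ether_addr_copy(req->l2_addr, mac);
+	eth_broadcast_addr(req->l2_addr_mask);
+}
+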
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
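+/*
+ * Illustrative sketch: flow_id multiplexes a 30-bit value with a type bit
+ * and a direction bit, so decoding is a mask-and-shift (hypothetical
+ * helper names):
+ */
+static inline u32 bnge_flow_id_value(__le32 flow_id)
+{
+	return (le32_to_cpu(flow_id) &
+		CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK) >>
+	       CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT;
+}
+
+static inline bool bnge_flow_id_is_tx(__le32 flow_id)
+{
+	return le32_to_cpu(flow_id) & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR;
+}
+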
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_MAX_VLAN_TAGS 0x2UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_VNIC_ID 0x3UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION 0x4UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_MAC 0x65UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_BC_MC_MAC 0x66UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_VNIC 0x67UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_PF_FID 0x68UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L2_CTXT_ID 0x69UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_CTXT_CFG 0x6aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_DATA_FLD 0x6bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_CFA_LAYOUT 0x6cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_CTXT_ALLOC_FAIL 0x6dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ROCE_FLOW_ERR 0x6eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_OWNER_FID 0x6fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_REF_CNT 0x70UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_FLOW_TYPE 0x71UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_IVLAN 0x72UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_MAX_VLAN_ID 0x73UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_TNL_REQ 0x74UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_ADDR 0x75UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_IVLAN 0x76UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR 0x77UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR_TYPE 0x78UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_T_L3_ADDR_TYPE 0x79UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DST_VNIC_ID 0x7aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VNI 0x7bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_DST_ID 0x7cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_FAIL_ROCE_L2_FLOW 0x7dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_NPAR_VLAN 0x7eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ATSP_ADD 0x7fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DFLT_VLAN_FAIL 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L3_TYPE 0x81UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW 0x82UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
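upar_in_use in the response is a plain bitmap, one bit per UPAR slot (UPAR0 in bit 0 through UPAR7 in bit 7). A short decode sketch, assuming the byte has already been copied out of the completion:

#include <stdio.h>
#include <stdint.h>

/* Bit n of upar_in_use corresponds to ..._UPAR_IN_USE_UPAR<n>. */
static void print_upar_in_use(uint8_t upar_in_use)
{
	for (int n = 0; n < 8; n++)
		if (upar_in_use & (1u << n))
			printf("UPAR%d in use\n", n);
}
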
+/* hwrm_tunnel_dst_port_alloc_cmd_err (size:64b/8B) */
+struct hwrm_tunnel_dst_port_alloc_cmd_err {
+ u8 code;
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_UNKNOWN 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_TUNNEL_ALLOC_ERR 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_ACCESS_DENIED 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_GET_PORT_FAILED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_PORT_NUM_ERR 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_CUSTOM_TNL_PORT_ERR 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_TUNNEL_QUERY_ERR 0x6UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_GRE_MODE_UNSUPPORTED 0x7UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_GRE_ALREADY_ALLOC 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_TUNNEL_TYPE_INVALID 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_UPAR_ERR 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_CMD_ERR_LAST TUNNEL_DST_PORT_ALLOC_CMD_ERR_UPAR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
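ctx_hw_stats is 20 consecutive little-endian 64-bit counters (160 bytes) that firmware DMAs into the buffer registered at stat-context allocation. A minimal sketch of folding them into host byte order; the raw pointer is assumed to come from that DMA mapping:

#include <stdint.h>
#include <endian.h>

#define CTX_HW_STATS_QWORDS 20	/* 160B / 8, matching size:1280b/160B */

/* Endian-swap the raw DMA buffer into host-order counters, index for index. */
static void ctx_hw_stats_to_host(const uint64_t *raw_le,
				 uint64_t out[CTX_HW_STATS_QWORDS])
{
	for (int i = 0; i < CTX_HW_STATS_QWORDS; i++)
		out[i] = le64toh(raw_le[i]);
}
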
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_UNKNOWN 0x0UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_INVALID_FID 0x65UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_INVALID_CTX_ID 0x66UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_INVALID_PAYLOAD 0x67UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_CTX_STAT_RETRIEVAL_FAIL 0x68UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_RES_NOT_ALLOCATED 0x69UL
+#define HWRM_STAT_COMMON_CMD_ERR_CODE_LAST HWRM_STAT_COMMON_CMD_ERR_CODE_RES_NOT_ALLOCATED
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+};
+
+/* ctx_eng_stats (size:512b/64B) */
+struct ctx_eng_stats {
+ __le64 eng_bytes_in;
+ __le64 eng_bytes_out;
+ __le64 aux_bytes_in;
+ __le64 aux_bytes_out;
+ __le64 commands;
+ __le64 error_commands;
+ __le64 cce_engine_usage;
+ __le64 cdd_engine_usage;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+ u8 unused_0;
+ __le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
+};
+
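Allocation binds a host DMA buffer and an update period to the new context; stats_dma_length tells firmware how much it may write there. A hedged fill sketch mirroring the 48-byte layout above; the mirror struct is illustrative rather than kernel code, and the DMA mapping and send path are the caller's responsibility:

#include <stdint.h>
#include <string.h>
#include <endian.h>

/* Mirror of hwrm_stat_ctx_alloc_input (48B); field names follow the header. */
struct stat_ctx_alloc_req {
	uint16_t req_type, cmpl_ring, seq_id, target_id;
	uint64_t resp_addr;
	uint64_t stats_dma_addr;
	uint32_t update_period_ms;
	uint8_t  stat_ctx_flags;
	uint8_t  unused_0;
	uint16_t stats_dma_length;
	uint16_t flags;
	uint16_t steering_tag;
	uint32_t stat_ctx_id;
	uint16_t alloc_seq_id;
	uint8_t  unused_1[6];
} __attribute__((packed));
_Static_assert(sizeof(struct stat_ctx_alloc_req) == 48, "must match size:384b/48B");

/* Ask for 1 s update periods into a 160B ctx_hw_stats buffer. */
static void fill_stat_ctx_alloc(struct stat_ctx_alloc_req *req, uint64_t dma)
{
	memset(req, 0, sizeof(*req));
	req->stats_dma_addr = htole64(dma);
	req->stats_dma_length = htole16(160);
	req->update_period_ms = htole32(1000);
}
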
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc_cmd_err (size:64b/8B) */
+struct hwrm_stat_ctx_alloc_cmd_err {
+ u8 code;
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_INVALID_FID 0x1UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_INVALID_FLAG 0x2UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_INVALID_DMA_ADDR 0x3UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_RES_NOT_AVAIL 0x4UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_RES_POOL_EXHAUSTED 0x5UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_CTX_ALLOC_FAIL 0x6UL
+ #define STAT_CTX_ALLOC_CMD_ERR_CODE_LAST STAT_CTX_ALLOC_CMD_ERR_CODE_CTX_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_free_cmd_err (size:64b/8B) */
+struct hwrm_stat_ctx_free_cmd_err {
+ u8 code;
+ #define STAT_CTX_FREE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_INVALID_CTX_ID 0x1UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_RES_DEALLOC_FAIL 0x2UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_CTX_FREE_FAIL 0x3UL
+ #define STAT_CTX_FREE_CMD_ERR_CODE_LAST STAT_CTX_FREE_CMD_ERR_CODE_CTX_FREE_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_eng_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */
+struct hwrm_stat_ctx_eng_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 eng_bytes_in;
+ __le64 eng_bytes_out;
+ __le64 aux_bytes_in;
+ __le64 aux_bytes_out;
+ __le64 commands;
+ __le64 error_commands;
+ __le64 cce_engine_usage;
+ __le64 cdd_engine_usage;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_cmd_err (size:64b/8B) */
+struct hwrm_pcie_qstats_cmd_err {
+ u8 code;
+ #define PCIE_QSTATS_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_LEGACY_INVALID_PF_ID 0x1UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_GENERIC_INVALID_EP_IDX 0x2UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_GENERIC_MEM_ALLOC_FAIL 0x3UL
+ #define PCIE_QSTATS_CMD_ERR_CODE_LAST PCIE_QSTATS_CMD_ERR_CODE_GENERIC_MEM_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4544b/568B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+ __le32 pcie_rd_latency_histogram[12];
+ __le32 pcie_rd_latency_all_normal_count;
+ __le32 unused_2;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_stat_generic_qstats_cmd_err (size:64b/8B) */
+struct hwrm_stat_generic_qstats_cmd_err {
+ u8 code;
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_INVALID_EP_ID 0x1UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_INVALID_STAT_SIZE 0x2UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_INVALID_DMA_ADDR 0x3UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_HOST_NOT_ACTIVE 0x4UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_MEM_ALLOC_FAIL 0x5UL
+ #define STAT_GENERIC_QSTATS_CMD_ERR_CODE_LAST STAT_GENERIC_QSTATS_CMD_ERR_CODE_MEM_ALLOC_FAIL
+ u8 unused_0[7];
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
+};
+
+/* hwrm_stat_db_error_qstats_input (size:128b/16B) */
+struct hwrm_stat_db_error_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_stat_db_error_qstats_output (size:320b/40B) */
+struct hwrm_stat_db_error_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 tx_db_drop_invalid_qp_state;
+ __le32 rx_db_drop_invalid_rq_state;
+ __le32 tx_db_drop_format_error;
+ __le32 express_db_dropped_misc_error;
+ __le32 express_db_dropped_sq_overflow;
+ __le32 express_db_dropped_rq_overflow;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_query_roce_stats_input (size:256b/32B) */
+struct hwrm_stat_query_roce_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 roce_stat_size;
+ u8 unused_0[6];
+ __le64 roce_stat_host_addr;
+};
+
+/* hwrm_stat_query_roce_stats_output (size:128b/16B) */
+struct hwrm_stat_query_roce_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 roce_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* stat_query_roce_stats_data (size:2944b/368B) */
+struct stat_query_roce_stats_data {
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
+ __le64 xp_sq_overflow_err;
+ __le64 xp_rq_overflow_error;
+};
+
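The query output above carries only roce_stat_size; the counters themselves land in the host buffer at roce_stat_host_addr as this 46-qword block. A cautious copy-out sketch that trusts the reported size rather than the compile-time one, so a shorter block from older firmware simply leaves the tail counters at zero:

#include <stdint.h>
#include <string.h>
#include <endian.h>

#define ROCE_STATS_QWORDS 46	/* 368B / 8, matching size:2944b/368B */

/* Copy out however much the firmware reported (roce_stat_size, in bytes),
 * then endian-swap whole qwords into host order. */
static void roce_stats_to_host(const void *dma_buf, uint16_t roce_stat_size,
			       uint64_t out[ROCE_STATS_QWORDS])
{
	uint64_t raw[ROCE_STATS_QWORDS] = {0};
	size_t n = roce_stat_size < sizeof(raw) ? roce_stat_size : sizeof(raw);

	memcpy(raw, dma_buf, n);
	for (int i = 0; i < ROCE_STATS_QWORDS; i++)
		out[i] = le64toh(raw[i]);
}
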
+/* hwrm_stat_query_roce_stats_ext_input (size:256b/32B) */
+struct hwrm_stat_query_roce_stats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 roce_stat_size;
+ u8 unused_0[6];
+ __le64 roce_stat_host_addr;
+};
+
+/* hwrm_stat_query_roce_stats_ext_output (size:128b/16B) */
+struct hwrm_stat_query_roce_stats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 roce_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* stat_query_roce_stats_ext_data (size:2240b/280B) */
+struct stat_query_roce_stats_ext_data {
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_send_req_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_send_req_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_ecn_marked_pkts;
+ __le64 tx_cnp_bytes;
+ __le64 rx_cnp_bytes;
+ __le64 seq_err_naks_rcvd;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 to_retransmit;
+ __le64 dup_req;
+ __le64 rx_dcn_payload_cut;
+ __le64 te_bypassed;
+ __le64 tx_dcn_cnp;
+ __le64 rx_dcn_cnp;
+ __le64 rx_payload_cut;
+ __le64 rx_payload_cut_ignored;
+ __le64 rx_dcn_cnp_ignored;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 host_idx;
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+ u8 unused_0[4];
+};
+
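embedded_proc_type picks what gets reset and flags can make the reset graceful; the self-reset behavior the host should expect comes back in the response. A minimal request-fill sketch mirroring the 24-byte layout; the opcode, ring, and sequence bookkeeping belong to the message transport and are elided here:

#include <stdint.h>
#include <string.h>

/* Mirror of hwrm_fw_reset_input (24B); field names follow the header. */
struct fw_reset_req {
	uint16_t req_type, cmpl_ring, seq_id, target_id;
	uint64_t resp_addr;
	uint8_t  embedded_proc_type;
	uint8_t  selfrst_status;
	uint8_t  host_idx;
	uint8_t  flags;
	uint8_t  unused_0[4];
} __attribute__((packed));
_Static_assert(sizeof(struct fw_reset_req) == 24, "must match size:192b/24B");

/* Request a graceful whole-chip reset. */
static void fill_graceful_chip_reset(struct fw_reset_req *req)
{
	memset(req, 0, sizeof(*req));
	req->embedded_proc_type = 0x6; /* ..._EMBEDDED_PROC_TYPE_CHIP */
	req->flags = 0x1;              /* ..._FLAGS_RESET_GRACEFUL */
}
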
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
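The timestamp is broken-down calendar time plus a zone code (0 = UTC, 65535 = unknown). A sketch converting a time_t with gmtime_r; millisecond granularity is assumed unavailable here and left zero:

#include <stdint.h>
#include <string.h>
#include <time.h>
#include <endian.h>

/* Mirror of hwrm_fw_set_time_input (32B); field names follow the header. */
struct fw_set_time_req {
	uint16_t req_type, cmpl_ring, seq_id, target_id;
	uint64_t resp_addr;
	uint16_t year;
	uint8_t  month, day, hour, minute, second, unused_0;
	uint16_t millisecond;
	uint16_t zone;
	uint8_t  unused_1[4];
} __attribute__((packed));
_Static_assert(sizeof(struct fw_set_time_req) == 32, "must match size:256b/32B");

static void fill_fw_set_time(struct fw_set_time_req *req, time_t now)
{
	struct tm tm;

	gmtime_r(&now, &tm);
	memset(req, 0, sizeof(*req));
	req->year   = htole16((uint16_t)(tm.tm_year + 1900));
	req->month  = (uint8_t)(tm.tm_mon + 1);
	req->day    = (uint8_t)tm.tm_mday;
	req->hour   = (uint8_t)tm.tm_hour;
	req->minute = (uint8_t)tm.tm_min;
	req->second = (uint8_t)tm.tm_sec;
	req->zone   = htole16(0); /* FW_SET_TIME_REQ_ZONE_UTC */
}
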
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_get_time_input (size:128b/16B) */
+struct hwrm_fw_get_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_get_time_output (size:192b/24B) */
+struct hwrm_fw_get_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 year;
+ #define FW_GET_TIME_RESP_YEAR_UNKNOWN 0x0UL
+ #define FW_GET_TIME_RESP_YEAR_LAST FW_GET_TIME_RESP_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_GET_TIME_RESP_ZONE_UTC 0
+ #define FW_GET_TIME_RESP_ZONE_UNKNOWN 65535
+ #define FW_GET_TIME_RESP_ZONE_LAST FW_GET_TIME_RESP_ZONE_UNKNOWN
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS 0x190UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS
+ __le16 len;
+ u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
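Structured data travels as one or more of these 16-byte headers, each followed by its payload, with next_offset chaining them (0 = STRUCT_HDR_NEXT_OFFSET_LAST). The walker below is a sketch that assumes next_offset is the byte distance from the start of the current header; that reading should be checked against the firmware spec before relying on it:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

/* Mirror of hwrm_struct_hdr (16B); field names follow the header. */
struct struct_hdr {
	uint16_t struct_id;
	uint16_t len;
	uint8_t  version;
	uint8_t  count;
	uint16_t subtype;
	uint16_t next_offset;	/* 0 == STRUCT_HDR_NEXT_OFFSET_LAST */
	uint8_t  unused_0[6];
} __attribute__((packed));
_Static_assert(sizeof(struct struct_hdr) == 16, "must match size:128b/16B");

/* Walk a header chain inside buf, stopping at the end marker or buffer end. */
static void walk_struct_chain(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct struct_hdr) <= buf_len) {
		const struct struct_hdr *hdr = (const void *)(buf + off);
		uint16_t next = le16toh(hdr->next_offset);

		printf("struct_id 0x%x len %u\n",
		       le16toh(hdr->struct_id), le16toh(hdr->len));
		if (!next)
			break;
		off += next;
	}
}
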
+/* hwrm_struct_data_dcbx_ets (size:256b/32B) */
+struct hwrm_struct_data_dcbx_ets {
+ u8 destination;
+ #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION 0x1UL
+ #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION 0x2UL
+ #define STRUCT_DATA_DCBX_ETS_DESTINATION_LAST STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION
+ u8 max_tcs;
+ __le16 unused1;
+ u8 pri0_to_tc_map;
+ u8 pri1_to_tc_map;
+ u8 pri2_to_tc_map;
+ u8 pri3_to_tc_map;
+ u8 pri4_to_tc_map;
+ u8 pri5_to_tc_map;
+ u8 pri6_to_tc_map;
+ u8 pri7_to_tc_map;
+ u8 tc0_to_bw_map;
+ u8 tc1_to_bw_map;
+ u8 tc2_to_bw_map;
+ u8 tc3_to_bw_map;
+ u8 tc4_to_bw_map;
+ u8 tc5_to_bw_map;
+ u8 tc6_to_bw_map;
+ u8 tc7_to_bw_map;
+ u8 tc0_to_tsa_map;
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP 0x0UL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS 0x1UL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS 0x2UL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL
+ #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_LAST STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC
+ u8 tc1_to_tsa_map;
+ u8 tc2_to_tsa_map;
+ u8 tc3_to_tsa_map;
+ u8 tc4_to_tsa_map;
+ u8 tc5_to_tsa_map;
+ u8 tc6_to_tsa_map;
+ u8 tc7_to_tsa_map;
+ u8 unused_0[4];
+};
+
+/* hwrm_struct_data_dcbx_pfc (size:64b/8B) */
+struct hwrm_struct_data_dcbx_pfc {
+ u8 pfc_priority_bitmap;
+ u8 max_pfc_tcs;
+ u8 mbc;
+ u8 unused_0[5];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_struct_data_dcbx_feature_state (size:64b/8B) */
+struct hwrm_struct_data_dcbx_feature_state {
+ u8 dcbx_mode;
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE
+ u8 ets_state;
+ u8 pfc_state;
+ u8 app_state;
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 0x6UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS
+ u8 unused[3];
+ u8 resets;
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS 0x1UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC 0x2UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP 0x4UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL
+ #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_LAST STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE
+};
+
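Note that the APP_STATE constants above are bit positions (7, 6, 5), not masks like most flags in this header, so they must be shifted before testing:

#include <stdbool.h>
#include <stdint.h>

#define APP_STATE_ENABLE_BIT_POS    0x7	/* from the header above */
#define APP_STATE_WILLING_BIT_POS   0x6
#define APP_STATE_ADVERTISE_BIT_POS 0x5

/* The *_BIT_POS constants name bit positions, so build masks by shifting. */
static bool dcbx_app_enabled(uint8_t app_state)
{
	return app_state & (1u << APP_STATE_ENABLE_BIT_POS);
}
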
+/* hwrm_struct_data_lldp (size:64b/8B) */
+struct hwrm_struct_data_lldp {
+ u8 admin_state;
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_TX 0x1UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_RX 0x2UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE 0x3UL
+ #define STRUCT_DATA_LLDP_ADMIN_STATE_LAST STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE
+ u8 port_description_state;
+ #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_LAST STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE
+ u8 system_name_state;
+ #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE
+ u8 system_desc_state;
+ #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE
+ u8 system_cap_state;
+ #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE
+ u8 mgmt_addr_state;
+ #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_LAST STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE
+ u8 async_event_notification_state;
+ #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL
+ #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL
+ #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_LAST STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE
+ u8 unused_0;
+};
+
+/* hwrm_struct_data_lldp_generic (size:2112b/264B) */
+struct hwrm_struct_data_lldp_generic {
+ u8 tlv_type;
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS 0x1UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT 0x2UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME 0x3UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME 0x5UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL
+ #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_LAST STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION
+ u8 subtype;
+ u8 length;
+ u8 unused1[5];
+ __le32 tlv_value[64];
+};
+
+/* hwrm_struct_data_lldp_device (size:1472b/184B) */
+struct hwrm_struct_data_lldp_device {
+ __le16 ttl;
+ u8 mgmt_addr_len;
+ u8 mgmt_addr_type;
+ u8 unused_3[4];
+ __le32 mgmt_addr[8];
+ __le32 system_caps;
+ u8 intf_num_type;
+ u8 mgmt_addr_oid_length;
+ u8 unused_4[2];
+ __le32 intf_num;
+ u8 unused_5[4];
+ __le32 mgmt_addr_oid[32];
+};
+
+/* hwrm_struct_data_port_description (size:64b/8B) */
+struct hwrm_struct_data_port_description {
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_struct_data_rss_v2 (size:128b/16B) */
+struct hwrm_struct_data_rss_v2 {
+ __le16 flags;
+ #define STRUCT_DATA_RSS_V2_FLAGS_HASH_VALID 0x1UL
+ __le16 rss_ctx_id;
+ __le16 num_ring_groups;
+ __le16 hash_type;
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV4 0x1UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV4 0x2UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV4 0x4UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV6 0x8UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV6 0x10UL
+ #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV6 0x20UL
+ __le64 hash_key_ring_group_ids;
+};
+
+/* hwrm_struct_data_power_information (size:192b/24B) */
+struct hwrm_struct_data_power_information {
+ __le32 bkup_power_info_ver;
+ __le32 platform_bkup_power_count;
+ __le32 load_milli_watt;
+ __le32 bkup_time_milli_seconds;
+ __le32 bkup_power_status;
+ __le32 bkup_power_charge_time;
+};
+
+/* hwrm_struct_data_peer_mmap (size:1600b/200B) */
+struct hwrm_struct_data_peer_mmap {
+ __le16 fid;
+ __le16 count;
+ __le32 unused_0;
+ __le64 hpa_0;
+ __le64 gpa_0;
+ __le64 size_0;
+ __le64 hpa_1;
+ __le64 gpa_1;
+ __le64 size_1;
+ __le64 hpa_2;
+ __le64 gpa_2;
+ __le64 size_2;
+ __le64 hpa_3;
+ __le64 gpa_3;
+ __le64 size_3;
+ __le64 hpa_4;
+ __le64 gpa_4;
+ __le64 size_4;
+ __le64 hpa_5;
+ __le64 gpa_5;
+ __le64 size_5;
+ __le64 hpa_6;
+ __le64 gpa_6;
+ __le64 size_6;
+ __le64 hpa_7;
+ __le64 gpa_7;
+ __le64 size_7;
+};
+
+/* hwrm_struct_data_peer_mmap_v2 (size:1792b/224B) */
+struct hwrm_struct_data_peer_mmap_v2 {
+ __le16 fid;
+ __le16 count;
+ __le32 unused_0;
+ __le64 hpa_0;
+ __le64 gpa_0;
+ __le64 size_0;
+ __le64 hpa_1;
+ __le64 gpa_1;
+ __le64 size_1;
+ __le64 hpa_2;
+ __le64 gpa_2;
+ __le64 size_2;
+ __le64 hpa_3;
+ __le64 gpa_3;
+ __le64 size_3;
+ __le64 hpa_4;
+ __le64 gpa_4;
+ __le64 size_4;
+ __le64 hpa_5;
+ __le64 gpa_5;
+ __le64 size_5;
+ __le64 hpa_6;
+ __le64 gpa_6;
+ __le64 size_6;
+ __le64 hpa_7;
+ __le64 gpa_7;
+ __le64 size_7;
+ __le16 ds_port;
+ __le16 auth_status;
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_SUCCESS 0x0UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_NONCE_MIS 0xdUL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_SIG_INVALID 0xeUL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_AUTH_FAILED 0xfUL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_CERT_N_VAL 0x10UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_INVA_CMD_CODE 0x11UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_INVALID_HDR 0x12UL
+ #define STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_LAST STRUCT_DATA_PEER_MMAP_V2_AUTH_STATUS_INVALID_HDR
+ __le32 unused_2;
+ __le16 status[8];
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_SUCCESS 0x0UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_HDR_VER_MISMATCH 0x1UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_PKT_SOM_MISSING 0x2UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_OUT_OF_ORDER_PKTS 0x3UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ALREADY_ADDED 0x4UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ALREADY_DELETED 0x5UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NOT_ADDED 0x6UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NOT_DELETED 0x7UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NO_EP_CNTX 0x8UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_INVALID_BUF_SZ 0xaUL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ALLOC_MEM_FAILED 0xbUL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_ENTRY_CNT_ERR 0xcUL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_NO_RESPONSE 0x13UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_IPC_ERROR 0x14UL
+ #define STRUCT_DATA_PEER_MMAP_V2_STATUS_LAST STRUCT_DATA_PEER_MMAP_V2_STATUS_IPC_ERROR
+};
+
+/* hwrm_struct_data_msix_per_vf (size:320b/40B) */
+struct hwrm_struct_data_msix_per_vf {
+ __le16 pf_id;
+ __le16 count;
+ __le32 unused_0;
+ __le16 start_vf_0;
+ __le16 msix_0;
+ __le16 start_vf_1;
+ __le16 msix_1;
+ __le16 start_vf_2;
+ __le16 msix_2;
+ __le16 start_vf_3;
+ __le16 msix_3;
+ __le16 start_vf_4;
+ __le16 msix_4;
+ __le16 start_vf_5;
+ __le16 msix_5;
+ __le16 start_vf_6;
+ __le16 msix_6;
+ __le16 start_vf_7;
+ __le16 msix_7;
+};
+
+/* hwrm_struct_data_dbg_token_claims (size:128b/16B) */
+struct hwrm_struct_data_dbg_token_claims {
+ __s32 claim_number;
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_EXP 4
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_CTI 7
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_AUTH_ID -67000
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_PERSIST -67001
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_SDB_EN -68000
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_DIAGRW_EN -68003
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_FW_CLI -68100
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_LAST STRUCT_DATA_DBG_TOKEN_CLAIMS_CLAIM_NUMBER_FW_CLI
+ __le16 data_type;
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_1_BYTE 0x1UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_2_BYTES 0x2UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_4_BYTES 0x3UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_UINT_8_BYTES 0x4UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_BOOLEAN 0x5UL
+ #define STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_LAST STRUCT_DATA_DBG_TOKEN_CLAIMS_DATA_TYPE_BOOLEAN
+ __le16 unused_0;
+ u8 claim_data[8];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_ALREADY_ADDED 0x4UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG 0x5UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_SUPPORTED 0x320UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE 0x321UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE
+ u8 count;
+ u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_ipc_msg_input (size:320b/40B) */
+struct hwrm_fw_ipc_msg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FW_IPC_MSG_REQ_ENABLES_COMMAND_ID 0x1UL
+ #define FW_IPC_MSG_REQ_ENABLES_SRC_PROCESSOR 0x2UL
+ #define FW_IPC_MSG_REQ_ENABLES_DATA_OFFSET 0x4UL
+ #define FW_IPC_MSG_REQ_ENABLES_LENGTH 0x8UL
+ __le16 command_id;
+ #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG 0x1UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_MHB_HOST 0x2UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_DRVR_VERSION 0x3UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_LOG2H 0x4UL
+ #define FW_IPC_MSG_REQ_COMMAND_ID_LAST FW_IPC_MSG_REQ_COMMAND_ID_LOG2H
+ u8 src_processor;
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_CFW 0x1UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_BONO 0x2UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_APE 0x3UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG 0x4UL
+ #define FW_IPC_MSG_REQ_SRC_PROCESSOR_LAST FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG
+ u8 unused_0;
+ __le32 data_offset;
+ __le16 length;
+ u8 unused_1[2];
+ __le64 opaque;
+};
+
+/* hwrm_fw_ipc_msg_output (size:256b/32B) */
+struct hwrm_fw_ipc_msg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 msg_data_1;
+ __le32 msg_data_2;
+ __le64 reserved64;
+ u8 reserved48[7];
+ u8 valid;
+};
+
+/* hwrm_fw_ipc_mailbox_input (size:256b/32B) */
+struct hwrm_fw_ipc_mailbox_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ u8 unused;
+ u8 event_id;
+ u8 port_id;
+ __le32 event_data1;
+ __le32 event_data2;
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_ipc_mailbox_output (size:128b/16B) */
+struct hwrm_fw_ipc_mailbox_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_ipc_mailbox_cmd_err (size:64b/8B) */
+struct hwrm_fw_ipc_mailbox_cmd_err {
+ u8 code;
+ #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_IPC_MAILBOX_CMD_ERR_CODE_LAST FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_ecn_cfg_input (size:192b/24B) */
+struct hwrm_fw_ecn_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define FW_ECN_CFG_REQ_FLAGS_ENABLE_ECN 0x1UL
+ u8 unused_0[6];
+};
+
+/* hwrm_fw_ecn_cfg_output (size:128b/16B) */
+struct hwrm_fw_ecn_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_ecn_qcfg_input (size:128b/16B) */
+struct hwrm_fw_ecn_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_ecn_qcfg_output (size:128b/16B) */
+struct hwrm_fw_ecn_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define FW_ECN_QCFG_RESP_FLAGS_ENABLE_ECN 0x1UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_health_check_input (size:128b/16B) */
+struct hwrm_fw_health_check_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_health_check_output (size:128b/16B) */
+struct hwrm_fw_health_check_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 fw_status;
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_BOOTED 0x1UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH 0x2UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_BOOTED 0x4UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH 0x8UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_BOOTED 0x10UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH 0x20UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SECOND_RT 0x40UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_FASTBOOTED 0x80UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_BOOTED 0x100UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_MISMATCH 0x200UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_MBR_CORRUPT 0x400UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CFG_MISMATCH 0x800UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_FRU_MISMATCH 0x1000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT2_BOOTED 0x2000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT2_MISMATCH 0x4000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_GXRT_BOOTED 0x8000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_GXRT_MISMATCH 0x10000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT2_BOOTED 0x20000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT2_MISMATCH 0x40000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_ART_MISMATCH 0x80000UL
+ #define FW_HEALTH_CHECK_RESP_FW_STATUS_ART_BOOTED 0x100000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_MPRT_FW 0x3UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_RERT_FW 0x4UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_AUXRT_FW 0x5UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_AUXRT_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_MPRT_FW 0x3UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_RERT_FW 0x4UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_AUXRT_FW 0x5UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_AUXRT_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
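Activating a patch combines an opcode, a firmware target, and a load type; with LOADTYPE_MEMORY_DIRECT the image is fetched from host_addr for patch_len bytes. A hedged fill sketch mirroring the 32-byte layout (the DMA mapping and transport are again elided):

#include <stdint.h>
#include <string.h>
#include <endian.h>

/* Mirror of hwrm_fw_livepatch_input (32B); field names follow the header. */
struct fw_livepatch_req {
	uint16_t req_type, cmpl_ring, seq_id, target_id;
	uint64_t resp_addr;
	uint8_t  opcode;
	uint8_t  fw_target;
	uint8_t  loadtype;
	uint8_t  flags;
	uint32_t patch_len;
	uint64_t host_addr;
} __attribute__((packed));
_Static_assert(sizeof(struct fw_livepatch_req) == 32, "must match size:256b/32B");

/* Activate a common-firmware patch supplied directly from host memory. */
static void fill_livepatch_activate(struct fw_livepatch_req *req,
				    uint64_t dma, uint32_t len)
{
	memset(req, 0, sizeof(*req));
	req->opcode    = 0x1; /* FW_LIVEPATCH_REQ_OPCODE_ACTIVATE */
	req->fw_target = 0x1; /* FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW */
	req->loadtype  = 0x2; /* FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT */
	req->patch_len = htole32(len);
	req->host_addr = htole64(dma);
}
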
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_sync_input (size:192b/24B) */
+struct hwrm_fw_sync_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 sync_action;
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI 0x1UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT 0x2UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT 0x4UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_DIR_HDR 0x8UL
+ #define FW_SYNC_REQ_SYNC_ACTION_WRITE_MBR 0x10UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CFG 0x20UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_FRU 0x40UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT2 0x80UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_GXRT 0x100UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT2 0x200UL
+ #define FW_SYNC_REQ_SYNC_ACTION_SYNC_ART 0x400UL
+ #define FW_SYNC_REQ_SYNC_ACTION_ACTION 0x80000000UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_sync_output (size:128b/16B) */
+struct hwrm_fw_sync_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 sync_status;
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_MASK 0xffUL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SFT 0
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SUCCESS 0x0UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_IN_PROGRESS 0x1UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_TIMEOUT 0x2UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL 0x3UL
+ #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_LAST FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL
+ #define FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR 0x40000000UL
+ #define FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE 0x80000000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
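sync_status packs three things into one word: a completion bit (bit 31), an error bit (bit 30), and an 8-bit error code in the low byte. Decoding is mask-and-test:

#include <stdbool.h>
#include <stdint.h>

#define SYNC_STATUS_ERR_CODE_MASK 0xffu        /* low byte: error code */
#define SYNC_STATUS_SYNC_ERR      0x40000000u  /* bit 30: sync failed */
#define SYNC_STATUS_SYNC_COMPLETE 0x80000000u  /* bit 31: operation done */

/* Returns true when the sync finished cleanly; *err receives the code. */
static bool fw_sync_ok(uint32_t sync_status, uint8_t *err)
{
	*err = sync_status & SYNC_STATUS_ERR_CODE_MASK;
	return (sync_status & SYNC_STATUS_SYNC_COMPLETE) &&
	       !(sync_status & SYNC_STATUS_SYNC_ERR);
}
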
+/* hwrm_fw_sync_cmd_err (size:64b/8B) */
+struct hwrm_fw_sync_cmd_err {
+ u8 code;
+ #define FW_SYNC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SYNC_CMD_ERR_CODE_INVALID_LEN 0x1UL
+ #define FW_SYNC_CMD_ERR_CODE_INVALID_CRID 0x2UL
+ #define FW_SYNC_CMD_ERR_CODE_NO_WORKSPACE_MEM 0x2UL
+ #define FW_SYNC_CMD_ERR_CODE_SYNC_FAILED 0x3UL
+ #define FW_SYNC_CMD_ERR_CODE_LAST FW_SYNC_CMD_ERR_CODE_SYNC_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_state_qcaps_input (size:128b/16B) */
+struct hwrm_fw_state_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_state_qcaps_output (size:256b/32B) */
+struct hwrm_fw_state_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 backup_memory;
+ __le32 quiesce_timeout;
+ __le32 fw_status_blackout;
+ __le32 fw_status_max_wait;
+ u8 unused_0[4];
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_fw_state_quiesce_input (size:192b/24B) */
+struct hwrm_fw_state_quiesce_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define FW_STATE_QUIESCE_REQ_FLAGS_ERROR_RECOVERY 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_state_quiesce_output (size:192b/24B) */
+struct hwrm_fw_state_quiesce_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 quiesce_status;
+ #define FW_STATE_QUIESCE_RESP_QUIESCE_STATUS_INITIATED 0x80000000UL
+ u8 unused_0[4];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_fw_state_unquiesce_input (size:128b/16B) */
+struct hwrm_fw_state_unquiesce_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_fw_state_unquiesce_output (size:192b/24B) */
+struct hwrm_fw_state_unquiesce_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unquiesce_status;
+ #define FW_STATE_UNQUIESCE_RESP_UNQUIESCE_STATUS_COMPLETE 0x80000000UL
+ u8 unused_0[4];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_fw_state_backup_input (size:256b/32B) */
+struct hwrm_fw_state_backup_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 backup_pg_size_backup_lvl;
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_MASK 0xfUL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_SFT 0
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_0 0x0UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_1 0x1UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2 0x2UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LAST FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_MASK 0xf0UL
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_SFT 4
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_LAST FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G
+ u8 unused_0[7];
+ __le64 backup_page_dir;
+};
+
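backup_pg_size_backup_lvl is a split byte: the backup level sits in bits 0-3 and the page-size code in bits 4-7, which is why the PG_SIZE values above are encoded as (value << 4). Packing it is a single expression:

#include <stdint.h>

#define BACKUP_LVL_SFT     0	/* low nibble: backup level */
#define BACKUP_PG_SIZE_SFT 4	/* high nibble: page-size code */

/* e.g. level 2 with 4K pages: pack_backup_byte(0x2, 0x0). */
static uint8_t pack_backup_byte(uint8_t lvl, uint8_t pg_code)
{
	return (uint8_t)((lvl << BACKUP_LVL_SFT) |
			 (pg_code << BACKUP_PG_SIZE_SFT));
}
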
+/* hwrm_fw_state_backup_output (size:192b/24B) */
+struct hwrm_fw_state_backup_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 backup_status;
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_MASK 0xffUL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SFT 0
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SUCCESS 0x0UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_QUIESCE_ERROR 0x1UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL 0x3UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_LAST FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_RESET_REQUIRED 0x40000000UL
+ #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_COMPLETE 0x80000000UL
+ u8 unused_0[4];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_fw_state_restore_input (size:256b/32B) */
+struct hwrm_fw_state_restore_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 restore_pg_size_restore_lvl;
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_MASK 0xfUL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_SFT 0
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_0 0x0UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_1 0x1UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2 0x2UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LAST FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_MASK 0xf0UL
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_SFT 4
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_LAST FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G
+ u8 unused_0[7];
+ __le64 restore_page_dir;
+};
+
+/* hwrm_fw_state_restore_output (size:128b/16B) */
+struct hwrm_fw_state_restore_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 restore_status;
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_MASK 0xffUL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SFT 0
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SUCCESS 0x0UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_GENERAL 0x1UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_FORMAT_PARSE 0x2UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK 0x3UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_LAST FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_FAILURE_ROLLBACK_COMPLETED 0x40000000UL
+ #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_COMPLETE 0x80000000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_fw_secure_cfg_input (size:256b/32B) */
+struct hwrm_fw_secure_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 enable;
+ #define FW_SECURE_CFG_REQ_ENABLE_NVRAM 0x1UL
+ #define FW_SECURE_CFG_REQ_ENABLE_GRC 0x2UL
+ #define FW_SECURE_CFG_REQ_ENABLE_UART 0x3UL
+ #define FW_SECURE_CFG_REQ_ENABLE_LAST FW_SECURE_CFG_REQ_ENABLE_UART
+ u8 config_mode;
+ #define FW_SECURE_CFG_REQ_CONFIG_MODE_PERSISTENT 0x1UL
+ #define FW_SECURE_CFG_REQ_CONFIG_MODE_RUNTIME 0x2UL
+ u8 nvm_lock_mode;
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_NONE 0x0UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_PARTIAL 0x1UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_FULL 0x2UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_CHIP 0x3UL
+ #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_LAST FW_SECURE_CFG_REQ_NVM_LOCK_MODE_CHIP
+ u8 nvm_partial_lock_mask;
+ #define FW_SECURE_CFG_REQ_NVM_PARTIAL_LOCK_MASK_EXE 0x1UL
+ #define FW_SECURE_CFG_REQ_NVM_PARTIAL_LOCK_MASK_CFG 0x2UL
+ u8 grc_ctrl;
+ #define FW_SECURE_CFG_REQ_GRC_CTRL_RO 0x0UL
+ #define FW_SECURE_CFG_REQ_GRC_CTRL_RW 0x1UL
+ #define FW_SECURE_CFG_REQ_GRC_CTRL_LAST FW_SECURE_CFG_REQ_GRC_CTRL_RW
+ u8 uart_ctrl;
+ #define FW_SECURE_CFG_REQ_UART_CTRL_DISABLE 0x0UL
+ #define FW_SECURE_CFG_REQ_UART_CTRL_ENABLE 0x1UL
+ #define FW_SECURE_CFG_REQ_UART_CTRL_LAST FW_SECURE_CFG_REQ_UART_CTRL_ENABLE
+ u8 unused_0[2];
+ __le32 unused_1[2];
+};
+
+/* hwrm_fw_secure_cfg_output (size:128b/16B) */
+struct hwrm_fw_secure_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1792b/224B) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[48];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 phy_temp;
+ u8 om_temp;
+ u8 flags;
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
+ u8 valid;
+};
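+
+/*
+ * Illustrative sketch only, not part of the generated interface: the flags
+ * byte gates the readings in this response, so a hypothetical caller checks
+ * the relevant *_NOT_PRESENT / *_NOT_AVAILABLE bits before trusting a field.
+ */
+static inline bool temp_monitor_om_temp_valid(const struct hwrm_temp_monitor_query_output *resp)
+{
+	return !(resp->flags & (TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT |
+				TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE));
+}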
+
+/* hwrm_reg_power_query_input (size:128b/16B) */
+struct hwrm_reg_power_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_reg_power_query_output (size:192b/24B) */
+struct hwrm_reg_power_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define REG_POWER_QUERY_RESP_FLAGS_IN_POWER_AVAILABLE 0x1UL
+ #define REG_POWER_QUERY_RESP_FLAGS_OUT_POWER_AVAILABLE 0x2UL
+ __le32 in_power_mw;
+ __le32 out_power_mw;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_core_frequency_query_input (size:128b/16B) */
+struct hwrm_core_frequency_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_core_frequency_query_output (size:128b/16B) */
+struct hwrm_core_frequency_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 core_frequency_hz;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_reg_power_histogram_input (size:192b/24B) */
+struct hwrm_reg_power_histogram_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define REG_POWER_HISTOGRAM_REQ_FLAGS_CLEAR_HISTOGRAM 0x1UL
+ __le32 unused_0;
+};
+
+/* hwrm_reg_power_histogram_output (size:1088b/136B) */
+struct hwrm_reg_power_histogram_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT 0x1UL
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_INPUT 0x0UL
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT 0x1UL
+ #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_LAST REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT
+ u8 unused_0[2];
+ __le32 sampling_period;
+ __le64 sample_count;
+ __le32 power_hist[26];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+#define BUCKET_NO_DATA_FOR_SAMPLE 0x0UL
+#define BUCKET_RANGE_8W_OR_LESS 0x1UL
+#define BUCKET_RANGE_8W_TO_9W 0x2UL
+#define BUCKET_RANGE_9W_TO_10W 0x3UL
+#define BUCKET_RANGE_10W_TO_11W 0x4UL
+#define BUCKET_RANGE_11W_TO_12W 0x5UL
+#define BUCKET_RANGE_12W_TO_13W 0x6UL
+#define BUCKET_RANGE_13W_TO_14W 0x7UL
+#define BUCKET_RANGE_14W_TO_15W 0x8UL
+#define BUCKET_RANGE_15W_TO_16W 0x9UL
+#define BUCKET_RANGE_16W_TO_18W 0xaUL
+#define BUCKET_RANGE_18W_TO_20W 0xbUL
+#define BUCKET_RANGE_20W_TO_22W 0xcUL
+#define BUCKET_RANGE_22W_TO_24W 0xdUL
+#define BUCKET_RANGE_24W_TO_26W 0xeUL
+#define BUCKET_RANGE_26W_TO_28W 0xfUL
+#define BUCKET_RANGE_28W_TO_30W 0x10UL
+#define BUCKET_RANGE_30W_TO_32W 0x11UL
+#define BUCKET_RANGE_32W_TO_34W 0x12UL
+#define BUCKET_RANGE_34W_TO_36W 0x13UL
+#define BUCKET_RANGE_36W_TO_38W 0x14UL
+#define BUCKET_RANGE_38W_TO_40W 0x15UL
+#define BUCKET_RANGE_40W_TO_42W 0x16UL
+#define BUCKET_RANGE_42W_TO_44W 0x17UL
+#define BUCKET_RANGE_44W_TO_50W 0x18UL
+#define BUCKET_RANGE_OVER_50W 0x19UL
+#define BUCKET_LAST BUCKET_RANGE_OVER_50W
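+
+/*
+ * Illustrative sketch only, not part of the generated interface: the BUCKET_*
+ * values above index power_hist[] in struct hwrm_reg_power_histogram_output;
+ * a hypothetical helper summing every populated bucket (skipping the
+ * no-data sentinel at index 0) looks like this.
+ */
+static inline u64 reg_power_hist_total(const struct hwrm_reg_power_histogram_output *resp)
+{
+	u64 total = 0;
+	unsigned int i;
+
+	for (i = BUCKET_RANGE_8W_OR_LESS; i <= BUCKET_LAST; i++)
+		total += le32_to_cpu(resp->power_hist[i]);
+
+	return total;
+}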
+
+/* hwrm_monitor_pax_histogram_start_input (size:448b/56B) */
+struct hwrm_monitor_pax_histogram_start_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_READ 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_WRITE 0x0UL
+ #define MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_LAST MONITOR_PAX_HISTOGRAM_START_REQ_FLAGS_PROFILE_WRITE
+ u8 unused_0[4];
+ __le64 start_addr;
+ __le64 end_addr;
+ __le32 axuser_value;
+ __le32 axuser_mask;
+ u8 lsb_sel;
+ u8 unused_1[7];
+};
+
+/* hwrm_monitor_pax_histogram_start_output (size:192b/24B) */
+struct hwrm_monitor_pax_histogram_start_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 timestamp;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+#define PAX_HISTOGRAM_BUCKET0 0x0UL
+#define PAX_HISTOGRAM_BUCKET1 0x1UL
+#define PAX_HISTOGRAM_BUCKET2 0x2UL
+#define PAX_HISTOGRAM_BUCKET3 0x3UL
+#define PAX_HISTOGRAM_BUCKET4 0x4UL
+#define PAX_HISTOGRAM_BUCKET5 0x5UL
+#define PAX_HISTOGRAM_BUCKET6 0x6UL
+#define PAX_HISTOGRAM_BUCKET7 0x7UL
+#define PAX_HISTOGRAM_BUCKET8 0x8UL
+#define PAX_HISTOGRAM_BUCKET9 0x9UL
+#define PAX_HISTOGRAM_BUCKET10 0xaUL
+#define PAX_HISTOGRAM_BUCKET11 0xbUL
+#define PAX_HISTOGRAM_BUCKET12 0xcUL
+#define PAX_HISTOGRAM_BUCKET13 0xdUL
+#define PAX_HISTOGRAM_BUCKET14 0xeUL
+#define PAX_HISTOGRAM_BUCKET15 0xfUL
+#define PAX_HISTOGRAM_MIN_LATENCY 0x10UL
+#define PAX_HISTOGRAM_MAX_LATENCY 0x11UL
+#define PAX_HISTOGRAM_EVENT_COUNTER 0x12UL
+#define PAX_HISTOGRAM_ACCUMULATOR 0x13UL
+#define PAX_HISTOGRAM_LAST PAX_HISTOGRAM_ACCUMULATOR
+
+/* hwrm_monitor_pax_histogram_collect_input (size:192b/24B) */
+struct hwrm_monitor_pax_histogram_collect_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_READ 0x1UL
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_WRITE 0x0UL
+ #define MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_LAST MONITOR_PAX_HISTOGRAM_COLLECT_REQ_FLAGS_PROFILE_WRITE
+ u8 unused_0[4];
+};
+
+/* hwrm_monitor_pax_histogram_collect_output (size:2752b/344B) */
+struct hwrm_monitor_pax_histogram_collect_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 timestamp;
+ __le64 histogram_data_mst0[20];
+ __le64 histogram_data_mst1[20];
+ u8 unused_0[7];
+ u8 valid;
+};
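+
+/*
+ * Illustrative sketch only, not part of the generated interface: the 20-entry
+ * histogram_data_mst* arrays line up with the PAX_HISTOGRAM_* indices above
+ * (BUCKET0 through ACCUMULATOR); a hypothetical bounds-checked accessor:
+ */
+static inline u64 pax_hist_mst0_read(const struct hwrm_monitor_pax_histogram_collect_output *resp,
+				     unsigned int idx)
+{
+	return idx <= PAX_HISTOGRAM_LAST ?
+	       le64_to_cpu(resp->histogram_data_mst0[idx]) : 0;
+}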
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[2];
+ __le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
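+
+/*
+ * Illustrative sketch only, not part of the generated interface: the segment
+ * list is paged. A hypothetical caller (send_hwrm() and consume() are assumed
+ * helpers, not real APIs) bumps seq_no and reissues the request while the
+ * response keeps the MORE flag set:
+ *
+ *	req.seq_no = 0;
+ *	do {
+ *		send_hwrm(&req, &resp);
+ *		consume(host_buf, le16_to_cpu(resp.data_len));
+ *		req.seq_no = cpu_to_le16(le16_to_cpu(req.seq_no) + 1);
+ *	} while (resp.flags & DBG_COREDUMP_LIST_RESP_FLAGS_MORE);
+ */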
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+#define HWRM_NVM_COMMON_CMD_ERR_UNKNOWN 0x0UL
+#define HWRM_NVM_COMMON_CMD_ERR_ACCESS_DENIED 0x65UL
+#define HWRM_NVM_COMMON_CMD_ERR_FW_BUSY 0x66UL
+#define HWRM_NVM_COMMON_CMD_ERR_FW_ABORT 0x67UL
+#define HWRM_NVM_COMMON_CMD_ERR_FW_UPGRD_IN_PROG 0x68UL
+#define HWRM_NVM_COMMON_CMD_ERR_NO_WORKSPACE_MEM 0x69UL
+#define HWRM_NVM_COMMON_CMD_ERR_RESOURCE_LOCKED 0x6aUL
+#define HWRM_NVM_COMMON_CMD_ERR_FILE_OPEN_FAILED 0x6bUL
+#define HWRM_NVM_COMMON_CMD_ERR_DMA_FAILED 0x6cUL
+#define HWRM_NVM_COMMON_CMD_ERR_LAST HWRM_NVM_COMMON_CMD_ERR_DMA_FAILED
+
+/* hwrm_nvm_raw_write_blk_input (size:320b/40B) */
+struct hwrm_nvm_raw_write_blk_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le32 dest_addr;
+ __le32 len;
+ u8 flags;
+ #define NVM_RAW_WRITE_BLK_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
+struct hwrm_nvm_raw_write_blk_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk_cmd_err (size:64b/8B) */
+struct hwrm_nvm_raw_write_blk_cmd_err {
+ u8 code;
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_INVALID_LEN 0x1UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_INVALID_ADDR 0x2UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_RAW_WRITE_BLK_CMD_ERR_CODE_LAST NVM_RAW_WRITE_BLK_CMD_ERR_CODE_WRITE_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_cmd_err (size:64b/8B) */
+struct hwrm_nvm_read_cmd_err {
+ u8 code;
+ #define NVM_READ_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_READ_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_READ_CMD_ERR_CODE_INVALID_LEN 0x2UL
+ #define NVM_READ_CMD_ERR_CODE_READ_FAILED 0x3UL
+ #define NVM_READ_CMD_ERR_CODE_FRU_CRC_CHECK_FAILED 0x4UL
+ #define NVM_READ_CMD_ERR_CODE_LAST NVM_READ_CMD_ERR_CODE_FRU_CRC_CHECK_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_raw_dump_input (size:320b/40B) */
+struct hwrm_nvm_raw_dump_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 offset;
+ __le32 len;
+ u8 flags;
+ #define NVM_RAW_DUMP_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_raw_dump_output (size:128b/16B) */
+struct hwrm_nvm_raw_dump_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_dump_cmd_err (size:64b/8B) */
+struct hwrm_nvm_raw_dump_cmd_err {
+ u8 code;
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_INVALID_LEN 0x1UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_INVALID_OFFSET 0x2UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_VALIDATE_FAILED 0x3UL
+ #define NVM_RAW_DUMP_CMD_ERR_CODE_LAST NVM_RAW_DUMP_CMD_ERR_CODE_VALIDATE_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_dir_entries_cmd_err {
+ u8 code;
+ #define NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_GET_DIR_LIST_FAILED 0x1UL
+ #define NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_LAST NVM_GET_DIR_ENTRIES_CMD_ERR_CODE_GET_DIR_LIST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
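+
+/*
+ * Illustrative sketch only, not part of the generated interface: a
+ * hypothetical helper sizing a host buffer for HWRM_NVM_GET_DIR_ENTRIES
+ * from this response as entries * entry_length.
+ */
+static inline u32 nvm_dir_entries_buf_len(const struct hwrm_nvm_get_dir_info_output *resp)
+{
+	return le32_to_cpu(resp->entries) * le32_to_cpu(resp->entry_length);
+}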
+
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
+ __le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_WRITE_CMD_ERR_CODE_REQD_ERASE_FAILED 0x4UL
+ #define NVM_WRITE_CMD_ERR_CODE_VERIFY_FAILED 0x5UL
+ #define NVM_WRITE_CMD_ERR_CODE_INVALID_HEADER 0x6UL
+ #define NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED 0x7UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_modify_cmd_err (size:64b/8B) */
+struct hwrm_nvm_modify_cmd_err {
+ u8 code;
+ #define NVM_MODIFY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_MODIFY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_MODIFY_CMD_ERR_CODE_INVALID_OFFSET 0x2UL
+ #define NVM_MODIFY_CMD_ERR_CODE_ITEM_TOO_BIG_ERR 0x3UL
+ #define NVM_MODIFY_CMD_ERR_CODE_BLK_BOUNDARY_ERR 0x4UL
+ #define NVM_MODIFY_CMD_ERR_CODE_SECURITY_VIOLATION 0x5UL
+ #define NVM_MODIFY_CMD_ERR_CODE_WRITE_FAILED 0x6UL
+ #define NVM_MODIFY_CMD_ERR_CODE_ERASE_SECTORS_FAILED 0x7UL
+ #define NVM_MODIFY_CMD_ERR_CODE_COMPUTE_CRC_FAILED 0x8UL
+ #define NVM_MODIFY_CMD_ERR_CODE_LAST NVM_MODIFY_CMD_ERR_CODE_COMPUTE_CRC_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_cmd_err (size:64b/8B) */
+struct hwrm_nvm_find_dir_entry_cmd_err {
+ u8 code;
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_MGMT_FW_DISABLED 0x1UL
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x2UL
+ #define NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_LAST NVM_FIND_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_cmd_err (size:64b/8B) */
+struct hwrm_nvm_erase_dir_entry_cmd_err {
+ u8 code;
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION 0x5UL
+ #define NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_LAST NVM_ERASE_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_output (size:832b/104B) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
+ __le16 art_fw_major;
+ __le16 art_fw_minor;
+ __le16 art_fw_build;
+ __le16 art_fw_patch;
+ u8 security_soc_fw_major;
+ u8 security_soc_fw_minor;
+ u8 security_soc_fw_build;
+ u8 security_soc_fw_patch;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_cmd_err (size:64b/8B) */
+struct hwrm_nvm_mod_dir_entry_cmd_err {
+ u8 code;
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x1UL
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION 0x5UL
+ #define NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_LAST NVM_MOD_DIR_ENTRY_CMD_ERR_CODE_SECURITY_VIOLATION
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
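+
+/*
+ * Illustrative sketch only, not part of the generated interface: a
+ * hypothetical post-install check treating RESULT_SUCCESS as the only pass
+ * value, with reset_required consulted separately for follow-up action.
+ */
+static inline bool nvm_install_ok(const struct hwrm_nvm_install_update_output *resp)
+{
+	return resp->result == NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS;
+}
+
+static inline bool nvm_install_needs_reset(const struct hwrm_nvm_install_update_output *resp)
+{
+	return resp->reset_required != NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE;
+}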
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_DEFRAG_FAILED 0x5UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x6UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_flush_input (size:128b/16B) */
+struct hwrm_nvm_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_flush_output (size:128b/16B) */
+struct hwrm_nvm_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
+struct hwrm_nvm_flush_cmd_err {
+ u8 code;
+ #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_FLUSH_CMD_ERR_CODE_FAIL 0x1UL
+ #define NVM_FLUSH_CMD_ERR_CODE_LAST NVM_FLUSH_CMD_ERR_CODE_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_VALIDATE_OPT_VALUE 0x2UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 flags;
+ #define NVM_GET_VARIABLE_RESP_FLAGS_VALIDATE_OPT_VALUE 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x4UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x5UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x6UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x7UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x8UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 unused_0;
+};
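+
+/*
+ * Illustrative sketch only, not part of the generated interface: flags packs
+ * a pre-shifted ENCRYPT_MODE_* sub-field (bits 3:1) next to the single-bit
+ * flags; a hypothetical helper composing it:
+ */
+static inline u8 nvm_set_variable_flags(u8 encrypt_mode, bool force_flush)
+{
+	return (encrypt_mode & NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK) |
+	       (force_flush ? NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH : 0);
+}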
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_defrag_input (size:192b/24B) */
+struct hwrm_nvm_defrag_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define NVM_DEFRAG_REQ_FLAGS_DEFRAG 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_nvm_defrag_output (size:128b/16B) */
+struct hwrm_nvm_defrag_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */
+struct hwrm_nvm_defrag_cmd_err {
+ u8 code;
+ #define NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_FAIL 0x1UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL 0x2UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_LAST NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_vpd_field_info_input (size:192b/24B) */
+struct hwrm_nvm_get_vpd_field_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tag_id[2];
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_get_vpd_field_info_output (size:2176b/272B) */
+struct hwrm_nvm_get_vpd_field_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 data[256];
+ __le16 data_len;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_vpd_field_info_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_vpd_field_info_cmd_err {
+ u8 code;
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_NOT_CACHED 0x1UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_VPD_PARSE_FAILED 0x2UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_INVALID_TAG_ID 0x3UL
+ #define NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_LAST NVM_GET_VPD_FIELD_INFO_CMD_ERR_CODE_INVALID_TAG_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_vpd_field_info_input (size:256b/32B) */
+struct hwrm_nvm_set_vpd_field_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ u8 tag_id[2];
+ __le16 data_len;
+ u8 unused_0[4];
+};
+
+/* hwrm_nvm_set_vpd_field_info_output (size:128b/16B) */
+struct hwrm_nvm_set_vpd_field_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_profile_input (size:256b/32B) */
+struct hwrm_nvm_set_profile_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 option_count;
+ u8 flags;
+ #define NVM_SET_PROFILE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_PROFILE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x3eUL
+ #define NVM_SET_PROFILE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 1
+ #define NVM_SET_PROFILE_REQ_FLAGS_VALIDATE_ONLY 0x40UL
+ #define NVM_SET_PROFILE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 profile_type;
+ #define NVM_SET_PROFILE_REQ_PROFILE_TYPE_NONE 0x0UL
+ #define NVM_SET_PROFILE_REQ_PROFILE_TYPE_EROCE 0x1UL
+ #define NVM_SET_PROFILE_REQ_PROFILE_TYPE_LAST NVM_SET_PROFILE_REQ_PROFILE_TYPE_EROCE
+ u8 unused_2[3];
+};
+
+/* hwrm_nvm_set_profile_output (size:128b/16B) */
+struct hwrm_nvm_set_profile_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_profile_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_profile_cmd_err {
+ u8 code;
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_PROVISION_ERROR 0xaUL
+ #define NVM_SET_PROFILE_CMD_ERR_CODE_LAST NVM_SET_PROFILE_CMD_ERR_CODE_PROVISION_ERROR
+ u8 err_index;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_set_profile_sb (size:128b/16B) */
+struct hwrm_nvm_set_profile_sb {
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_PROFILE_SB_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_PROFILE_SB_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_PROFILE_SB_OPTION_NUM_LAST NVM_SET_PROFILE_SB_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_PROFILE_SB_FLAGS_FLAGS_UNUSED_0_MASK 0xffUL
+ #define NVM_SET_PROFILE_SB_FLAGS_FLAGS_UNUSED_0_SFT 0
+ u8 unused_0;
+};
+
+/* hwrm_selftest_qlist_input (size:128b/16B) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_irq_output (size:128b/16B) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_CQ_REASSIGN (0xcUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
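+
+/*
+ * Illustrative sketch only, not part of the generated interface: a
+ * hypothetical helper composing the type_path_xid doorbell word from a
+ * pre-shifted DBC_DBC_TYPE_* value, a pre-shifted DBC_DBC_PATH_* value and
+ * the queue xid, using the generated masks.
+ */
+static inline __le32 dbc_type_path_xid(u32 type, u32 path, u32 xid)
+{
+	return cpu_to_le32((type & DBC_DBC_TYPE_MASK) |
+			   (path & DBC_DBC_PATH_MASK) |
+			   (xid & DBC_DBC_XID_MASK));
+}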
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
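+
+/*
+ * Illustrative sketch only, not part of the generated interface: a
+ * hypothetical validity check matching the signature sub-field of sig_ver
+ * against the expected value.
+ */
+static inline bool hcomm_status_signature_ok(u32 sig_ver)
+{
+	return (sig_ver & HCOMM_STATUS_SIGNATURE_MASK) ==
+	       HCOMM_STATUS_SIGNATURE_VAL;
+}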
+
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
+
+#endif /* _BNGE_HSI_H_ */
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
index 47c34990cf23..74a6bf278d88 100644
--- a/include/linux/bnxt/hsi.h
+++ b/include/linux/bnxt/hsi.h
@@ -187,6 +187,8 @@ struct cmd_nums {
#define HWRM_RING_QCFG 0x63UL
#define HWRM_RESERVED5 0x64UL
#define HWRM_RESERVED6 0x65UL
+ #define HWRM_PORT_ADSM_QSTATES 0x66UL
+ #define HWRM_PORT_EVENTS_LOG 0x67UL
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
@@ -235,7 +237,7 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
#define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_QSTATS_EXT_PFC_ADV 0xbaUL
#define HWRM_PORT_TX_FIR_CFG 0xbbUL
#define HWRM_PORT_TX_FIR_QCFG 0xbcUL
#define HWRM_PORT_ECN_QSTATS 0xbdUL
@@ -271,6 +273,7 @@ struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_PORT_CFG 0xdcUL
#define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_DSC_COLLECTION 0xdeUL
#define HWRM_PORT_MAC_QCAPS 0xdfUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
@@ -280,6 +283,7 @@ struct cmd_nums {
#define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
#define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
#define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_MONITOR_DEVICE_HEALTH 0xe8UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -640,8 +644,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 133
-#define HWRM_VERSION_STR "1.10.3.133"
+#define HWRM_VERSION_RSVD 151
+#define HWRM_VERSION_STR "1.10.3.151"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1416,7 +1420,8 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_L2_TX_RING 0xbUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_L2_TX_RING
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1934,7 +1939,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
#define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
#define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_STATS_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_DATA_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_CMPL_TS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_ST_SUPPORTED 0x2000UL
__le16 max_roce_vfs;
__le16 max_crypto_rx_flow_filters;
u8 unused_3[3];
@@ -4441,7 +4448,10 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_224 0x7d3UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_224 0xfa3UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_224 0x1f43UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_224
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4457,7 +4467,11 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
- u8 unused_2[6];
+ __le16 auto_link_speeds2_ext_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_EXT_MASK_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_EXT_MASK_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_EXT_MASK_800GB_PAM4_224 0x4UL
+ u8 unused_2[4];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -4491,7 +4505,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+/* hwrm_port_phy_qcfg_output (size:896b/112B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -4501,14 +4515,17 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ #define PORT_PHY_QCFG_RESP_LINK_NO_SD 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LOCK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_NO_LOCK
u8 active_fec_signal_mode;
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_224 0x3UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_224
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
@@ -4699,7 +4716,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEDR4 0x3eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEFR4 0x3fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEFR4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4859,7 +4878,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_224 0x7d3UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_224 0xfa3UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_224 0x1f43UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_224
__le16 auto_link_speeds2;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
@@ -4876,6 +4898,16 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
u8 active_lanes;
+ u8 rsvd1;
+ __le16 support_speeds2_ext;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_EXT_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_EXT_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_EXT_800GB_PAM4_224 0x4UL
+ __le16 auto_link_speeds2_ext;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_EXT_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_EXT_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_EXT_800GB_PAM4_224 0x4UL
+ u8 rsvd2[3];
u8 valid;
};
@@ -5478,7 +5510,7 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+/* hwrm_port_phy_qcaps_output (size:384b/48B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -5563,6 +5595,10 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_ADV_STATS_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_ADSM_REPORT_SUPPORTED 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PM_EVENT_LOG_SUPPORTED 0x80UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_FDRSTAT_CMD_SUPPORTED 0x100UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -5595,7 +5631,15 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
- u8 unused_1[3];
+ __le16 supported_speeds2_ext_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_FORCE_MODE_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_FORCE_MODE_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_FORCE_MODE_800GB_PAM4_224 0x4UL
+ __le16 supported_speeds2_ext_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_AUTO_MODE_200GB_PAM4_224 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_AUTO_MODE_400GB_PAM4_224 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_EXT_AUTO_MODE_800GB_PAM4_224 0x4UL
+ u8 unused_1[7];
u8 valid;
};
@@ -6051,6 +6095,58 @@ struct hwrm_port_led_qcaps_output {
u8 valid;
};
+/* hwrm_port_phy_fdrstat_input (size:192b/24B) */
+struct hwrm_port_phy_fdrstat_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 rsvd[2];
+ __le16 ops;
+ #define PORT_PHY_FDRSTAT_REQ_OPS_START 0x0UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_STOP 0x1UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_CLEAR 0x2UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_COUNTER 0x3UL
+ #define PORT_PHY_FDRSTAT_REQ_OPS_LAST PORT_PHY_FDRSTAT_REQ_OPS_COUNTER
+};
+
+/* hwrm_port_phy_fdrstat_output (size:3072b/384B) */
+struct hwrm_port_phy_fdrstat_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 start_time;
+ __le64 end_time;
+ __le64 cmic_start_time;
+ __le64 cmic_end_time;
+ __le64 accumulated_uncorrected_codewords;
+ __le64 accumulated_corrected_codewords;
+ __le64 accumulated_total_codewords;
+ __le64 accumulated_symbol_errors;
+ __le64 accumulated_codewords_err_s[17];
+ __le64 uncorrected_codewords;
+ __le64 corrected_codewords;
+ __le64 total_codewords;
+ __le64 symbol_errors;
+ __le64 codewords_err_s[17];
+ __le32 window_size;
+ __le16 unused_0[1];
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_fdrstat_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_fdrstat_cmd_err {
+ u8 code;
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED 0x1UL
+ #define PORT_PHY_FDRSTAT_CMD_ERR_CODE_LAST PORT_PHY_FDRSTAT_CMD_ERR_CODE_NOT_STARTED
+ u8 unused_0[7];
+};
+
/* hwrm_port_mac_qcaps_input (size:192b/24B) */
struct hwrm_port_mac_qcaps_input {
__le16 req_type;
@@ -6912,6 +7008,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
#define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ #define VNIC_CFG_REQ_FLAGS_DEST_QP 0x100UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -6923,7 +7020,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
- #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ #define VNIC_CFG_REQ_ENABLES_QP_ID 0x400UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6943,7 +7040,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
#define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- __le32 raw_qp_id;
+ __le32 qp_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7409,6 +7506,8 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
#define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
#define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ #define RING_ALLOC_REQ_FLAGS_DPI_ROCE_MANAGED 0x10UL
+ #define RING_ALLOC_REQ_FLAGS_TIMER_RESET 0x20UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -7583,6 +7682,7 @@ struct hwrm_ring_aggint_qcaps_output {
#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TMR_RESET_ON_ALLOC 0x200UL
__le32 nq_params;
#define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
__le16 num_cmpl_dma_aggr_min;
@@ -10325,6 +10425,9 @@ struct hwrm_dbg_coredump_retrieve_input {
__le16 instance;
__le16 unused_1;
u8 seg_flags;
+ #define DBG_COREDUMP_RETRIEVE_REQ_SFLAG_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_RETRIEVE_REQ_SFLAG_CRASHED_DATA 0x2UL
+ #define DBG_COREDUMP_RETRIEVE_REQ_SFLAG_NO_COMPRESS 0x4UL
u8 unused_2;
__le16 unused_3;
__le32 unused_4;
@@ -10926,6 +11029,38 @@ struct hwrm_nvm_set_variable_cmd_err {
u8 unused_0[7];
};
+/* hwrm_nvm_defrag_input (size:192b/24B) */
+struct hwrm_nvm_defrag_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define NVM_DEFRAG_REQ_FLAGS_DEFRAG 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_nvm_defrag_output (size:128b/16B) */
+struct hwrm_nvm_defrag_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */
+struct hwrm_nvm_defrag_cmd_err {
+ u8 code;
+ #define NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_FAIL 0x1UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL 0x2UL
+ #define NVM_DEFRAG_CMD_ERR_CODE_LAST NVM_DEFRAG_CMD_ERR_CODE_CHECK_FAIL
+ u8 unused_0[7];
+};
+
/* hwrm_selftest_qlist_input (size:128b/16B) */
struct hwrm_selftest_qlist_input {
__le16 req_type;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index d1eb5c7729cb..2f535331f926 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
@@ -470,7 +470,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
- void *value) {
+ void *value, u64 flags) {
return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e5be698256d1..cd9b96434904 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -287,6 +287,7 @@ struct bpf_map_owner {
enum bpf_prog_type type;
bool jited;
bool xdp_has_frags;
+ bool sleepable;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
enum bpf_attach_type expected_attach_type;
@@ -673,6 +674,22 @@ void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags,
struct bpf_dynptr *ptr__uninit);
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+void *bpf_arena_alloc_pages_non_sleepable(void *p__map, void *addr__ign, u32 page_cnt, int node_id,
+ u64 flags);
+void bpf_arena_free_pages_non_sleepable(void *p__map, void *ptr__ign, u32 page_cnt);
+#else
+static inline void *bpf_arena_alloc_pages_non_sleepable(void *p__map, void *addr__ign, u32 page_cnt,
+ int node_id, u64 flags)
+{
+ return NULL;
+}
+
+static inline void bpf_arena_free_pages_non_sleepable(void *p__map, void *ptr__ign, u32 page_cnt)
+{
+}
+#endif
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* bpf_type_flag contains a set of flags that are applicable to the values of
@@ -737,7 +754,7 @@ enum bpf_type_flag {
MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
/* PTR was passed from the kernel in a trusted context, and may be
- * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * passed to kfuncs or BPF helper functions.
* Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
* PTR_UNTRUSTED refers to a kptr that was read directly from a map
* without invoking bpf_kptr_xchg(). What we really need to know is
@@ -1213,6 +1230,9 @@ enum {
#endif
};
+#define BPF_TRAMP_COOKIE_INDEX_SHIFT 8
+#define BPF_TRAMP_IS_RETURN_SHIFT 63
+
struct bpf_tramp_links {
struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
int nr_links;
@@ -1293,6 +1313,7 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_MODIFY_RETURN,
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE, /* more than MAX */
+ BPF_TRAMP_FSESSION,
};
struct bpf_tramp_image {
@@ -1309,14 +1330,17 @@ struct bpf_tramp_image {
};
struct bpf_trampoline {
- /* hlist for trampoline_table */
- struct hlist_node hlist;
+ /* hlist for trampoline_key_table */
+ struct hlist_node hlist_key;
+ /* hlist for trampoline_ip_table */
+ struct hlist_node hlist_ip;
struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
u32 flags;
u64 key;
+ unsigned long ip;
struct {
struct btf_func_model model;
void *addr;
@@ -1418,7 +1442,7 @@ bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset,
void *src, u64 len, u64 flags);
void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
- void *buffer__opt, u64 buffer__szk);
+ void *buffer__nullable, u64 buffer__szk);
static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len)
{
@@ -1742,8 +1766,12 @@ struct bpf_prog_aux {
struct rcu_head rcu;
};
struct bpf_stream stream[2];
+ struct mutex st_ops_assoc_mutex;
+ struct bpf_map __rcu *st_ops_assoc;
};
+#define BPF_NR_CONTEXTS 4 /* normal, softirq, hardirq, NMI */
+
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -1759,6 +1787,7 @@ struct bpf_prog {
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1, /* Do we call get_func_ip() */
+ call_session_cookie:1, /* Do we call bpf_session_cookie() */
tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
sleepable:1; /* BPF program is sleepable */
enum bpf_prog_type type; /* Type of BPF program */
@@ -1770,7 +1799,7 @@ struct bpf_prog {
u8 tag[BPF_TAG_SIZE];
};
struct bpf_prog_stats __percpu *stats;
- int __percpu *active;
+ u8 __percpu *active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux; /* Auxiliary fields */
@@ -1855,6 +1884,11 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_fsession_link {
+ struct bpf_tracing_link link;
+ struct bpf_tramp_link fexit;
+};
+
struct bpf_raw_tp_link {
struct bpf_link link;
struct bpf_raw_event_map *btp;
@@ -2002,6 +2036,40 @@ struct bpf_struct_ops_common_value {
enum bpf_struct_ops_state state;
};
+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+ u32 val;
+
+ preempt_disable();
+ active[rctx]++;
+ val = le32_to_cpu(*(__le32 *)active);
+ preempt_enable();
+ if (val != BIT(rctx * 8))
+ return false;
+
+ return true;
+#else
+ return this_cpu_inc_return(*(int __percpu *)(prog->active)) == 1;
+#endif
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+ u8 rctx = interrupt_context_level();
+ u8 *active = this_cpu_ptr(prog->active);
+
+ preempt_disable();
+ active[rctx]--;
+ preempt_enable();
+#else
+ this_cpu_dec(*(int __percpu *)(prog->active));
+#endif
+}
+
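A caller-side sketch of the intended pairing (the wrapper below is hypothetical, not part of this patch). Note that the ARM64 "get" helper increments the per-context counter even when it reports recursion, so "put" must run on both paths, mirroring how the !ARM64 this_cpu_inc_return()/this_cpu_dec() pair behaves:

	static u32 run_prog_once(struct bpf_prog *prog, const void *ctx)
	{
		u32 ret = 0;

		if (bpf_prog_get_recursion_context(prog))
			ret = prog->bpf_func(ctx, prog->insnsi);
		bpf_prog_put_recursion_context(prog);	/* balance the increment on both paths */
		return ret;
	}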
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developer to register a struct_ops type and generate
* type information correctly. Developers should use this macro to register
@@ -2044,6 +2112,9 @@ static inline void bpf_module_put(const void *data, struct module *owner)
module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);
+int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog);
+void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux);
u32 bpf_struct_ops_id(const void *kdata);
#ifdef CONFIG_NET
@@ -2091,6 +2162,17 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
+static inline int bpf_prog_assoc_struct_ops(struct bpf_prog *prog, struct bpf_map *map)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_prog_disassoc_struct_ops(struct bpf_prog *prog)
+{
+}
+static inline void *bpf_prog_get_assoc_struct_ops(const struct bpf_prog_aux *aux)
+{
+ return NULL;
+}
static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
}
@@ -2101,6 +2183,37 @@ static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_op
#endif
+static inline int bpf_fsession_cnt(struct bpf_tramp_links *links)
+{
+ struct bpf_tramp_links fentries = links[BPF_TRAMP_FENTRY];
+ int cnt = 0;
+
+ for (int i = 0; i < links[BPF_TRAMP_FENTRY].nr_links; i++) {
+ if (fentries.links[i]->link.prog->expected_attach_type == BPF_TRACE_FSESSION)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static inline bool bpf_prog_calls_session_cookie(struct bpf_tramp_link *link)
+{
+ return link->link.prog->call_session_cookie;
+}
+
+static inline int bpf_fsession_cookie_cnt(struct bpf_tramp_links *links)
+{
+ struct bpf_tramp_links fentries = links[BPF_TRAMP_FENTRY];
+ int cnt = 0;
+
+ for (int i = 0; i < links[BPF_TRAMP_FENTRY].nr_links; i++) {
+ if (bpf_prog_calls_session_cookie(fentries.links[i]))
+ cnt++;
+ }
+
+ return cnt;
+}
+
int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
const struct bpf_ctx_arg_aux *info, u32 cnt);
@@ -2540,6 +2653,10 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG
+void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+ struct mem_cgroup **new_memcg);
+void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+ struct mem_cgroup *memcg);
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
@@ -2564,6 +2681,17 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
kvcalloc(_n, _size, _flags)
#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
__alloc_percpu_gfp(_size, _align, _flags)
+static inline void bpf_map_memcg_enter(const struct bpf_map *map, struct mem_cgroup **old_memcg,
+ struct mem_cgroup **new_memcg)
+{
+ *new_memcg = NULL;
+ *old_memcg = NULL;
+}
+
+static inline void bpf_map_memcg_exit(struct mem_cgroup *old_memcg,
+ struct mem_cgroup *memcg)
+{
+}
#endif
static inline int
@@ -2764,8 +2892,8 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
-int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 flags);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
@@ -3243,6 +3371,11 @@ static inline void bpf_prog_report_arena_violation(bool write, unsigned long add
}
#endif /* CONFIG_BPF_SYSCALL */
+static inline bool bpf_net_capable(void)
+{
+ return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
+}
+
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
@@ -3832,14 +3965,43 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
}
#endif
+static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
+{
+ switch (map_type) {
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
{
- if (flags & ~allowed_flags)
+ u32 cpu;
+
+ if ((u32)flags & ~allowed_flags)
return -EINVAL;
if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
return -EINVAL;
+ if (!(flags & BPF_F_CPU) && flags >> 32)
+ return -EINVAL;
+
+ if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
+ if (!bpf_map_supports_cpu_flags(map->map_type))
+ return -EINVAL;
+ if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS))
+ return -EINVAL;
+
+ cpu = flags >> 32;
+ if ((flags & BPF_F_CPU) && cpu >= num_possible_cpus())
+ return -ERANGE;
+ }
+
return 0;
}
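A concrete illustration of the encoding validated above (a sketch, using the BPF_F_CPU flag checked in bpf_map_check_op_flags()): the target CPU rides in the upper 32 bits of flags, so a caller that wants only CPU 3's slot of a per-CPU map would pass:

	u64 flags = BPF_F_CPU | ((u64)3 << 32);	/* upper half selects the CPU */

	err = bpf_percpu_array_copy(map, &key, value, flags);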
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 66432248cd81..85efa9772530 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -15,12 +15,13 @@
#include <linux/types.h>
#include <linux/bpf_mem_alloc.h>
#include <uapi/linux/btf.h>
+#include <asm/rqspinlock.h>
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
struct bpf_local_storage_map_bucket {
struct hlist_head list;
- raw_spinlock_t lock;
+ rqspinlock_t lock;
};
/* The map is not the primary owner of a bpf_local_storage_elem.
@@ -67,6 +68,11 @@ struct bpf_local_storage_data {
u8 data[] __aligned(8);
};
+#define SELEM_MAP_UNLINKED (1 << 0)
+#define SELEM_STORAGE_UNLINKED (1 << 1)
+#define SELEM_UNLINKED (SELEM_MAP_UNLINKED | SELEM_STORAGE_UNLINKED)
+#define SELEM_TOFREE (1 << 2)
+
/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
struct hlist_node map_node; /* Linked to bpf_local_storage_map */
@@ -79,7 +85,9 @@ struct bpf_local_storage_elem {
* after raw_spin_unlock
*/
};
- /* 8 bytes hole */
+ atomic_t state;
+ bool use_kmalloc_nolock;
+ /* 3 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
@@ -88,13 +96,14 @@ struct bpf_local_storage_elem {
struct bpf_local_storage {
struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
- struct bpf_local_storage_map __rcu *smap;
struct hlist_head list; /* List of bpf_local_storage_elem */
void *owner; /* The object that owns the above "list" of
* bpf_local_storage_elem.
*/
struct rcu_head rcu;
- raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+ rqspinlock_t lock; /* Protect adding/removing from the "list" */
+ u64 mem_charge; /* Copy of mem charged to owner. Protected by "lock" */
+ refcount_t owner_refcnt;/* Used to pin owner when map_free is uncharging */
bool use_kmalloc_nolock;
};
@@ -162,11 +171,10 @@ bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
return SDATA(selem);
}
-void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
void bpf_local_storage_map_free(struct bpf_map *map,
- struct bpf_local_storage_cache *cache,
- int __percpu *busy_counter);
+ struct bpf_local_storage_cache *cache);
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
const struct btf *btf,
@@ -176,10 +184,11 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem);
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
+int bpf_selem_unlink(struct bpf_local_storage_elem *selem);
-void bpf_selem_link_map(struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *selem);
+int bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
index 929225f7b095..0b9f4caeeb0a 100644
--- a/include/linux/bpf_mprog.h
+++ b/include/linux/bpf_mprog.h
@@ -340,4 +340,14 @@ static inline bool bpf_mprog_supported(enum bpf_prog_type type)
return false;
}
}
+
+static inline bool bpf_mprog_detach_empty(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return bpf_net_capable();
+ default:
+ return false;
+ }
+}
#endif /* __BPF_MPROG_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 130bcbd66f60..ef8e45a362d9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -147,8 +147,12 @@ struct bpf_reg_state {
* registers. Example:
* r1 = r2; both will have r1->id == r2->id == N
* r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ * r3 = r2; both will have r3->id == r2->id == N
+ * w3 += 10; r3->id == N | BPF_ADD_CONST32 and r3->off == 10
*/
-#define BPF_ADD_CONST (1U << 31)
+#define BPF_ADD_CONST64 (1U << 31)
+#define BPF_ADD_CONST32 (1U << 30)
+#define BPF_ADD_CONST (BPF_ADD_CONST64 | BPF_ADD_CONST32)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
@@ -692,12 +696,16 @@ struct bpf_id_pair {
struct bpf_idmap {
u32 tmp_id_gen;
+ u32 cnt;
struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};
struct bpf_idset {
- u32 count;
- u32 ids[BPF_ID_MAP_SIZE];
+ u32 num_ids;
+ struct {
+ u32 id;
+ u32 cnt;
+ } entries[BPF_ID_MAP_SIZE];
};
/* see verifier.c:compute_scc_callchain() */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index f06976ffb63f..48108471c5b1 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -34,7 +34,7 @@
*
* And the following kfunc:
*
- * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE)
*
* All invocations to the kfunc must pass the unmodified, unwalked task:
*
@@ -66,7 +66,6 @@
* return 0;
* }
*/
-#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
@@ -79,6 +78,7 @@
#define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */
#define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */
#define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */
+#define KF_IMPLICIT_ARGS (1 << 16) /* kfunc has implicit arguments supplied by the verifier */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
@@ -220,6 +220,7 @@ bool btf_is_module(const struct btf *btf);
bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
+u32 btf_named_start_id(const struct btf *btf, bool own);
struct btf *btf_base_btf(const struct btf *btf);
bool btf_type_is_i32(const struct btf_type *t);
bool btf_type_is_i64(const struct btf_type *t);
@@ -575,8 +576,8 @@ const char *btf_name_by_offset(const struct btf *btf, u32 offset);
const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
-u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
- const struct bpf_prog *prog);
+u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog);
+bool btf_kfunc_is_allowed(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog);
u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
const struct bpf_prog *prog);
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 831c1b4b626c..7acc06b22fb7 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -7,7 +7,10 @@
#define BUILD_ID_SIZE_MAX 20
struct vm_area_struct;
+struct file;
+
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size);
int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
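A usage sketch for the new file-based variant, assuming the same 0-on-success convention as build_id_parse():

	unsigned char build_id[BUILD_ID_SIZE_MAX];
	__u32 sz;

	if (!build_id_parse_file(file, build_id, &sz))
		/* build_id[0..sz) now holds the ELF build ID */;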
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 5fb8d0e3f9c1..3287232e3cad 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -58,6 +58,7 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
+void can_set_skb_uid(struct sk_buff *skb);
void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 1abc25a8d144..a70a02967071 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/can.h>
+#include <net/can.h>
#include <net/sock.h>
void can_flush_echo_skb(struct net_device *dev);
@@ -37,37 +38,20 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
-/*
- * The struct can_skb_priv is used to transport additional information along
- * with the stored struct can(fd)_frame that can not be contained in existing
- * struct sk_buff elements.
- * N.B. that this information must not be modified in cloned CAN sk_buffs.
- * To modify the CAN frame content or the struct can_skb_priv content
- * skb_copy() needs to be used instead of skb_clone().
- */
-
-/**
- * struct can_skb_priv - private additional data inside CAN sk_buffs
- * @ifindex: ifindex of the first interface the CAN frame appeared on
- * @skbcnt: atomic counter to have an unique id together with skb pointer
- * @frame_len: length of CAN frame in data link layer
- * @cf: align to the following CAN frame at skb->data
- */
-struct can_skb_priv {
- int ifindex;
- int skbcnt;
- unsigned int frame_len;
- struct can_frame cf[];
-};
-
-static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
+static inline struct can_skb_ext *can_skb_ext_add(struct sk_buff *skb)
{
- return (struct can_skb_priv *)(skb->head);
+ struct can_skb_ext *csx = skb_ext_add(skb, SKB_EXT_CAN);
+
+ /* skb_ext_add() returns uninitialized space */
+ if (csx)
+ csx->can_gw_hops = 0;
+
+ return csx;
}
-static inline void can_skb_reserve(struct sk_buff *skb)
+static inline struct can_skb_ext *can_skb_ext_find(struct sk_buff *skb)
{
- skb_reserve(skb, sizeof(struct can_skb_priv));
+ return skb_ext_find(skb, SKB_EXT_CAN);
}
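A sketch of how a gateway path might use the new extension; only the can_gw_hops field is visible in this hunk, and the hop accounting shown is illustrative:

	struct can_skb_ext *csx = can_skb_ext_find(skb);

	if (!csx)
		csx = can_skb_ext_add(skb);	/* may return NULL under memory pressure */
	if (csx)
		csx->can_gw_hops++;		/* count one more gateway traversal */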
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 1fb08922552c..37db92b3d6f8 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -203,6 +203,12 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
ns_capable(ns, CAP_SYS_ADMIN);
}
+static inline bool checkpoint_restore_ns_capable_noaudit(struct user_namespace *ns)
+{
+ return ns_capable_noaudit(ns, CAP_CHECKPOINT_RESTORE) ||
+ ns_capable_noaudit(ns, CAP_SYS_ADMIN);
+}
+
/* audit system wants to get cap info from files as well */
int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
const struct dentry *dentry,
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c7f2c63b3bc3..08e5dbe15ca4 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -31,6 +31,12 @@
#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
+/*
+ * name for "old" CephFS file systems,
+ * see ceph.git e2b151d009640114b2565c901d6f41f6cd5ec652
+ */
+#define CEPH_OLD_FS_NAME "cephfs"
+
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index f7cc60de0058..bb92f5c169ca 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -535,10 +535,10 @@ struct cgroup {
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
- u16 subtree_control;
- u16 subtree_ss_mask;
- u16 old_subtree_control;
- u16 old_subtree_ss_mask;
+ u32 subtree_control;
+ u32 subtree_ss_mask;
+ u32 old_subtree_control;
+ u32 old_subtree_ss_mask;
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 8d41b917c77d..dbc4162921e9 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -278,16 +278,21 @@ const volatile void * __must_check_fn(const volatile void *val)
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
typedef _type class_##_name##_t; \
+typedef _type lock_##_name##_t; \
static __always_inline void class_##_name##_destructor(_type *p) \
+ __no_context_analysis \
{ _type _T = *p; _exit; } \
static __always_inline _type class_##_name##_constructor(_init_args) \
+ __no_context_analysis \
{ _type t = _init; return t; }
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef lock_##_name##_t lock_##_name##ext##_t; \
typedef class_##_name##_t class_##_name##ext##_t; \
static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); } \
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ __no_context_analysis \
{ class_##_name##_t t = _init; return t; }
#define CLASS(_name, var) \
@@ -474,35 +479,80 @@ _label: \
*/
#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef _type lock_##_name##_t; \
typedef struct { \
_type *lock; \
__VA_ARGS__; \
} class_##_name##_t; \
\
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
+ __no_context_analysis \
{ \
if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
} \
\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
-#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+#define __DEFINE_LOCK_GUARD_1(_name, _type, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
-#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+#define __DEFINE_LOCK_GUARD_0(_name, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(void) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = (void*)1 }, \
*_T __maybe_unused = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
+#define DECLARE_LOCK_GUARD_0_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(void) _lock;\
+static inline void class_##_name##_destructor(class_##_name##_t *_T) _unlock;
+
+/*
+ * To support Context Analysis, we need to allow the compiler to see the
+ * acquisition and release of the context lock. However, the "cleanup" helpers
+ * wrap the lock in a struct passed through separate helper functions, which
+ * hides the lock alias from the compiler (no inter-procedural analysis).
+ *
+ * To make it work, we introduce an explicit alias to the context lock instance
+ * that is "cleaned" up with a separate cleanup helper. This helper is a dummy
+ * function that does nothing at runtime, but has the "_unlock" attribute to
+ * tell the compiler what happens at the end of the scope.
+ *
+ * To generalize the pattern, the WITH_LOCK_GUARD_1_ATTRS() macro should be used
+ * to redefine the constructor, which then also creates the alias variable with
+ * the right "cleanup" attribute, *after* DECLARE_LOCK_GUARD_1_ATTRS() has been
+ * used.
+ *
+ * Example usage:
+ *
+ * DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+ * #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+ *
+ * Note: To support the for-loop based scoped helpers, the auxiliary variable
+ * must be a pointer to the "class" type because it is defined in the same
+ * statement as the guard variable. However, we initialize it with the lock
+ * pointer (despite the type mismatch, the compiler's alias analysis still works
+ * as expected). The "_unlock" attribute receives a pointer to the auxiliary
+ * variable (a double pointer to the class type), and must be cast and
+ * dereferenced appropriately.
+ */
+#define DECLARE_LOCK_GUARD_1_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T) _lock;\
+static __always_inline void __class_##_name##_cleanup_ctx(class_##_name##_t **_T) \
+ __no_context_analysis _unlock { }
+#define WITH_LOCK_GUARD_1_ATTRS(_name, _T) \
+ class_##_name##_constructor(_T), \
+ *__UNIQUE_ID(unlock) __cleanup(__class_##_name##_cleanup_ctx) = (void *)(unsigned long)(_T)
+
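For orientation, with the mutex example from the comment above, guard(mutex)(&m); would now expand to roughly the following (simplified; the real expansion uses a __UNIQUE_ID name for the alias variable):

	class_mutex_t scope __cleanup(class_mutex_destructor) = class_mutex_constructor(&m),
		      *__alias __cleanup(__class_mutex_cleanup_ctx) = (void *)(unsigned long)(&m);

The analysis thus sees &m acquired at the declaration and released when __alias leaves scope, while the runtime unlock still happens in class_mutex_destructor().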
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
diff --git a/include/linux/clk.h b/include/linux/clk.h
index b607482ca77e..c571e294f0ef 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -479,6 +479,22 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
+ * devm_clk_bulk_get_optional_enable - Get and enable optional bulk clocks (managed)
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Behaves the same as devm_clk_bulk_get_optional() but also prepares and
+ * enables the clocks in one managed operation. The clocks will automatically
+ * be disabled, unprepared and freed when the device is unbound.
+ *
+ * Return: 0 if all clocks specified in the clk_bulk_data table are obtained
+ * and enabled successfully, or if no clk provider was available for any of
+ * them. Otherwise returns a negative errno.
+ */
+int __must_check devm_clk_bulk_get_optional_enable(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
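A probe-path sketch (clock names are illustrative):

	struct clk_bulk_data clks[] = {
		{ .id = "bus" },
		{ .id = "core" },
	};
	int ret;

	ret = devm_clk_bulk_get_optional_enable(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get and enable clocks\n");
	/* no explicit disable/unprepare needed: devres handles it on unbind */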
+/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
@@ -1029,6 +1045,13 @@ static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
return 0;
}
+static inline int __must_check devm_clk_bulk_get_optional_enable(struct device *dev,
+ int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..d0793eaaadaa 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -49,29 +49,23 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
-extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+ unsigned int align, bool no_warn);
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+ unsigned long count);
+
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
extern void cma_reserve_pages_on_error(struct cma *cma);
-#ifdef CONFIG_CMA
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
-bool cma_free_folio(struct cma *cma, const struct folio *folio);
-bool cma_validate_zones(struct cma *cma);
+#ifdef CONFIG_DMA_CMA
+extern bool cma_skip_dt_default_reserved_mem(void);
#else
-static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
-{
- return NULL;
-}
-
-static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
-{
- return false;
-}
-static inline bool cma_validate_zones(struct cma *cma)
+static inline bool cma_skip_dt_default_reserved_mem(void)
{
return false;
}
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 7edf1a07b535..e1123dd28486 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -153,4 +153,4 @@
* Bindgen uses LLVM even if our C compiler is GCC, so we cannot
* rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
*/
-#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19)
+#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ > 19 || (__clang_major__ == 19 && __clang_minor__ > 0))
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
new file mode 100644
index 000000000000..00c074a2ccb0
--- /dev/null
+++ b/include/linux/compiler-context-analysis.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Macros and attributes for compiler-based static context analysis.
+ */
+
+#ifndef _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+#define _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+
+#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__) && !defined(__GENKSYMS__)
+
+/*
+ * These attributes define new context lock (Clang: capability) types.
+ * Internal only.
+ */
+# define __ctx_lock_type(name) __attribute__((capability(#name)))
+# define __reentrant_ctx_lock __attribute__((reentrant_capability))
+# define __acquires_ctx_lock(...) __attribute__((acquire_capability(__VA_ARGS__)))
+# define __acquires_shared_ctx_lock(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
+# define __try_acquires_ctx_lock(ret, var) __attribute__((try_acquire_capability(ret, var)))
+# define __try_acquires_shared_ctx_lock(ret, var) __attribute__((try_acquire_shared_capability(ret, var)))
+# define __releases_ctx_lock(...) __attribute__((release_capability(__VA_ARGS__)))
+# define __releases_shared_ctx_lock(...) __attribute__((release_shared_capability(__VA_ARGS__)))
+# define __returns_ctx_lock(var) __attribute__((lock_returned(var)))
+
+/*
+ * The below are used to annotate code being checked. Internal only.
+ */
+# define __excludes_ctx_lock(...) __attribute__((locks_excluded(__VA_ARGS__)))
+# define __requires_ctx_lock(...) __attribute__((requires_capability(__VA_ARGS__)))
+# define __requires_shared_ctx_lock(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
+
+/*
+ * The "assert_capability" attribute is a bit confusingly named. It does not
+ * generate a check. Instead, it tells the analysis to *assume* the capability
+ * is held. This is used to augment runtime assertions, which can then help
+ * with patterns beyond the compiler's static reasoning abilities.
+ */
+# define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__)))
+# define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
+
+/**
+ * __guarded_by - struct member and globals attribute, declares variable
+ * only accessible within active context
+ *
+ * Declares that the struct member or global variable is only accessible within
+ * the context entered by the given context lock. Read operations on the data
+ * require shared access, while write operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long counter __guarded_by(&lock);
+ * };
+ */
+# define __guarded_by(...) __attribute__((guarded_by(__VA_ARGS__)))
+
+/**
+ * __pt_guarded_by - struct member and globals attribute, declares pointed-to
+ * data only accessible within active context
+ *
+ * Declares that the data pointed to by the struct member pointer or global
+ * pointer is only accessible within the context entered by the given context
+ * lock. Read operations on the data require shared access, while write
+ * operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long *counter __pt_guarded_by(&lock);
+ * };
+ */
+# define __pt_guarded_by(...) __attribute__((pt_guarded_by(__VA_ARGS__)))
+
+/**
+ * context_lock_struct() - declare or define a context lock struct
+ * @name: struct name
+ *
+ * Helper to declare or define a struct type that is also a context lock.
+ *
+ * .. code-block:: c
+ *
+ * context_lock_struct(my_handle) {
+ * int foo;
+ * long bar;
+ * };
+ *
+ * struct some_state {
+ * ...
+ * };
+ * // ... declared elsewhere ...
+ * context_lock_struct(some_state);
+ *
+ * Note: The implementation defines several helper functions that can acquire
+ * and release the context lock.
+ */
+# define context_lock_struct(name, ...) \
+ struct __ctx_lock_type(name) __VA_ARGS__ name; \
+ static __always_inline void __acquire_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_ctx_lock(var) { } \
+ static __always_inline void __acquire_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_shared_ctx_lock(var) { } \
+ static __always_inline bool __try_acquire_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline bool __try_acquire_shared_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_shared_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline void __release_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_ctx_lock(var) { } \
+ static __always_inline void __release_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_shared_ctx_lock(var) { } \
+ static __always_inline void __assume_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_ctx_lock(var) { } \
+ static __always_inline void __assume_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_shared_ctx_lock(var) { } \
+ struct name
+
+/**
+ * disable_context_analysis() - disables context analysis
+ *
+ * Disables context analysis. Must be paired with a later
+ * enable_context_analysis().
+ */
+# define disable_context_analysis() \
+ __diag_push(); \
+ __diag_ignore_all("-Wunknown-warning-option", "") \
+ __diag_ignore_all("-Wthread-safety", "") \
+ __diag_ignore_all("-Wthread-safety-pointer", "")
+
+/**
+ * enable_context_analysis() - re-enables context analysis
+ *
+ * Re-enables context analysis. Must be paired with a prior
+ * disable_context_analysis().
+ */
+# define enable_context_analysis() __diag_pop()
+
+/**
+ * __no_context_analysis - function attribute, disables context analysis
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Prefer use of `context_unsafe()` where possible.
+ */
+# define __no_context_analysis __attribute__((no_thread_safety_analysis))
+
+#else /* !WARN_CONTEXT_ANALYSIS */
+
+# define __ctx_lock_type(name)
+# define __reentrant_ctx_lock
+# define __acquires_ctx_lock(...)
+# define __acquires_shared_ctx_lock(...)
+# define __try_acquires_ctx_lock(ret, var)
+# define __try_acquires_shared_ctx_lock(ret, var)
+# define __releases_ctx_lock(...)
+# define __releases_shared_ctx_lock(...)
+# define __assumes_ctx_lock(...)
+# define __assumes_shared_ctx_lock(...)
+# define __returns_ctx_lock(var)
+# define __guarded_by(...)
+# define __pt_guarded_by(...)
+# define __excludes_ctx_lock(...)
+# define __requires_ctx_lock(...)
+# define __requires_shared_ctx_lock(...)
+# define __acquire_ctx_lock(var) do { } while (0)
+# define __acquire_shared_ctx_lock(var) do { } while (0)
+# define __try_acquire_ctx_lock(var, ret) (ret)
+# define __try_acquire_shared_ctx_lock(var, ret) (ret)
+# define __release_ctx_lock(var) do { } while (0)
+# define __release_shared_ctx_lock(var) do { } while (0)
+# define __assume_ctx_lock(var) do { (void)(var); } while (0)
+# define __assume_shared_ctx_lock(var) do { (void)(var); } while (0)
+# define context_lock_struct(name, ...) struct __VA_ARGS__ name
+# define disable_context_analysis()
+# define enable_context_analysis()
+# define __no_context_analysis
+
+#endif /* WARN_CONTEXT_ANALYSIS */
+
+/**
+ * context_unsafe() - disable context checking for contained code
+ *
+ * Disables context checking for the contained statements or expression.
+ *
+ * .. code-block:: c
+ *
+ * struct some_data {
+ * spinlock_t lock;
+ * int counter __guarded_by(&lock);
+ * };
+ *
+ * int foo(struct some_data *d)
+ * {
+ * // ...
+ * // other code that is still checked ...
+ * // ...
+ * return context_unsafe(d->counter);
+ * }
+ */
+#define context_unsafe(...) \
+({ \
+ disable_context_analysis(); \
+ __VA_ARGS__; \
+ enable_context_analysis() \
+})
+
+/**
+ * __context_unsafe() - function attribute, disable context checking
+ * @comment: comment explaining why opt-out is safe
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Forces adding an inline comment as argument.
+ */
+#define __context_unsafe(comment) __no_context_analysis
+
+/**
+ * context_unsafe_alias() - helper to insert a context lock "alias barrier"
+ * @p: pointer aliasing a context lock or object containing context locks
+ *
+ * No-op function that acts as a "context lock alias barrier", where the
+ * analysis rightfully detects that we're switching aliases, but the switch is
+ * considered safe but beyond the analysis reasoning abilities.
+ *
+ * This should be inserted before the first use of such an alias.
+ *
+ * Implementation Note: The compiler ignores aliases that may be reassigned but
+ * their value cannot be determined (e.g. when passing a non-const pointer to an
+ * alias as a function argument).
+ */
+#define context_unsafe_alias(p) _context_unsafe_alias((void **)&(p))
+static inline void _context_unsafe_alias(void **p) { }
+
+/**
+ * token_context_lock() - declare an abstract global context lock instance
+ * @name: token context lock name
+ *
+ * Helper that declares an abstract global context lock instance @name that is
+ * not backed by a real data structure (a linker error results if it is
+ * accidentally referenced).
+ * The type name is `__ctx_lock_@name`.
+ */
+#define token_context_lock(name, ...) \
+ context_lock_struct(__ctx_lock_##name, ##__VA_ARGS__) {}; \
+ extern const struct __ctx_lock_##name *name
+
+/**
+ * token_context_lock_instance() - declare another instance of a global context lock
+ * @ctx: token context lock previously declared with token_context_lock()
+ * @name: name of additional global context lock instance
+ *
+ * Helper that declares an additional instance @name of the same token context
+ * lock class @ctx. This is helpful where multiple related token contexts are
+ * declared, to allow using the same underlying type (`__ctx_lock_@ctx`) as
+ * function arguments.
+ */
+#define token_context_lock_instance(ctx, name) \
+ extern const struct __ctx_lock_##ctx *name
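An illustrative declaration pair (the token names here are hypothetical):

	token_context_lock(probe_ctx);
	token_context_lock_instance(probe_ctx, remove_ctx);

	/* Both instances share struct __ctx_lock_probe_ctx, so a single
	 * annotated helper signature covers either token:
	 */
	static void check_hw_state(const struct __ctx_lock_probe_ctx *ctx) __must_hold(ctx);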
+
+/*
+ * Common keywords for static context analysis.
+ */
+
+/**
+ * __must_hold() - function attribute, caller must hold exclusive context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) exclusively.
+ */
+#define __must_hold(...) __requires_ctx_lock(__VA_ARGS__)
+
+/**
+ * __must_not_hold() - function attribute, caller must not hold context lock
+ *
+ * Function attribute declaring that the caller must not hold the given context
+ * lock instance(s).
+ */
+#define __must_not_hold(...) __excludes_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires() - function attribute, function acquires context lock exclusively
+ *
+ * Function attribute declaring that the function acquires the given context
+ * lock instance(s) exclusively, but does not release them.
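+ *
+ * A sketch of a matching pair (names are hypothetical; see also __releases()):
+ *
+ * .. code-block:: c
+ *
+ *	void data_lock(struct some_data *d) __acquires(&d->lock);
+ *	void data_unlock(struct some_data *d) __releases(&d->lock);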
+ */
+#define __acquires(...) __acquires_ctx_lock(__VA_ARGS__)
+
+/*
+ * Clang's analysis does not care precisely about the value, only that it is
+ * either zero or non-zero. So the __cond_acquires() interface might be
+ * misleading if we say that @ret is the value returned if acquired. Instead,
+ * provide symbolic variants which we translate.
+ */
+#define __cond_acquires_impl_true(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_false(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonzero(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_0(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonnull(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_NULL(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+
+/**
+ * __cond_acquires() - function attribute, function conditionally
+ * acquires a context lock exclusively
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x exclusively, but does not release it. The
+ * function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
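+ *
+ * For example (a sketch; the function is hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	// Returns true if d->lock was acquired.
+ *	bool data_trylock(struct some_data *d) __cond_acquires(true, &d->lock);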
+ */
+#define __cond_acquires(ret, x) __cond_acquires_impl_##ret(x)
+
+/**
+ * __releases() - function attribute, function releases a context lock exclusively
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) exclusively. The associated context(s) must be active on
+ * entry.
+ */
+#define __releases(...) __releases_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire() - function to acquire context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x exclusively.
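+ *
+ * Typically used where the acquisition is opaque to the analysis, e.g.
+ * (a sketch; the helper is hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	void data_lock(struct some_data *d) __acquires(&d->lock)
+ *	{
+ *		arch_do_lock(d);
+ *		__acquire(&d->lock);	// tell the analysis the lock is held
+ *	}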
+ */
+#define __acquire(x) __acquire_ctx_lock(x)
+
+/**
+ * __release() - function to release context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x.
+ */
+#define __release(x) __release_ctx_lock(x)
+
+/**
+ * __must_hold_shared() - function attribute, caller must hold shared context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) with shared access.
+ */
+#define __must_hold_shared(...) __requires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires_shared() - function attribute, function acquires context lock shared
+ *
+ * Function attribute declaring that the function acquires the given
+ * context lock instance(s) with shared access, but does not release them.
+ */
+#define __acquires_shared(...) __acquires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __cond_acquires_shared() - function attribute, function conditionally
+ * acquires a context lock shared
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x with shared access, but does not release it.
+ * The function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ */
+#define __cond_acquires_shared(ret, x) __cond_acquires_impl_##ret(x, _shared)
+
+/**
+ * __releases_shared() - function attribute, function releases a
+ * context lock shared
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) with shared access. The associated context(s) must be
+ * active on entry.
+ */
+#define __releases_shared(...) __releases_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire_shared() - function to acquire context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x with shared
+ * access.
+ */
+#define __acquire_shared(x) __acquire_shared_ctx_lock(x)
+
+/**
+ * __release_shared() - function to release context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x with shared
+ * access.
+ */
+#define __release_shared(x) __release_shared_ctx_lock(x)
+
+/**
+ * __acquire_ret() - helper to acquire context lock of return value
+ * @call: call expression
+ * @ret_expr: acquire expression that uses __ret
+ */
+#define __acquire_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire(ret_expr); \
+ __ret; \
+ })
+
+/**
+ * __acquire_shared_ret() - helper to acquire context lock shared of return value
+ * @call: call expression
+ * @ret_expr: acquire shared expression that uses __ret
+ */
+#define __acquire_shared_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire_shared(ret_expr); \
+ __ret; \
+ })
+
+/*
+ * Attributes to mark functions returning acquired context locks.
+ *
+ * This is purely cosmetic to help readability, and should be used with the
+ * above macros as follows:
+ *
+ * struct foo { spinlock_t lock; ... };
+ * ...
+ * #define myfunc(...) __acquire_ret(_myfunc(__VA_ARGS__), &__ret->lock)
+ * struct foo *_myfunc(int bar) __acquires_ret;
+ * ...
+ */
+#define __acquires_ret __no_context_analysis
+#define __acquires_shared_ret __no_context_analysis
+
+#endif /* _LINUX_COMPILER_CONTEXT_ANALYSIS_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04487c9bd751..af16624b29fd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define data_race(expr) \
({ \
__kcsan_disable_current(); \
+ disable_context_analysis(); \
auto __v = (expr); \
+ enable_context_analysis(); \
__kcsan_enable_current(); \
__v; \
})
@@ -231,16 +233,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
"must be non-C-string (not NUL-terminated)")
/*
- * Use __typeof_unqual__() when available.
- *
- * XXX: Remove test for __CHECKER__ once
- * sparse learns about __typeof_unqual__().
- */
-#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
-# define USE_TYPEOF_UNQUAL 1
-#endif
-
-/*
* Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
* operator when available, to return an unqualified type of the exp.
*/
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d3318a3c2577..b1b141394d13 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -41,6 +41,8 @@
# define BTF_TYPE_TAG(value) /* nothing */
#endif
+#include <linux/compiler-context-analysis.h>
+
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
@@ -51,14 +53,6 @@
# define __rcu __attribute__((noderef, address_space(__rcu)))
static inline void __chk_user_ptr(const volatile void __user *ptr) { }
static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
-/* context/locking */
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __cond_acquires(x) __attribute__((context(x,0,-1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
/* other */
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
@@ -79,14 +73,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
-/* context/locking */
-# define __must_hold(x)
-# define __acquires(x)
-# define __cond_acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
/* other */
# define __force
# define __nocast
@@ -303,6 +289,22 @@ struct ftrace_likely_data {
# define __no_kasan_or_inline __always_inline
#endif
+#ifdef CONFIG_KCSAN
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ *
+ * Defined here because defining __data_racy as volatile for KCSAN objects only
+ * causes problems in BPF Type Format (BTF) generation since struct members
+ * of core kernel data structs will be volatile in some objects and not in
+ * others. Instead define it globally for KCSAN kernels.
+ */
+# define __data_racy volatile
+#else
+# define __data_racy
+#endif
+
#ifdef __SANITIZE_THREAD__
/*
* Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
@@ -314,16 +316,9 @@ struct ftrace_likely_data {
* disable all instrumentation. See Kconfig.kcsan where this is mandatory.
*/
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
-/*
- * Type qualifier to mark variables where all data-racy accesses should be
- * ignored by KCSAN. Note, the implementation simply marks these variables as
- * volatile, since KCSAN will treat such accesses as "marked".
- */
-# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
-# define __data_racy
#endif
#ifdef __SANITIZE_MEMORY__
@@ -369,7 +364,7 @@ struct ftrace_likely_data {
* Optional: only supported since clang >= 18
*
* gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
- * clang: https://github.com/llvm/llvm-project/pull/76348
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#counted-by-counted-by-or-null-sized-by-sized-by-or-null
*
* __bdos on clang < 19.1.2 can erroneously return 0:
* https://github.com/llvm/llvm-project/pull/110497
@@ -384,6 +379,22 @@ struct ftrace_likely_data {
#endif
/*
+ * Runtime track number of objects pointed to by a pointer member for use by
+ * CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS.
+ *
+ * Optional: only supported since gcc >= 16
+ * Optional: only supported since clang >= 22
+ *
+ * gcc: https://gcc.gnu.org/pipermail/gcc-patches/2025-April/681727.html
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#counted-by-counted-by-or-null-sized-by-sized-by-or-null
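+ *
+ * A minimal sketch (the struct is hypothetical):
+ *
+ *	struct foo {
+ *		int nr;
+ *		short *data __counted_by_ptr(nr);
+ *	};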
+ */
+#ifdef CONFIG_CC_HAS_COUNTED_BY_PTR
+#define __counted_by_ptr(member) __attribute__((__counted_by__(member)))
+#else
+#define __counted_by_ptr(member)
+#endif
+
+/*
* Optional: only supported since gcc >= 15
* Optional: not supported by Clang
*
@@ -536,6 +547,37 @@ struct ftrace_likely_data {
#endif
/*
+ * Optional: only supported since gcc >= 15, clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#index-_005f_005fbuiltin_005fcounted_005fby_005fref
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#builtin-counted-by-ref
+ */
+#if __has_builtin(__builtin_counted_by_ref)
+/**
+ * __flex_counter() - Get pointer to counter member for the given
+ * flexible array, if it was annotated with __counted_by()
+ * @FAM: Pointer to flexible array member of an addressable struct instance
+ *
+ * For example, with:
+ *
+ * struct foo {
+ * int counter;
+ * short array[] __counted_by(counter);
+ * } *p;
+ *
+ * __flex_counter(p->array) will resolve to &p->counter.
+ *
+ * Note that Clang may not allow this to be assigned to a separate
+ * variable; it must be used directly.
+ *
+ * If p->array is unannotated, this returns (void *)NULL.
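+ *
+ * A hedged usage sketch, keeping the counter in sync after an allocation
+ * (@num is hypothetical); note the direct use required by Clang:
+ *
+ *	if (__flex_counter(p->array))
+ *		*__flex_counter(p->array) = num;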
+ */
+#define __flex_counter(FAM) __builtin_counted_by_ref(FAM)
+#else
+#define __flex_counter(FAM) ((void *)NULL)
+#endif
+
+/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
@@ -562,6 +604,14 @@ struct ftrace_likely_data {
#define asm_inline asm
#endif
+#ifndef __ASSEMBLY__
+/*
+ * Use __typeof_unqual__() when available.
+ */
+#if CC_HAS_TYPEOF_UNQUAL || defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -569,6 +619,7 @@ struct ftrace_likely_data {
* __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
* non-scalar types unchanged.
*/
+#ifndef USE_TYPEOF_UNQUAL
/*
* Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
* is not type-compatible with 'signed char', and we define a separate case.
@@ -586,6 +637,29 @@ struct ftrace_likely_data {
__scalar_type_to_expr_cases(long), \
__scalar_type_to_expr_cases(long long), \
default: (x)))
+#else
+#define __unqual_scalar_typeof(x) __typeof_unqual__(x)
+#endif
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * __signed_scalar_typeof(x) - Declare a signed scalar type, leaving
+ * non-scalar types unchanged.
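+ *
+ * For example, __signed_scalar_typeof(1u) resolves to 'signed int', while a
+ * struct type is left unchanged.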
+ */
+
+#define __scalar_type_to_signed_cases(type) \
+ unsigned type: (signed type)0, \
+ signed type: (signed type)0
+
+#define __signed_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (signed char)0, \
+ __scalar_type_to_signed_cases(char), \
+ __scalar_type_to_signed_cases(short), \
+ __scalar_type_to_signed_cases(int), \
+ __scalar_type_to_signed_cases(long), \
+ __scalar_type_to_signed_cases(long long), \
+ default: (x)))
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
diff --git a/include/linux/console.h b/include/linux/console.h
index fc9f5c5c1b04..5520e4477ad7 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -79,12 +79,6 @@ enum vc_intensity;
* characters. (optional)
* @con_invert_region: invert a region of length @count on @vc starting at @p.
* (optional)
- * @con_debug_enter: prepare the console for the debugger. This includes, but
- * is not limited to, unblanking the console, loading an
- * appropriate palette, and allowing debugger generated output.
- * (optional)
- * @con_debug_leave: restore the console to its pre-debug state as closely as
- * possible. (optional)
*/
struct consw {
struct module *owner;
@@ -123,8 +117,6 @@ struct consw {
enum vc_intensity intensity,
bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
- void (*con_debug_enter)(struct vc_data *vc);
- void (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
@@ -298,12 +290,20 @@ struct nbcon_context {
* @outbuf: Pointer to the text buffer for output
* @len: Length to write
* @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ * @cpu: CPU on which the message was generated
+ * @pid: PID of the task that generated the message
+ * @comm: Name of the task that generated the message
*/
struct nbcon_write_context {
struct nbcon_context __private ctxt;
char *outbuf;
unsigned int len;
bool unsafe_takeover;
+#ifdef CONFIG_PRINTK_EXECUTION_CTX
+ int cpu;
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+#endif
};
/**
@@ -492,8 +492,8 @@ static inline bool console_srcu_read_lock_is_held(void)
extern int console_srcu_read_lock(void);
extern void console_srcu_read_unlock(int cookie);
-extern void console_list_lock(void) __acquires(console_mutex);
-extern void console_list_unlock(void) __releases(console_mutex);
+extern void console_list_lock(void);
+extern void console_list_unlock(void);
extern struct hlist_head console_list;
@@ -697,7 +697,6 @@ extern int unregister_console(struct console *);
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
-extern void console_conditional_schedule(void);
extern void console_unblank(void);
extern void console_flush_on_panic(enum con_flush_mode mode);
extern struct tty_driver *console_device(int *);
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 5b1236d8c65b..440b35e459e5 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -595,7 +595,8 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc);
+ const struct cper_sec_proc_arm *proc,
+ u32 length);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);
int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 487b3bf2e1ea..8239cd95a005 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -229,4 +229,8 @@ static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
#define smt_mitigations SMT_MITIGATIONS_OFF
#endif
+int arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status);
+int arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status);
+int arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 0465d1e6f72a..cc894fc38971 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -203,6 +203,7 @@ struct cpufreq_freqs {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
+struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
@@ -210,6 +211,10 @@ static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
return NULL;
}
+static inline struct cpufreq_policy *cpufreq_cpu_policy(unsigned int cpu)
+{
+ return NULL;
+}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
index f7aa20f62b87..286b3ab92e15 100644
--- a/include/linux/cpuhplock.h
+++ b/include/linux/cpuhplock.h
@@ -13,6 +13,7 @@
struct device;
extern int lockdep_is_cpus_held(void);
+extern int lockdep_is_cpus_write_held(void);
#ifdef CONFIG_HOTPLUG_CPU
void cpus_write_lock(void);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a98d3330385c..cbd402b4f974 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,6 +18,8 @@
#include <linux/mmu_context.h>
#include <linux/jump_label.h>
+extern bool lockdep_is_cpuset_held(void);
+
#ifdef CONFIG_CPUSETS
/*
@@ -74,10 +76,10 @@ extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
+extern void lockdep_assert_cpuset_lock_held(void);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
-extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -195,6 +197,7 @@ static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
+static inline void lockdep_assert_cpuset_lock_held(void) { }
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
struct cpumask *mask)
@@ -213,11 +216,6 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
-static inline bool cpuset_cpu_is_isolated(int cpu)
-{
- return false;
-}
-
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 343a140a6ba2..ed1609d78cd7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -164,7 +164,6 @@ static inline const struct cred *kernel_cred(void)
return rcu_dereference_raw(init_task.cred);
}
extern int set_security_override(struct cred *, u32);
-extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 3813373a9200..a4fea23da857 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -15,7 +15,7 @@
#include <linux/random.h>
/* Minimal region size. Every damon_region is aligned by this. */
-#define DAMON_MIN_REGION PAGE_SIZE
+#define DAMON_MIN_REGION_SZ PAGE_SIZE
/* Max priority score for DAMON-based operation schemes */
#define DAMOS_MAX_SCORE (99)
@@ -155,6 +155,8 @@ enum damos_action {
* @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node.
* @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup.
* @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup.
+ * @DAMOS_QUOTA_ACTIVE_MEM_BP: Active to total LRU memory ratio.
+ * @DAMOS_QUOTA_INACTIVE_MEM_BP: Inactive to total LRU memory ratio.
* @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
*
* Metrics equal to larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
@@ -166,6 +168,8 @@ enum damos_quota_goal_metric {
DAMOS_QUOTA_NODE_MEM_FREE_BP,
DAMOS_QUOTA_NODE_MEMCG_USED_BP,
DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ DAMOS_QUOTA_ACTIVE_MEM_BP,
+ DAMOS_QUOTA_INACTIVE_MEM_BP,
NR_DAMOS_QUOTA_GOAL_METRICS,
};
@@ -203,7 +207,7 @@ struct damos_quota_goal {
u64 last_psi_total;
struct {
int nid;
- unsigned short memcg_id;
+ u64 memcg_id;
};
};
struct list_head list;
@@ -330,6 +334,8 @@ struct damos_watermarks {
* @sz_ops_filter_passed:
* Total bytes that passed ops layer-handled DAMOS filters.
* @qt_exceeds: Total number of times the quota of the scheme has exceeded.
+ * @nr_snapshots:
+ * Total number of DAMON snapshots that the scheme has tried.
*
* "Tried an action to a region" in this context means the DAMOS core logic
* determined the region as eligible to apply the action. The access pattern
@@ -355,6 +361,7 @@ struct damos_stat {
unsigned long sz_applied;
unsigned long sz_ops_filter_passed;
unsigned long qt_exceeds;
+ unsigned long nr_snapshots;
};
/**
@@ -416,7 +423,7 @@ struct damos_filter {
bool matching;
bool allow;
union {
- unsigned short memcg_id;
+ u64 memcg_id;
struct damon_addr_range addr_range;
int target_idx;
struct damon_size_range sz_range;
@@ -496,6 +503,7 @@ struct damos_migrate_dests {
* @ops_filters: ops layer handling &struct damos_filter objects list.
* @last_applied: Last @action applied ops-managing entity.
* @stat: Statistics of this scheme.
+ * @max_nr_snapshots: Upper limit of nr_snapshots stat.
* @list: List head for siblings.
*
* For each @apply_interval_us, DAMON finds regions which fit in the
@@ -529,9 +537,10 @@ struct damos_migrate_dests {
* unsets @last_applied when each regions walking for applying the scheme is
* finished.
*
- * After applying the &action to each region, &stat_count and &stat_sz is
- * updated to reflect the number of regions and total size of regions that the
- * &action is applied.
+ * After applying the &action to each region, &stat is updated.
+ *
+ * If &max_nr_snapshots is set to a non-zero value and &stat.nr_snapshots
+ * becomes equal to or greater than it, the scheme is deactivated.
*/
struct damos {
struct damos_access_pattern pattern;
@@ -566,6 +575,7 @@ struct damos {
struct list_head ops_filters;
void *last_applied;
struct damos_stat stat;
+ unsigned long max_nr_snapshots;
struct list_head list;
};
@@ -597,7 +607,6 @@ enum damon_ops_id {
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
* @cleanup_target: Clean up each target before deallocation.
- * @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
* users should register the low level operations for their target address
@@ -630,7 +639,6 @@ enum damon_ops_id {
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup_target is called before the target will be deallocated.
- * @cleanup is called from @kdamond just before its termination.
*/
struct damon_operations {
enum damon_ops_id id;
@@ -646,7 +654,6 @@ struct damon_operations {
struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
void (*cleanup_target)(struct damon_target *t);
- void (*cleanup)(struct damon_ctx *context);
};
/*
@@ -656,7 +663,7 @@ struct damon_operations {
* @data: Data that will be passed to @fn.
* @repeat: Repeat invocations.
* @return_code: Return code from @fn invocation.
- * @dealloc_on_cancel: De-allocate when canceled.
+ * @dealloc_on_cancel: If @repeat is true, de-allocate when canceled.
*
* Control damon_call(), which requests specific kdamond to invoke a given
* function. Refer to damon_call() for more details.
@@ -749,27 +756,24 @@ struct damon_attrs {
* of the monitoring.
*
* @attrs: Monitoring attributes for accuracy/overhead control.
- * @kdamond: Kernel thread who does the monitoring.
- * @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
- * For each monitoring context, one kernel thread for the monitoring is
- * created. The pointer to the thread is stored in @kdamond.
+ * For each monitoring context, one kernel thread for the monitoring, namely
+ * kdamond, is created. The pid of kdamond can be retrieved using
+ * damon_kdamond_pid().
*
- * Once started, the monitoring thread runs until explicitly required to be
- * terminated or every monitoring target is invalid. The validity of the
- * targets is checked via the &damon_operations.target_valid of @ops. The
- * termination can also be explicitly requested by calling damon_stop().
- * The thread sets @kdamond to NULL when it terminates. Therefore, users can
- * know whether the monitoring is ongoing or terminated by reading @kdamond.
- * Reads and writes to @kdamond from outside of the monitoring thread must
- * be protected by @kdamond_lock.
+ * Once started, kdamond runs until explicitly required to be terminated or
+ * every monitoring target is invalid. The validity of the targets is checked
+ * via the &damon_operations.target_valid of @ops. The termination can also be
+ * explicitly requested by calling damon_stop(). To know if a kdamond is
+ * running, damon_is_running() can be used.
*
- * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
- * Accesses to other fields must be protected by themselves.
+ * While the kdamond is running, all accesses to &struct damon_ctx from a
+ * thread other than the kdamond should be made using safe DAMON APIs,
+ * including damon_call() and damos_walk().
*
* @ops: Set of monitoring operations for given use cases.
* @addr_unit: Scale factor for core to ops address conversion.
- * @min_sz_region: Minimum region size.
+ * @min_region_sz: Minimum region size.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
@@ -806,13 +810,15 @@ struct damon_ctx {
struct damos_walk_control *walk_control;
struct mutex walk_control_lock;
-/* public: */
+ /* Working thread of the given DAMON context */
struct task_struct *kdamond;
+ /* Protects @kdamond field access */
struct mutex kdamond_lock;
+/* public: */
struct damon_operations ops;
unsigned long addr_unit;
- unsigned long min_sz_region;
+ unsigned long min_region_sz;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -901,7 +907,7 @@ static inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges, unsigned long min_sz_region);
+ unsigned int nr_ranges, unsigned long min_region_sz);
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damon_attrs *attrs);
@@ -962,13 +968,14 @@ bool damon_initialized(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
bool damon_is_running(struct damon_ctx *ctx);
+int damon_kdamond_pid(struct damon_ctx *ctx);
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end,
- unsigned long min_sz_region);
+ unsigned long min_region_sz);
#endif /* CONFIG_DAMON */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7cecda29447e..4177c4738282 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -239,18 +239,16 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
* @cancel: callback to call
* @cancel_data: extra data for the callback to call
*/
-struct debugfs_cancellation {
+context_lock_struct(debugfs_cancellation) {
struct list_head list;
void (*cancel)(struct dentry *, void *);
void *cancel_data;
};
-void __acquires(cancellation)
-debugfs_enter_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
-void __releases(cancellation)
-debugfs_leave_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
+void debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __acquires(cancellation);
+void debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __releases(cancellation);
#else
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 800dcc360db2..ecb06f16d22c 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -69,6 +69,14 @@ struct task_delay_info {
u32 compact_count; /* total count of memory compact */
u32 wpcopy_count; /* total count of write-protect copy */
u32 irq_count; /* total count of IRQ/SOFTIRQ */
+
+ struct timespec64 blkio_delay_max_ts;
+ struct timespec64 swapin_delay_max_ts;
+ struct timespec64 freepages_delay_max_ts;
+ struct timespec64 thrashing_delay_max_ts;
+ struct timespec64 compact_delay_max_ts;
+ struct timespec64 wpcopy_delay_max_ts;
+ struct timespec64 irq_delay_max_ts;
};
#endif
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 99b1002b3e31..99c3c83ea520 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -215,9 +215,9 @@ bus_find_next_device(const struct bus_type *bus,struct device *cur)
return bus_find_device(bus, cur, NULL, device_match_any);
}
-#ifdef CONFIG_ACPI
struct acpi_device;
+#ifdef CONFIG_ACPI
/**
* bus_find_device_by_acpi_dev : device iterator for locating a particular device
* matching the ACPI COMPANION device.
@@ -231,7 +231,7 @@ bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device
}
#else
static inline struct device *
-bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
{
return NULL;
}
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
index 9c1e3d643d69..14ab9159bdda 100644
--- a/include/linux/device/devres.h
+++ b/include/linux/device/devres.h
@@ -26,10 +26,6 @@ __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
-void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
void devres_free(void *res);
void devres_add(struct device *dev, void *res);
void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index cd8e0f0a634b..bbc67ec513ed 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -85,6 +85,8 @@ enum probe_type {
* uevent.
* @p: Driver core's private data, no one other than the driver
* core can touch this.
+ * @p_cb: Callbacks private to the driver core; no one other than the
+ * driver core is allowed to touch this.
*
* The device driver-model tracks all of the drivers known to the system.
* The main reason for this tracking is to enable the driver core to match
@@ -119,6 +121,13 @@ struct device_driver {
void (*coredump) (struct device *dev);
struct driver_private *p;
+ struct {
+ /*
+ * Called after remove() and after all devres entries have been
+ * processed. This is a Rust only callback.
+ */
+ void (*post_unbind_rust)(struct device *dev);
+ } p_cb;
};
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0864773a57e8..822085bc2d20 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -21,7 +21,7 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
return 0;
- if (likely(!inode->i_rdev))
+ if (!inode->i_rdev)
return 0;
if (S_ISBLK(inode->i_mode))
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
index a3c0ce2d3a42..09bde3f748e4 100644
--- a/include/linux/dma-buf-mapping.h
+++ b/include/linux/dma-buf-mapping.h
@@ -9,7 +9,7 @@
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir);
void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 0bc492090237..133b9e637b55 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -429,18 +429,6 @@ struct dma_buf {
__poll_t active;
} cb_in, cb_out;
-#ifdef CONFIG_DMABUF_SYSFS_STATS
- /**
- * @sysfs_entry:
- *
- * For exposing information about this buffer in sysfs. See also
- * `DMA-BUF statistics`_ for the uapi this enables.
- */
- struct dma_buf_sysfs_entry {
- struct kobject kobj;
- struct dma_buf *dmabuf;
- } *sysfs_entry;
-#endif
};
/**
@@ -532,16 +520,6 @@ struct dma_buf_export_info {
};
/**
- * struct dma_buf_phys_vec - describe continuous chunk of memory
- * @paddr: physical address of that chunk
- * @len: Length of this chunk
- */
-struct dma_buf_phys_vec {
- phys_addr_t paddr;
- size_t len;
-};
-
-/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
*
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 64639e104110..d4c92fd35092 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -364,11 +364,12 @@ static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif
-int dma_fence_signal(struct dma_fence *fence);
-int dma_fence_signal_locked(struct dma_fence *fence);
-int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
-int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
- ktime_t timestamp);
+void dma_fence_signal(struct dma_fence *fence);
+bool dma_fence_check_and_signal(struct dma_fence *fence);
+bool dma_fence_check_and_signal_locked(struct dma_fence *fence);
+void dma_fence_signal_locked(struct dma_fence *fence);
+void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+void dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -401,6 +402,26 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+/**
+ * dma_fence_test_signaled_flag - Only check whether a fence is signaled yet.
+ * @fence: the fence to check
+ *
+ * This function just checks whether @fence is signaled, without interacting
+ * with the fence in any way. The user must, therefore, ensure through other
+ * means that fences get signaled eventually.
+ *
+ * This function uses test_bit(), which is thread-safe. Naturally, this function
+ * should be used opportunistically; a fence could get signaled at any moment
+ * after the check is done.
+ *
+ * Return: true if signaled, false otherwise.
+ */
+static inline bool
+dma_fence_test_signaled_flag(struct dma_fence *fence)
+{
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
@@ -418,7 +439,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (dma_fence_test_signaled_flag(fence))
return true;
if (fence->ops->signaled && fence->ops->signaled(fence)) {
@@ -448,7 +469,7 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (dma_fence_test_signaled_flag(fence))
return true;
if (fence->ops->signaled && fence->ops->signaled(fence)) {
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 27d15f60950a..648328a64b27 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -46,4 +46,6 @@ const char *dma_heap_get_name(struct dma_heap *heap);
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+extern bool mem_accounting;
+
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 4809204c674c..60b63756df82 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -377,14 +377,6 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
-#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
-void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
-#else
-static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
-{
-}
-#endif /* ARCH_HAS_DMA_MARK_CLEAN */
-
void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
@@ -395,11 +387,15 @@ bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
+bool arch_dma_alloc_direct(struct device *dev);
+bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle);
#else
#define arch_dma_map_phys_direct(d, a) (false)
#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#define arch_dma_alloc_direct(d) (false)
+#define arch_dma_free_direct(d, a) (false)
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index aa36a0d1d9df..29973baa0581 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -7,6 +7,7 @@
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
+#include <linux/cache.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
@@ -79,6 +80,13 @@
#define DMA_ATTR_MMIO (1UL << 10)
/*
+ * DMA_ATTR_CPU_CACHE_CLEAN: Indicates the CPU will not dirty any cacheline
+ * overlapping this buffer while it is mapped for DMA. All mappings sharing
+ * a cacheline must have this attribute for this to be considered safe.
+ */
+#define DMA_ATTR_CPU_CACHE_CLEAN (1UL << 11)
+
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
* given device and there may be a translation between the CPU physical address
@@ -703,6 +711,18 @@ static inline int dma_get_cache_alignment(void)
}
#endif
+#ifdef ARCH_HAS_DMA_MINALIGN
+#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN)
+#else
+#define ____dma_from_device_aligned
+#endif
+/* Mark start of DMA buffer */
+#define __dma_from_device_group_begin(GROUP) \
+ __cacheline_group_begin(GROUP) ____dma_from_device_aligned
+/* Mark end of DMA buffer */
+#define __dma_from_device_group_end(GROUP) \
+ __cacheline_group_end(GROUP) ____dma_from_device_aligned
+
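+/*
+ * A usage sketch (struct and group name are hypothetical): members between
+ * the markers form a device-written region that does not share cachelines
+ * with unrelated CPU-written fields:
+ *
+ *	struct my_rx {
+ *		__dma_from_device_group_begin(buf);
+ *		u8 data[64];
+ *		__dma_from_device_group_end(buf);
+ *	};
+ */
+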
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 562f520b23c2..2ce295b46b8c 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -11,15 +11,23 @@
#include <linux/device.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <linux/rtnetlink.h>
struct dpll_device;
struct dpll_pin;
struct dpll_pin_esync;
+struct fwnode_handle;
+struct ref_tracker;
struct dpll_device_ops {
int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ int (*mode_set)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode mode, struct netlink_ext_ack *extack);
+ int (*supported_modes_get)(const struct dpll_device *dpll,
+ void *dpll_priv, unsigned long *modes,
+ struct netlink_ext_ack *extack);
int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
enum dpll_lock_status_error *status_error,
@@ -166,6 +174,36 @@ struct dpll_pin_properties {
u32 phase_gran;
};
+#ifdef CONFIG_DPLL_REFCNT_TRACKER
+typedef struct ref_tracker *dpll_tracker;
+#else
+typedef struct {} dpll_tracker;
+#endif
+
+#define DPLL_DEVICE_CREATED 1
+#define DPLL_DEVICE_DELETED 2
+#define DPLL_DEVICE_CHANGED 3
+#define DPLL_PIN_CREATED 4
+#define DPLL_PIN_DELETED 5
+#define DPLL_PIN_CHANGED 6
+
+struct dpll_device_notifier_info {
+ struct dpll_device *dpll;
+ u32 id;
+ u32 idx;
+ u64 clock_id;
+ enum dpll_type type;
+};
+
+struct dpll_pin_notifier_info {
+ struct dpll_pin *pin;
+ u32 id;
+ u32 idx;
+ u64 clock_id;
+ const struct fwnode_handle *fwnode;
+ const struct dpll_pin_properties *prop;
+};
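+
+/*
+ * A notifier usage sketch (the handler is hypothetical; the DPLL_PIN_*
+ * events presumably pass a struct dpll_pin_notifier_info via @data):
+ *
+ *	static int my_dpll_event(struct notifier_block *nb,
+ *				 unsigned long event, void *data)
+ *	{
+ *		struct dpll_pin_notifier_info *info = data;
+ *
+ *		if (event == DPLL_PIN_CREATED)
+ *			pr_info("pin %u created\n", info->id);
+ *		return NOTIFY_OK;
+ *	}
+ */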
+
#if IS_ENABLED(CONFIG_DPLL)
void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
void dpll_netdev_pin_clear(struct net_device *dev);
@@ -173,6 +211,9 @@ void dpll_netdev_pin_clear(struct net_device *dev);
size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
int dpll_netdev_add_pin_handle(struct sk_buff *msg,
const struct net_device *dev);
+
+struct dpll_pin *fwnode_dpll_pin_find(struct fwnode_handle *fwnode,
+ dpll_tracker *tracker);
#else
static inline void
dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
@@ -188,12 +229,19 @@ dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
{
return 0;
}
+
+static inline struct dpll_pin *
+fwnode_dpll_pin_find(struct fwnode_handle *fwnode, dpll_tracker *tracker)
+{
+ return NULL;
+}
#endif
struct dpll_device *
-dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ dpll_tracker *tracker);
-void dpll_device_put(struct dpll_device *dpll);
+void dpll_device_put(struct dpll_device *dpll, dpll_tracker *tracker);
int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
const struct dpll_device_ops *ops, void *priv);
@@ -201,9 +249,11 @@ int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
void dpll_device_unregister(struct dpll_device *dpll,
const struct dpll_device_ops *ops, void *priv);
+#define DPLL_PIN_IDX_UNSPEC U32_MAX
+
struct dpll_pin *
dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
- const struct dpll_pin_properties *prop);
+ const struct dpll_pin_properties *prop, dpll_tracker *tracker);
int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
@@ -211,7 +261,9 @@ int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
-void dpll_pin_put(struct dpll_pin *pin);
+void dpll_pin_put(struct dpll_pin *pin, dpll_tracker *tracker);
+
+void dpll_pin_fwnode_set(struct dpll_pin *pin, struct fwnode_handle *fwnode);
int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
@@ -226,4 +278,8 @@ int dpll_device_change_ntf(struct dpll_device *dpll);
int dpll_pin_change_ntf(struct dpll_pin *pin);
+int register_dpll_notifier(struct notifier_block *nb);
+
+int unregister_dpll_notifier(struct notifier_block *nb);
+
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2a43094e23f7..664898d09ff5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -406,11 +406,12 @@ void efi_native_runtime_setup(void);
#define EFI_CC_FINAL_EVENTS_TABLE_GUID EFI_GUID(0xdd4a4648, 0x2de7, 0x4665, 0x96, 0x4d, 0x21, 0xd9, 0xef, 0x5f, 0xb4, 0x46)
/*
- * This GUID is used to pass to the kernel proper the struct screen_info
- * structure that was populated by the stub based on the GOP protocol instance
- * associated with ConOut
+ * This GUID is used to pass to the kernel proper the primary display
+ * information that was populated by the stub based on the GOP protocol
+ * instance associated with ConOut.
*/
-#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_PRIMARY_DISPLAY_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 87efb38b7081..f83ca0abf2cd 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
+#include <linux/audit.h>
#include <linux/irq-entry-common.h>
#include <linux/livepatch.h>
#include <linux/ptrace.h>
@@ -36,8 +37,8 @@
SYSCALL_WORK_SYSCALL_EMU | \
SYSCALL_WORK_SYSCALL_AUDIT | \
SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_RSEQ_SLICE | \
ARCH_SYSCALL_WORK_ENTER)
-
#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
SYSCALL_WORK_SYSCALL_TRACE | \
SYSCALL_WORK_SYSCALL_AUDIT | \
@@ -45,7 +46,84 @@
SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
-long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
+/**
+ * arch_ptrace_report_syscall_entry - Architecture specific ptrace_report_syscall_entry() wrapper
+ *
+ * Invoked from syscall_trace_enter() to wrap ptrace_report_syscall_entry().
+ *
+ * This allows architecture specific ptrace_report_syscall_entry()
+ * implementations. If not defined by the architecture this falls back to
+ * to ptrace_report_syscall_entry().
+ */
+static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs);
+
+#ifndef arch_ptrace_report_syscall_entry
+static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs)
+{
+ return ptrace_report_syscall_entry(regs);
+}
+#endif
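+
+/*
+ * An architecture overriding the wrapper would do roughly (a sketch):
+ *
+ *	#define arch_ptrace_report_syscall_entry arch_ptrace_report_syscall_entry
+ *	static __always_inline int arch_ptrace_report_syscall_entry(struct pt_regs *regs)
+ *	{
+ *		// arch specific preparation ...
+ *		return ptrace_report_syscall_entry(regs);
+ *	}
+ */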
+
+bool syscall_user_dispatch(struct pt_regs *regs);
+long trace_syscall_enter(struct pt_regs *regs, long syscall);
+void trace_syscall_exit(struct pt_regs *regs, long ret);
+
+static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
+{
+ if (unlikely(audit_context())) {
+ unsigned long args[6];
+
+ syscall_get_arguments(current, regs, args);
+ audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
+ }
+}
+
+static __always_inline long syscall_trace_enter(struct pt_regs *regs, unsigned long work)
+{
+ long syscall, ret = 0;
+
+ /*
+ * Handle Syscall User Dispatch. This must comes first, since
+ * the ABI here can be something that doesn't make sense for
+ * other syscall_work features.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (syscall_user_dispatch(regs))
+ return -1L;
+ }
+
+ /*
+ * User space got a time slice extension granted and relinquishes
+ * the CPU. The work stops the slice timer to avoid an extra round
+ * through hrtimer_interrupt().
+ */
+ if (work & SYSCALL_WORK_SYSCALL_RSEQ_SLICE)
+ rseq_syscall_enter_work(syscall_get_nr(current, regs));
+
+ /* Handle ptrace */
+ if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
+ ret = arch_ptrace_report_syscall_entry(regs);
+ if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
+ return -1L;
+ }
+
+ /* Do seccomp after ptrace, to catch any tracer changes. */
+ if (work & SYSCALL_WORK_SECCOMP) {
+ ret = __secure_computing();
+ if (ret == -1L)
+ return ret;
+ }
+
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
+ syscall = trace_syscall_enter(regs, syscall);
+
+ syscall_enter_audit(regs, syscall);
+
+ return ret ? : syscall;
+}
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
@@ -75,7 +153,7 @@ static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *re
unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
if (work & SYSCALL_WORK_ENTER)
- syscall = syscall_trace_enter(regs, syscall, work);
+ syscall = syscall_trace_enter(regs, work);
return syscall;
}
@@ -112,6 +190,37 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
return ret;
}
+/*
+ * If SYSCALL_EMU is set, then the only reason to report is when
+ * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
+ * instruction has already been reported in syscall_enter_from_user_mode().
+ */
+static __always_inline bool report_single_step(unsigned long work)
+{
+ if (work & SYSCALL_WORK_SYSCALL_EMU)
+ return false;
+
+ return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
+}
+
+/**
+ * arch_ptrace_report_syscall_exit - Architecture specific ptrace_report_syscall_exit()
+ *
+ * This allows architecture specific ptrace_report_syscall_exit()
+ * implementations. If not defined by the architecture, this falls back to
+ * ptrace_report_syscall_exit().
+ */
+static __always_inline void arch_ptrace_report_syscall_exit(struct pt_regs *regs,
+ int step);
+
+#ifndef arch_ptrace_report_syscall_exit
+static __always_inline void arch_ptrace_report_syscall_exit(struct pt_regs *regs,
+ int step)
+{
+ ptrace_report_syscall_exit(regs, step);
+}
+#endif
+
/**
* syscall_exit_work - Handle work before returning to user mode
* @regs: Pointer to current pt_regs
@@ -119,20 +228,40 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
*
* Do one-time syscall specific work.
*/
-void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+static __always_inline void syscall_exit_work(struct pt_regs *regs, unsigned long work)
+{
+ bool step;
+
+ /*
+ * If the syscall was rolled back due to syscall user dispatching,
+ * then the tracers below are not invoked for the same reason as
+ * the entry side was not invoked in syscall_trace_enter(): The ABI
+ * of these syscalls is unknown.
+ */
+ if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
+ if (unlikely(current->syscall_dispatch.on_dispatch)) {
+ current->syscall_dispatch.on_dispatch = false;
+ return;
+ }
+ }
+
+ audit_syscall_exit(regs);
+
+ if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
+ trace_syscall_exit(regs, syscall_get_return_value(current, regs));
+
+ step = report_single_step(work);
+ if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
+ arch_ptrace_report_syscall_exit(regs, step);
+}
/**
- * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * syscall_exit_to_user_mode_work - Handle one time work before returning to user mode
* @regs: Pointer to currents pt_regs
*
- * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
- * exit_to_user_mode() to perform the final transition to user mode.
+ * Step 1 of syscall_exit_to_user_mode() with the same calling convention.
*
- * Calling convention is the same as for syscall_exit_to_user_mode() and it
- * returns with all work handled and interrupts disabled. The caller must
- * invoke exit_to_user_mode() before actually switching to user mode to
- * make the final state transitions. Interrupts must stay disabled between
- * return from this function and the invocation of exit_to_user_mode().
+ * The caller must invoke steps 2-3 of syscall_exit_to_user_mode() afterwards.
*/
static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
@@ -155,15 +284,13 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
*/
if (unlikely(work & SYSCALL_WORK_EXIT))
syscall_exit_work(regs, work);
- local_irq_disable_exit_to_user();
- syscall_exit_to_user_mode_prepare(regs);
}
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
* @regs: Pointer to currents pt_regs
*
- * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * Invoked with interrupts enabled and fully valid @regs. Returns with all
* work handled, interrupts disabled such that the caller can immediately
* switch to user mode. Called from architecture specific syscall and ret
* from fork code.
@@ -176,6 +303,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
* - ptrace (single stepping)
*
* 2) Preparatory work
+ * - Disable interrupts
* - Exit to user mode loop (common TIF handling). Invokes
* arch_exit_to_user_mode_work() for architecture specific TIF work
* - Architecture specific one time work arch_exit_to_user_mode_prepare()
@@ -184,14 +312,17 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
* 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
* functionality in exit_to_user_mode().
*
- * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
- * exit_to_user_mode(). This function is preferred unless there is a
- * compelling architectural reason to use the separate functions.
+ * This is a combination of syscall_exit_to_user_mode_work() (1), disabling
+ * interrupts followed by syscall_exit_to_user_mode_prepare() (2) and
+ * exit_to_user_mode() (3). This function is preferred unless there is a
+ * compelling architectural reason to invoke the functions separately.
*/
static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
{
instrumentation_begin();
syscall_exit_to_user_mode_work(regs);
+ local_irq_disable_exit_to_user();
+ syscall_exit_to_user_mode_prepare(regs);
instrumentation_end();
exit_to_user_mode();
}
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 5c9162193d26..798abec67a1b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -216,13 +216,43 @@ static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
struct link_mode_info {
- int speed;
- u8 lanes;
- u8 duplex;
+ int speed;
+ u8 lanes;
+ u8 min_pairs;
+ u8 pairs;
+ u8 duplex;
+ u16 mediums;
};
extern const struct link_mode_info link_mode_params[];
+enum ethtool_link_medium {
+ ETHTOOL_LINK_MEDIUM_BASET = 0,
+ ETHTOOL_LINK_MEDIUM_BASEK,
+ ETHTOOL_LINK_MEDIUM_BASES,
+ ETHTOOL_LINK_MEDIUM_BASEC,
+ ETHTOOL_LINK_MEDIUM_BASEL,
+ ETHTOOL_LINK_MEDIUM_BASED,
+ ETHTOOL_LINK_MEDIUM_BASEE,
+ ETHTOOL_LINK_MEDIUM_BASEF,
+ ETHTOOL_LINK_MEDIUM_BASEV,
+ ETHTOOL_LINK_MEDIUM_BASEMLD,
+ ETHTOOL_LINK_MEDIUM_NONE,
+
+ __ETHTOOL_LINK_MEDIUM_LAST,
+};
+
+#define ETHTOOL_MEDIUM_FIBER_BITS (BIT(ETHTOOL_LINK_MEDIUM_BASES) | \
+ BIT(ETHTOOL_LINK_MEDIUM_BASEL) | \
+ BIT(ETHTOOL_LINK_MEDIUM_BASEF))
+
+enum ethtool_link_medium ethtool_str_to_medium(const char *str);
+
+static inline int ethtool_linkmode_n_pairs(unsigned int mode)
+{
+ return link_mode_params[mode].pairs;
+}
+
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
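A short usage sketch of the new medium metadata (hypothetical driver helper; the policy is illustrative, the identifiers come from the declarations above):

	/* Hedged sketch: does this link mode run over any fiber medium? */
	static bool mode_supports_fiber(unsigned int mode)
	{
		return link_mode_params[mode].mediums & ETHTOOL_MEDIUM_FIBER_BITS;
	}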
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index f0cf2714ec52..262e24d83313 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -201,9 +201,9 @@ struct handle_to_path_ctx {
* @commit_metadata: commit metadata changes to stable storage
*
* See Documentation/filesystems/nfs/exporting.rst for details on how to use
- * this interface correctly.
+ * this interface correctly and the definition of the flags.
*
- * encode_fh:
+ * @encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
* file referred to by the &struct dentry @de. If @flag has CONNECTABLE bit
@@ -215,7 +215,7 @@ struct handle_to_path_ctx {
* greater than @max_len*4 bytes). On error @max_len contains the minimum
* size (in 4-byte units) needed to encode the file handle.
*
- * fh_to_dentry:
+ * @fh_to_dentry:
* @fh_to_dentry is given a &struct super_block (@sb) and a file handle
* fragment (@fh, @fh_len). It should return a &struct dentry which refers
* to the same file that the file handle fragment refers to. If it cannot,
@@ -227,31 +227,44 @@ struct handle_to_path_ctx {
* created with d_alloc_root. The caller can then find any other extant
* dentries by following the d_alias links.
*
- * fh_to_parent:
+ * @fh_to_parent:
* Same as @fh_to_dentry, except that it returns a pointer to the parent
* dentry if it was encoded into the filehandle fragment by @encode_fh.
*
- * get_name:
+ * @get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in @name (with the
- * understanding that it is already pointing to a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX + 1 sized
* buffer). get_name() should return %0 on success or a negative error
* code on failure. @get_name will be called without @parent->i_rwsem held.
*
- * get_parent:
+ * @get_parent:
* @get_parent should find the parent directory for the given @child which
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
- * permission:
+ * @permission:
* Allow filesystems to specify a custom permission function.
*
- * open:
+ * @open:
* Allow filesystems to specify a custom open function.
*
- * commit_metadata:
+ * @commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
+ * @get_uuid:
+ * Get a filesystem unique signature exposed to clients.
+ *
+ * @map_blocks:
+ * Map and, if necessary, allocate blocks for a layout.
+ *
+ * @commit_blocks:
+ * Commit blocks in a layout once the client is done with them.
+ *
+ * @flags:
+ * Flags with which the filesystem can tell nfsd to alter its behaviour; see
+ * the documentation referenced above for the flag definitions.
+ *
* Locking rules:
* get_parent is called with child->d_inode->i_rwsem down
* get_name is not (which is possibly inconsistent)
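For reference, a minimal @encode_fh following the contract above could look like this sketch (hypothetical filesystem; the ino32+generation layout is an assumption, the FILEID_* constants are the standard ones from this header):

	/* Hedged sketch: encode inode number + generation, 2 words total. */
	static int myfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
				  struct inode *parent)
	{
		if (*max_len < 2) {
			*max_len = 2;	/* required size, in 4-byte units */
			return FILEID_INVALID;
		}
		fh[0] = inode->i_ino;
		fh[1] = inode->i_generation;
		*max_len = 2;
		return FILEID_INO32_GEN;
	}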
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index a7880787cad3..dc41722fcc9d 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -17,7 +17,6 @@
#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
-#define F2FS_SUM_BLKSIZE 4096 /* only support 4096 byte sum block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
@@ -442,10 +441,8 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM (F2FS_SUM_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
-#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
/* a summary entry for a block in a segment */
struct f2fs_summary {
@@ -468,22 +465,6 @@ struct summary_footer {
__le32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (F2FS_SUM_BLKSIZE - SUM_FOOTER_SIZE -\
- SUM_ENTRY_SIZE)
-#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
- sizeof(struct nat_journal_entry))
-#define NAT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
- sizeof(struct nat_journal_entry))
-#define SIT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
- sizeof(struct sit_journal_entry))
-#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
- sizeof(struct sit_journal_entry))
-
-/* Reserved area should make size of f2fs_extra_info equals to
- * that of nat_journal and sit_journal.
- */
-#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
-
/*
* frequently updated NAT/SIT entries can be stored in the spare area in
* summary blocks
@@ -498,9 +479,16 @@ struct nat_journal_entry {
struct f2fs_nat_entry ne;
} __packed;
+/*
+ * The nat_journal structure is a placeholder whose actual size varies with the
+ * use of packed_ssa. It must therefore only be accessed through the dedicated
+ * accessors, and size calculations must use the size-related macros instead of
+ * sizeof().
+ * Relevant macros and fields: sbi->nat_journal_entries, nat_in_journal(),
+ * nid_in_journal(), MAX_NAT_JENTRIES().
+ */
struct nat_journal {
- struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES];
- __u8 reserved[NAT_JOURNAL_RESERVED];
+ struct nat_journal_entry entries[0];
} __packed;
struct sit_journal_entry {
@@ -508,14 +496,21 @@ struct sit_journal_entry {
struct f2fs_sit_entry se;
} __packed;
+/*
+ * The sit_journal structure is a placeholder whose actual size varies with the
+ * use of packed_ssa. It must therefore only be accessed through the dedicated
+ * accessors, and size calculations must use the size-related macros instead of
+ * sizeof().
+ * Relevant macros and fields: sbi->sit_journal_entries, sit_in_journal(),
+ * segno_in_journal(), MAX_SIT_JENTRIES().
+ */
struct sit_journal {
- struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES];
- __u8 reserved[SIT_JOURNAL_RESERVED];
+ struct sit_journal_entry entries[0];
} __packed;
struct f2fs_extra_info {
__le64 kbytes_written;
- __u8 reserved[EXTRA_INFO_RESERVED];
+ __u8 reserved[];
} __packed;
struct f2fs_journal {
@@ -531,11 +526,33 @@ struct f2fs_journal {
};
} __packed;
-/* Block-sized summary block structure */
+/*
+ * Block-sized summary block structure
+ *
+ * The f2fs_summary_block structure is a placeholder whose actual size varies
+ * with the use of packed_ssa. It must therefore only be accessed through the
+ * dedicated accessors, and size calculations must use the size-related macros
+ * instead of sizeof().
+ * Relevant macros and fields: sbi->sum_blocksize, sbi->entries_in_sum,
+ * sbi->sum_entry_size, sum_entries(), sum_journal(), sum_footer().
+ *
+ * Summary Block Layout
+ *
+ * +-----------------------+ <--- Block Start
+ * | struct f2fs_summary |
+ * | entries[0] |
+ * | ... |
+ * | entries[N-1] |
+ * +-----------------------+
+ * | struct f2fs_journal |
+ * +-----------------------+
+ * | struct summary_footer |
+ * +-----------------------+ <--- Block End
+ */
struct f2fs_summary_block {
- struct f2fs_summary entries[ENTRIES_IN_SUM];
- struct f2fs_journal journal;
- struct summary_footer footer;
+ struct f2fs_summary entries[0];
+	/* struct f2fs_journal journal; */
+	/* struct summary_footer footer; */
} __packed;
/*
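Given the layout diagram, the footer can be located from the runtime block size alone. A hedged sketch (hypothetical helpers; SUM_FOOTER_SIZE is the retained macro above, and the entries-per-8-bytes rule mirrors the removed ENTRIES_IN_SUM definition for a 4096-byte block):

	/* Hedged sketch: locate the variable-size pieces from the block size. */
	static inline struct summary_footer *sum_footer_of(void *blk,
							   size_t sum_blocksize)
	{
		return blk + sum_blocksize - SUM_FOOTER_SIZE;
	}

	static inline unsigned int entries_in_sum_of(size_t sum_blocksize)
	{
		return sum_blocksize / 8;	/* the old ENTRIES_IN_SUM rule */
	}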
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 05cc251035da..6d4a58084fd5 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -18,6 +18,7 @@ struct backlight_device;
struct device;
struct device_node;
struct fb_info;
+struct fbcon_par;
struct file;
struct i2c_adapter;
struct inode;
@@ -304,10 +305,6 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
-
- /* called at KDB enter and leave time to prepare the console */
- int (*fb_debug_enter)(struct fb_info *info);
- int (*fb_debug_leave)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING
@@ -497,7 +494,6 @@ struct fb_info {
#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
#endif
- int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
#endif
@@ -510,7 +506,7 @@ struct fb_info {
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state, i.e. suspended */
- void *fbcon_par; /* fbcon use-only private area */
+ struct fbcon_par *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
@@ -628,6 +624,15 @@ static inline void unlock_fb_info(struct fb_info *info)
mutex_unlock(&info->lock);
}
+static inline struct device *dev_of_fbinfo(const struct fb_info *info)
+{
+#ifdef CONFIG_FB_DEVICE
+ return info->dev;
+#else
+ return NULL;
+#endif
+}
+
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
u8 *src, u32 s_pitch, u32 height)
{
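A short usage sketch of the new accessor (hypothetical caller), which avoids open-coded CONFIG_FB_DEVICE conditionals:

	/* Hedged sketch: log against the fb device only when one exists. */
	struct device *dev = dev_of_fbinfo(info);

	if (dev)
		dev_warn(dev, "framebuffer state: %u\n", info->state);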
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index 2f5e5588ee07..d2c9740e26a8 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -242,7 +242,14 @@ bool locks_owner_has_blockers(struct file_lock_context *flctx,
static inline struct file_lock_context *
locks_inode_context(const struct inode *inode)
{
- return smp_load_acquire(&inode->i_flctx);
+ /*
+ * Paired with smp_store_release in locks_get_lock_context().
+ *
+ * Ensures ->i_flctx will be visible if we spotted the flag.
+ */
+ if (likely(!(smp_load_acquire(&inode->i_opflags) & IOP_FLCTX)))
+ return NULL;
+ return READ_ONCE(inode->i_flctx);
}
#else /* !CONFIG_FILE_LOCKING */
@@ -469,7 +476,7 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
@@ -488,7 +495,7 @@ static inline int break_deleg(struct inode *inode, unsigned int flags)
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
@@ -533,8 +540,11 @@ static inline int break_deleg_wait(struct delegated_inode *di)
static inline int break_layout(struct inode *inode, bool wait)
{
+ struct file_lock_context *flctx;
+
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ flctx = locks_inode_context(inode);
+ if (flctx && !list_empty_careful(&flctx->flc_lease)) {
unsigned int flags = LEASE_BREAK_LAYOUT;
if (!wait)
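The acquire in locks_inode_context() pairs with a release store on the publishing side; a simplified sketch of that ordering (hypothetical helper, assuming i_opflags updates are otherwise serialized as the comment implies):

	/* Hedged sketch: publish ctx, then set the flag with release order. */
	static void publish_flctx(struct inode *inode,
				  struct file_lock_context *ctx)
	{
		WRITE_ONCE(inode->i_flctx, ctx);	/* contents first */
		/* Pairs with smp_load_acquire() in locks_inode_context(). */
		smp_store_release(&inode->i_opflags,
				  inode->i_opflags | IOP_FLCTX);
	}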
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fd54fed8f95f..44d7ae95ddbc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1167,6 +1167,7 @@ bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
bool bpf_jit_supports_private_stack(void);
bool bpf_jit_supports_timed_may_goto(void);
+bool bpf_jit_supports_fsession(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
u64 arch_bpf_timed_may_goto(void);
@@ -1375,24 +1376,13 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-int __bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char *sym);
+int bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline int
-bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym)
-{
- int ret = __bpf_address_lookup(addr, size, off, sym);
-
- if (ret && modname)
- *modname = NULL;
- return ret;
-}
-
void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
@@ -1431,8 +1421,8 @@ static inline bool bpf_jit_kallsyms_enabled(void)
}
static inline int
-__bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char *sym)
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym)
{
return 0;
}
@@ -1453,13 +1443,6 @@ static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
return NULL;
}
-static inline int
-bpf_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym)
-{
- return 0;
-}
-
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 6143b7d28eac..986d712e4d94 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -526,14 +526,13 @@ struct fw_iso_packet {
struct fw_iso_buffer {
enum dma_data_direction direction;
struct page **pages;
+ dma_addr_t *dma_addrs;
int page_count;
- int page_count_mapped;
};
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
@@ -547,21 +546,26 @@ union fw_iso_callback {
fw_iso_mc_callback_t mc;
};
+enum fw_iso_context_flag {
+ FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS = BIT(0),
+};
+
struct fw_iso_context {
struct fw_card *card;
struct work_struct work;
int type;
int channel;
int speed;
- bool drop_overflow_headers;
+ int flags;
size_t header_size;
+ size_t header_storage_size;
union fw_iso_callback callback;
void *callback_data;
};
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
+struct fw_iso_context *__fw_iso_context_create(struct fw_card *card, int type, int channel,
+ int speed, size_t header_size, size_t header_storage_size,
+ union fw_iso_callback callback, void *callback_data);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -570,6 +574,26 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+static inline struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
+ int channel, int speed, size_t header_size, fw_iso_callback_t callback,
+ void *callback_data)
+{
+ union fw_iso_callback cb = { .sc = callback };
+
+ return __fw_iso_context_create(card, type, channel, speed, header_size, PAGE_SIZE, cb,
+ callback_data);
+}
+
+static inline struct fw_iso_context *fw_iso_context_create_with_header_storage_size(
+ struct fw_card *card, int type, int channel, int speed, size_t header_size,
+ size_t header_storage_size, fw_iso_callback_t callback, void *callback_data)
+{
+ union fw_iso_callback cb = { .sc = callback };
+
+ return __fw_iso_context_create(card, type, channel, speed, header_size, header_storage_size,
+ cb, callback_data);
+}
+
/**
* fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
* @ctx: the isochronous context
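A hedged sketch of the new wrapper in use (hypothetical driver fragment; the callback signature is the fw_iso_callback_t declared above):

	/* Hedged sketch: receive context with 2 pages of header storage. */
	static void rx_callback(struct fw_iso_context *ctx, u32 cycle,
				size_t header_length, void *header, void *data)
	{
		/* consume isochronous packet headers */
	}

	ctx = fw_iso_context_create_with_header_storage_size(card,
			FW_ISO_CONTEXT_RECEIVE, channel, speed, 8,
			2 * PAGE_SIZE, rx_callback, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);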
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
index 1f97764fdfd7..51e99f47e90e 100644
--- a/include/linux/firmware/cirrus/cs_dsp_test_utils.h
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -126,7 +126,7 @@ struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
unsigned int fw_version);
void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
unsigned int alg_id, unsigned int alg_ver,
- int type, unsigned int offset,
+ int type, u16 offset, u32 offset32,
const void *payload_data, size_t payload_len_bytes);
void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
const char *info);
@@ -136,6 +136,10 @@ void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
unsigned int alg_id, unsigned int alg_ver,
int mem_region, unsigned int reg_addr_offset,
const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_patch_off32(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
index 74e5a4f6c13a..eae24dde9e41 100644
--- a/include/linux/firmware/cirrus/wmfw.h
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -172,7 +172,7 @@ struct wmfw_coeff_item {
__le16 type;
__le32 id;
__le32 ver;
- __le32 sr;
+ __le32 offset32;
__le32 len;
u8 data[];
} __packed;
@@ -200,4 +200,9 @@ struct wmfw_coeff_item {
#define WMFW_HALO_XM_PACKED 0x11
#define WMFW_HALO_YM_PACKED 0x12
+#define WMFW_ADSP2_XM_LONG 0xf405
+#define WMFW_ADSP2_YM_LONG 0xf406
+#define WMFW_HALO_XM_PACKED_LONG 0xf411
+#define WMFW_HALO_YM_PACKED_LONG 0xf412
+
#endif
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
index a33b45027356..ba5d93bd6158 100644
--- a/include/linux/firmware/imx/sm.h
+++ b/include/linux/firmware/imx/sm.h
@@ -26,6 +26,8 @@
#define SCMI_IMX94_CTRL_SAI3_MCLK 5U /*!< WAKE SAI3 MCLK */
#define SCMI_IMX94_CTRL_SAI4_MCLK 6U /*!< WAKE SAI4 MCLK */
+#define SCMI_IMX952_CTRL_BYPASS_AUDMIX 8U /*!< WAKE AUDMIX */
+
#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
int scmi_imx_misc_ctrl_set(u32 id, u32 val);
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index a55ca771286b..5747bd191bf1 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -66,19 +66,33 @@ int qcom_scm_set_warm_boot_addr(void *entry);
void qcom_scm_cpu_power_down(u32 flags);
int qcom_scm_set_remote_state(u32 state, u32 id);
-struct qcom_scm_pas_metadata {
+struct qcom_scm_pas_context {
+ struct device *dev;
+ u32 pas_id;
+ phys_addr_t mem_phys;
+ size_t mem_size;
void *ptr;
dma_addr_t phys;
ssize_t size;
+ bool use_tzmem;
};
-int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
- struct qcom_scm_pas_metadata *ctx);
-void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
-int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
-int qcom_scm_pas_auth_and_reset(u32 peripheral);
-int qcom_scm_pas_shutdown(u32 peripheral);
-bool qcom_scm_pas_supported(u32 peripheral);
+struct qcom_scm_pas_context *devm_qcom_scm_pas_context_alloc(struct device *dev,
+ u32 pas_id,
+ phys_addr_t mem_phys,
+ size_t mem_size);
+int qcom_scm_pas_init_image(u32 pas_id, const void *metadata, size_t size,
+ struct qcom_scm_pas_context *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_context *ctx);
+int qcom_scm_pas_mem_setup(u32 pas_id, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 pas_id);
+int qcom_scm_pas_shutdown(u32 pas_id);
+bool qcom_scm_pas_supported(u32 pas_id);
+struct resource_table *qcom_scm_pas_get_rsc_table(struct qcom_scm_pas_context *ctx,
+ void *input_rt, size_t input_rt_size,
+ size_t *output_rt_size);
+
+int qcom_scm_pas_prepare_and_auth_reset(struct qcom_scm_pas_context *ctx);
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
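Put together, a consumer of the context-based PAS API might follow this hedged sketch (hypothetical remoteproc-style fragment; variable names are illustrative, ERR_PTR-style returns from the allocator are an assumption, and error handling is abbreviated):

	/* Hedged sketch: authenticate and boot a peripheral image. */
	struct qcom_scm_pas_context *ctx;
	int ret;

	ctx = devm_qcom_scm_pas_context_alloc(dev, pas_id, mem_phys, mem_size);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_size, ctx);
	if (ret)
		return ret;

	ret = qcom_scm_pas_prepare_and_auth_reset(ctx);
	if (ret)
		return ret;
	/* ... peripheral runs; on teardown: */
	qcom_scm_pas_shutdown(pas_id);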
diff --git a/include/linux/firmware/xlnx-zynqmp-crypto.h b/include/linux/firmware/xlnx-zynqmp-crypto.h
new file mode 100644
index 000000000000..56595ab37c43
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-crypto.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for XilSECURE APIs.
+ *
+ * Copyright (C) 2014-2022 Xilinx, Inc.
+ * Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
+#define __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
+
+/**
+ * struct xlnx_feature - Feature data
+ * @family: Family code of platform
+ * @feature_id: Feature id of module
+ * @data: Collection of all supported platform data
+ */
+struct xlnx_feature {
+ u32 family;
+ u32 feature_id;
+ void *data;
+};
+
+/* XilSECURE API commands: module ID + API ID */
+#define XSECURE_API_AES_INIT 0x509
+#define XSECURE_API_AES_OP_INIT 0x50a
+#define XSECURE_API_AES_UPDATE_AAD 0x50b
+#define XSECURE_API_AES_ENCRYPT_UPDATE 0x50c
+#define XSECURE_API_AES_ENCRYPT_FINAL 0x50d
+#define XSECURE_API_AES_DECRYPT_UPDATE 0x50e
+#define XSECURE_API_AES_DECRYPT_FINAL 0x50f
+#define XSECURE_API_AES_KEY_ZERO 0x510
+#define XSECURE_API_AES_WRITE_KEY 0x511
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
+void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map);
+int versal_pm_aes_key_write(const u32 keylen,
+ const u32 keysrc, const u64 keyaddr);
+int versal_pm_aes_key_zero(const u32 keysrc);
+int versal_pm_aes_op_init(const u64 hw_req);
+int versal_pm_aes_update_aad(const u64 aad_addr, const u32 aad_len);
+int versal_pm_aes_enc_update(const u64 in_params, const u64 in_addr);
+int versal_pm_aes_dec_update(const u64 in_params, const u64 in_addr);
+int versal_pm_aes_dec_final(const u64 gcm_addr);
+int versal_pm_aes_enc_final(const u64 gcm_addr);
+int versal_pm_aes_init(void);
+
+#else
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int versal_pm_aes_key_write(const u32 keylen,
+ const u32 keysrc, const u64 keyaddr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_key_zero(const u32 keysrc)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_op_init(const u64 hw_req)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_update_aad(const u64 aad_addr,
+ const u32 aad_len)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_enc_update(const u64 in_params,
+ const u64 in_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_dec_update(const u64 in_params,
+ const u64 in_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_enc_final(const u64 gcm_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_dec_final(const u64 gcm_addr)
+{
+ return -ENODEV;
+}
+
+static inline int versal_pm_aes_init(void)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__ */
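The Versal AES entry points are designed to be called as a sequence; a hedged sketch of one encrypt pass (hypothetical DMA addresses and lengths; error checks elided for brevity):

	/* Hedged sketch: one AES-GCM encrypt pass via the firmware API. */
	ret = versal_pm_aes_init();
	ret = versal_pm_aes_op_init(hw_req_dma);	/* operation params */
	ret = versal_pm_aes_update_aad(aad_dma, aad_len);
	ret = versal_pm_aes_enc_update(in_params_dma, in_addr_dma);
	ret = versal_pm_aes_enc_final(gcm_tag_dma);	/* writes the GCM tag */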
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 15fdbd089bbf..d70dcd462b44 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>
+#include <linux/firmware/xlnx-zynqmp-crypto.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -589,9 +590,7 @@ int zynqmp_pm_release_node(const u32 node);
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
-int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_efuse_access(const u64 address, u32 *out);
-int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
int zynqmp_pm_fpga_get_config_status(u32 *value);
@@ -772,22 +771,11 @@ static inline int zynqmp_pm_set_requirement(const u32 node,
return -ENODEV;
}
-static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
{
return -ENODEV;
}
-static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
- const u32 flags)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index b3b53f8c1b28..171982e53c9a 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
-#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/const.h>
#include <linux/limits.h>
@@ -10,10 +9,9 @@
#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)
-#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
-#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
-#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
- FIELD_PREP(GENMASK(7, 1), func))
+#define FORTIFY_REASON_DIR(r) ((r) & 1)
+#define FORTIFY_REASON_FUNC(r) ((r) >> 1)
+#define FORTIFY_REASON(func, write) ((func) << 1 | (write))
/* Overridden by KUnit tests. */
#ifndef fortify_panic
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
index 2b85fe9e7f9a..b1e575665fc5 100644
--- a/include/linux/framer/framer.h
+++ b/include/linux/framer/framer.h
@@ -96,7 +96,7 @@ struct framer {
#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
int framer_pm_runtime_get(struct framer *framer);
int framer_pm_runtime_get_sync(struct framer *framer);
-int framer_pm_runtime_put(struct framer *framer);
+void framer_pm_runtime_put(struct framer *framer);
int framer_pm_runtime_put_sync(struct framer *framer);
int framer_init(struct framer *framer);
int framer_exit(struct framer *framer);
@@ -124,9 +124,8 @@ static inline int framer_pm_runtime_get_sync(struct framer *framer)
return -ENOSYS;
}
-static inline int framer_pm_runtime_put(struct framer *framer)
+static inline void framer_pm_runtime_put(struct framer *framer)
{
- return -ENOSYS;
}
static inline int framer_pm_runtime_put_sync(struct framer *framer)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f5c9cf28c4dc..2e4d1e8b0e71 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -631,6 +631,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_MGTIME 0x0020
#define IOP_CACHED_LINK 0x0040
#define IOP_FASTPERM_MAY_EXEC 0x0080
+#define IOP_FLCTX 0x0100
/*
* Inode state bits. Protected by inode->i_lock
@@ -1717,6 +1718,13 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
struct timespec64 simple_inode_init_ts(struct inode *inode);
+static inline int inode_time_dirty_flag(struct inode *inode)
+{
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
+ return I_DIRTY_TIME;
+ return I_DIRTY_SYNC;
+}
+
/*
* Snapshotting support.
*/
@@ -1855,6 +1863,8 @@ struct dir_context {
* INT_MAX unlimited
*/
int count;
+ /* @actor supports these flags in d_type high bits */
+ unsigned int dt_flags_mask;
};
/* If OR-ed with d_type, pending signals are not checked */
@@ -1983,6 +1993,11 @@ int wrap_directory_iterator(struct file *, struct dir_context *,
static int shared_##x(struct file *file, struct dir_context *ctx) \
{ return wrap_directory_iterator(file, ctx, x); }
+enum fs_update_time {
+ FS_UPD_ATIME,
+ FS_UPD_CMTIME,
+};
+
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
@@ -2010,7 +2025,9 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, int);
+ int (*update_time)(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
+ void (*sync_lazytime)(struct inode *inode);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
@@ -2237,16 +2254,8 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-enum file_time_flags {
- S_ATIME = 1,
- S_MTIME = 2,
- S_CTIME = 4,
- S_VERSION = 8,
-};
-
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
-int inode_update_time(struct inode *inode, int flags);
static inline void file_accessed(struct file *file)
{
@@ -2274,8 +2283,6 @@ struct file_system_type {
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
- struct dentry *(*mount) (struct file_system_type *, int,
- const char *, void *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
@@ -2399,8 +2406,10 @@ static inline void super_set_sysfs_name_generic(struct super_block *sb, const ch
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
void iput_not_last(struct inode *);
-int inode_update_timestamps(struct inode *inode, int flags);
-int generic_update_time(struct inode *, int);
+int inode_update_time(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
+int generic_update_time(struct inode *inode, enum fs_update_time type,
+ unsigned int flags);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2409,14 +2418,19 @@ extern struct kobject *fs_kobj;
/* fs/open.c */
struct audit_names;
-struct filename {
+
+struct __filename_head {
const char *name; /* pointer to actual string */
- const __user char *uptr; /* original userland pointer */
- atomic_t refcnt;
+ int refcnt;
struct audit_names *aname;
- const char iname[];
+};
+#define EMBEDDED_NAME_MAX (192 - sizeof(struct __filename_head))
+struct filename {
+ struct __filename_head;
+ const char iname[EMBEDDED_NAME_MAX];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+static_assert(sizeof(struct filename) % 64 == 0);
static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
@@ -2457,7 +2471,7 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_open_nonotify(const struct path *path, int flags,
const struct cred *cred);
-struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+struct file *dentry_create(struct path *path, int flags, umode_t mode,
const struct cred *cred);
const struct path *backing_file_user_path(const struct file *f);
@@ -2511,11 +2525,23 @@ static inline struct filename *getname_maybe_null(const char __user *name, int f
extern void putname(struct filename *name);
DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
-static inline struct filename *refname(struct filename *name)
-{
- atomic_inc(&name->refcnt);
- return name;
-}
+struct delayed_filename {
+	struct filename *__incomplete_filename; /* don't touch */
+};
+#define INIT_DELAYED_FILENAME(ptr) \
+ ((void)(*(ptr) = (struct delayed_filename){}))
+int delayed_getname(struct delayed_filename *, const char __user *);
+int delayed_getname_uflags(struct delayed_filename *v, const char __user *, int);
+void dismiss_delayed_filename(struct delayed_filename *);
+int putname_to_delayed(struct delayed_filename *, struct filename *);
+struct filename *complete_getname(struct delayed_filename *);
+
+DEFINE_CLASS(filename, struct filename *, putname(_T), getname(p), const char __user *p)
+EXTEND_CLASS(filename, _kernel, getname_kernel(p), const char *p)
+EXTEND_CLASS(filename, _flags, getname_flags(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _uflags, getname_uflags(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _maybe_null, getname_maybe_null(p, f), const char __user *p, unsigned int f)
+EXTEND_CLASS(filename, _complete_delayed, complete_getname(p), struct delayed_filename *p)
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
@@ -2534,10 +2560,8 @@ static inline int finish_open_simple(struct file *file, int error)
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
-extern struct kmem_cache *names_cachep;
-
-#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname() kmalloc(PATH_MAX, GFP_KERNEL)
+#define __putname(name) kfree(name)
void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
@@ -2657,6 +2681,11 @@ static inline int path_permission(const struct path *path, int mask)
int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode);
+int may_delete_dentry(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *victim, bool isdir);
+int may_create_dentry(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *child);
+
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
@@ -3217,7 +3246,6 @@ extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode);
-extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
@@ -3524,7 +3552,9 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
+ unsigned int dt_mask = S_DT_MASK | ctx->dt_flags_mask;
+
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type & dt_mask);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
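With the class definitions above, callers can hold a scoped filename reference; a hedged sketch (assuming the usual cleanup.h semantics and that putname() tolerates ERR_PTR values, as the DEFINE_FREE above suggests):

	/* Hedged sketch: uname is a hypothetical const char __user * argument. */
	CLASS(filename, name)(uname);

	if (IS_ERR(name))
		return PTR_ERR(name);
	/* use name->name; putname() runs automatically at scope exit */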
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
index 6bd3009e09b3..fa7638b81246 100644
--- a/include/linux/fs/super_types.h
+++ b/include/linux/fs/super_types.h
@@ -35,6 +35,7 @@ struct user_namespace;
struct workqueue_struct;
struct writeback_control;
struct xattr_handler;
+struct fserror_event;
extern struct super_block *blockdev_superblock;
@@ -96,7 +97,6 @@ struct super_operations {
const void *owner);
int (*unfreeze_fs)(struct super_block *sb);
int (*statfs)(struct dentry *dentry, struct kstatfs *kstatfs);
- int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin)(struct super_block *sb);
int (*show_options)(struct seq_file *seq, struct dentry *dentry);
@@ -124,6 +124,9 @@ struct super_operations {
*/
int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
void (*shutdown)(struct super_block *sb);
+
+ /* Report a filesystem error */
+ void (*report_error)(const struct fserror_event *event);
};
struct super_block {
@@ -268,6 +271,9 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
long s_min_writeback_pages;
+
+ /* number of fserrors that are being sent to fsnotify/filesystems */
+ refcount_t s_pending_errors;
} __randomize_layout;
/*
diff --git a/include/linux/fserror.h b/include/linux/fserror.h
new file mode 100644
index 000000000000..5e1ad78c346e
--- /dev/null
+++ b/include/linux/fserror.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef _LINUX_FSERROR_H__
+#define _LINUX_FSERROR_H__
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct inode;
+struct super_block;
+
+void fserror_mount(struct super_block *sb);
+void fserror_unmount(struct super_block *sb);
+
+enum fserror_type {
+ /* pagecache I/O failed */
+ FSERR_BUFFERED_READ,
+ FSERR_BUFFERED_WRITE,
+
+ /* direct I/O failed */
+ FSERR_DIRECTIO_READ,
+ FSERR_DIRECTIO_WRITE,
+
+ /* out of band media error reported */
+ FSERR_DATA_LOST,
+
+ /* filesystem metadata */
+ FSERR_METADATA,
+};
+
+struct fserror_event {
+ struct work_struct work;
+ struct super_block *sb;
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ enum fserror_type type;
+
+ /* negative error number */
+ int error;
+};
+
+void fserror_report(struct super_block *sb, struct inode *inode,
+ enum fserror_type type, loff_t pos, u64 len, int error,
+ gfp_t gfp);
+
+static inline void fserror_report_io(struct inode *inode,
+ enum fserror_type type, loff_t pos,
+ u64 len, int error, gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, type, pos, len, error, gfp);
+}
+
+static inline void fserror_report_data_lost(struct inode *inode, loff_t pos,
+ u64 len, gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, FSERR_DATA_LOST, pos, len, -EIO,
+ gfp);
+}
+
+static inline void fserror_report_file_metadata(struct inode *inode, int error,
+ gfp_t gfp)
+{
+ fserror_report(inode->i_sb, inode, FSERR_METADATA, 0, 0, error, gfp);
+}
+
+static inline void fserror_report_metadata(struct super_block *sb, int error,
+ gfp_t gfp)
+{
+ fserror_report(sb, NULL, FSERR_METADATA, 0, 0, error, gfp);
+}
+
+static inline void fserror_report_shutdown(struct super_block *sb, gfp_t gfp)
+{
+ fserror_report(sb, NULL, FSERR_METADATA, 0, 0, -ESHUTDOWN, gfp);
+}
+
+#endif /* _LINUX_FSERROR_H__ */
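A one-line usage sketch (hypothetical writeback caller; pos and len are illustrative):

	/* Hedged sketch: report a failed buffered write to listeners. */
	fserror_report_io(inode, FSERR_BUFFERED_WRITE, pos, len, -EIO, GFP_NOFS);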
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0d954ea7b179..95985400d3d8 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -553,7 +553,7 @@ struct fsnotify_mark_connector {
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
- struct hlist_head list;
+ struct hlist_head list; /* List of marks */
};
/*
@@ -562,6 +562,9 @@ struct fsnotify_mark_connector {
*/
struct fsnotify_sb_info {
struct fsnotify_mark_connector __rcu *sb_marks;
+ /* List of connectors for inode marks */
+ struct list_head inode_conn_list;
+ spinlock_t list_lock; /* Lock protecting inode_conn_list */
/*
* Number of inode/mount/sb objects that are being watched in this sb.
* Note that inodes objects are currently double-accounted.
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 5bc7280425a7..fed91023bea9 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -31,13 +31,6 @@ struct fsverity_info;
/* Verity operations for filesystems */
struct fsverity_operations {
/**
- * The offset of the pointer to struct fsverity_info in the
- * filesystem-specific part of the inode, relative to the beginning of
- * the common part of the inode (the 'struct inode').
- */
- ptrdiff_t inode_info_offs;
-
- /**
* Begin enabling verity on the given file.
*
* @filp: a readonly file descriptor for the file
@@ -97,10 +90,6 @@ struct fsverity_operations {
*
* @inode: the inode
* @index: 0-based index of the page within the Merkle tree
- * @num_ra_pages: The number of Merkle tree pages that should be
- * prefetched starting at @index if the page at @index
- * isn't already cached. Implementations may ignore this
- * argument; it's only a performance optimization.
*
* This can be called at any time on an open verity file. It may be
* called by multiple processes concurrently, even with the same page.
@@ -110,13 +99,28 @@ struct fsverity_operations {
* Return: the page on success, ERR_PTR() on failure
*/
struct page *(*read_merkle_tree_page)(struct inode *inode,
- pgoff_t index,
- unsigned long num_ra_pages);
+ pgoff_t index);
+
+ /**
+ * Perform readahead of a Merkle tree for the given inode.
+ *
+ * @inode: the inode
+ * @index: 0-based index of the first page within the Merkle tree
+ * @nr_pages: number of pages to be read ahead.
+ *
+ * This can be called at any time on an open verity file. It may be
+ * called by multiple processes concurrently, even with the same range.
+ *
+ * Optional method that lets ->read_merkle_tree_page() find data already in
+ * the page cache instead of issuing dependent I/O.
+ */
+ void (*readahead_merkle_tree)(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
/**
- * Write a Merkle tree block to the given inode.
+ * Write a Merkle tree block to the given file.
*
- * @inode: the inode for which the Merkle tree is being built
+ * @file: the file for which the Merkle tree is being built
* @buf: the Merkle tree block to write
* @pos: the position of the block in the Merkle tree (in bytes)
* @size: the Merkle tree block size (in bytes)
@@ -126,43 +130,48 @@ struct fsverity_operations {
*
* Return: 0 on success, -errno on failure
*/
- int (*write_merkle_tree_block)(struct inode *inode, const void *buf,
+ int (*write_merkle_tree_block)(struct file *file, const void *buf,
u64 pos, unsigned int size);
};
#ifdef CONFIG_FS_VERITY
-
-/*
- * Returns the address of the verity info pointer within the filesystem-specific
- * part of the inode. (To save memory on filesystems that don't support
- * fsverity, a field in 'struct inode' itself is no longer used.)
+/**
+ * fsverity_active() - do reads from the inode need to go through fs-verity?
+ * @inode: inode to check
+ *
+ * This checks whether the inode's verity info has been set, and reads need
+ * to verify the file data.
+ *
+ * Return: true if reads need to go through fs-verity, otherwise false
*/
-static inline struct fsverity_info **
-fsverity_info_addr(const struct inode *inode)
+static inline bool fsverity_active(const struct inode *inode)
{
- VFS_WARN_ON_ONCE(inode->i_sb->s_vop->inode_info_offs == 0);
- return (void *)inode + inode->i_sb->s_vop->inode_info_offs;
+ if (IS_VERITY(inode)) {
+ /*
+ * This pairs with the try_cmpxchg in set_mask_bits()
+ * used to set the S_VERITY bit in i_flags.
+ */
+ smp_mb();
+ return true;
+ }
+
+ return false;
}
+struct fsverity_info *__fsverity_get_info(const struct inode *inode);
+/**
+ * fsverity_get_info() - get the fsverity information for an inode
+ * @inode: inode to operate on.
+ *
+ * This gets the fsverity_info for @inode if it exists. It is safe to call
+ * without knowing whether an fsverity_info exists for @inode, including on
+ * filesystems that do not support fsverity.
+ */
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
- /*
- * Since this function can be called on inodes belonging to filesystems
- * that don't support fsverity at all, and fsverity_info_addr() doesn't
- * work on such filesystems, we have to start with an IS_VERITY() check.
- * Checking IS_VERITY() here is also useful to minimize the overhead of
- * fsverity_active() on non-verity files.
- */
- if (!IS_VERITY(inode))
+ if (!fsverity_active(inode))
return NULL;
-
- /*
- * Pairs with the cmpxchg_release() in fsverity_set_info(). I.e.,
- * another task may publish the inode's verity info concurrently,
- * executing a RELEASE barrier. Use smp_load_acquire() here to safely
- * ACQUIRE the memory the other task published.
- */
- return smp_load_acquire(fsverity_info_addr(inode));
+ return __fsverity_get_info(inode);
}
/* enable.c */
@@ -179,27 +188,6 @@ int fsverity_get_digest(struct inode *inode,
/* open.c */
int __fsverity_file_open(struct inode *inode, struct file *filp);
-int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
-void __fsverity_cleanup_inode(struct inode *inode);
-
-/**
- * fsverity_cleanup_inode() - free the inode's verity info, if present
- * @inode: an inode being evicted
- *
- * Filesystems must call this on inode eviction to free the inode's verity info.
- */
-static inline void fsverity_cleanup_inode(struct inode *inode)
-{
- /*
- * Only IS_VERITY() inodes can have verity info, so start by checking
- * for IS_VERITY() (which is faster than retrieving the pointer to the
- * verity info). This minimizes overhead for non-verity inodes.
- */
- if (IS_VERITY(inode))
- __fsverity_cleanup_inode(inode);
- else
- VFS_WARN_ON_ONCE(*fsverity_info_addr(inode) != NULL);
-}
/* read_metadata.c */
@@ -207,12 +195,18 @@ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
-void fsverity_verify_bio(struct bio *bio);
+bool fsverity_verify_blocks(struct fsverity_info *vi, struct folio *folio,
+ size_t len, size_t offset);
+void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
#else /* !CONFIG_FS_VERITY */
+static inline bool fsverity_active(const struct inode *inode)
+{
+ return false;
+}
+
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
return NULL;
@@ -251,16 +245,6 @@ static inline int __fsverity_file_open(struct inode *inode, struct file *filp)
return -EOPNOTSUPP;
}
-static inline int __fsverity_prepare_setattr(struct dentry *dentry,
- struct iattr *attr)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fsverity_cleanup_inode(struct inode *inode)
-{
-}
-
/* read_metadata.c */
static inline int fsverity_ioctl_read_metadata(struct file *filp,
@@ -271,14 +255,16 @@ static inline int fsverity_ioctl_read_metadata(struct file *filp,
/* verify.c */
-static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+static inline bool fsverity_verify_blocks(struct fsverity_info *vi,
+ struct folio *folio, size_t len,
size_t offset)
{
WARN_ON_ONCE(1);
return false;
}
-static inline void fsverity_verify_bio(struct bio *bio)
+static inline void fsverity_verify_bio(struct fsverity_info *vi,
+ struct bio *bio)
{
WARN_ON_ONCE(1);
}
@@ -290,32 +276,16 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
#endif /* !CONFIG_FS_VERITY */
-static inline bool fsverity_verify_folio(struct folio *folio)
-{
- return fsverity_verify_blocks(folio, folio_size(folio), 0);
-}
-
-static inline bool fsverity_verify_page(struct page *page)
+static inline bool fsverity_verify_folio(struct fsverity_info *vi,
+ struct folio *folio)
{
- return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+ return fsverity_verify_blocks(vi, folio, folio_size(folio), 0);
}
-/**
- * fsverity_active() - do reads from the inode need to go through fs-verity?
- * @inode: inode to check
- *
- * This checks whether the inode's verity info has been set.
- *
- * Filesystems call this from ->readahead() to check whether the pages need to
- * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
- * a race condition where the file is being read concurrently with
- * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before the verity info.)
- *
- * Return: true if reads need to go through fs-verity, otherwise false
- */
-static inline bool fsverity_active(const struct inode *inode)
+static inline bool fsverity_verify_page(struct fsverity_info *vi,
+ struct page *page)
{
- return fsverity_get_info(inode) != NULL;
+ return fsverity_verify_blocks(vi, page_folio(page), PAGE_SIZE, 0);
}
/**
@@ -338,22 +308,12 @@ static inline int fsverity_file_open(struct inode *inode, struct file *filp)
return 0;
}
-/**
- * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
- * @dentry: dentry through which the inode is being changed
- * @attr: attributes to change
- *
- * Verity files are immutable, so deny truncates. This isn't covered by the
- * open-time check because sys_truncate() takes a path, not a file descriptor.
- *
- * Return: 0 on success, -errno on failure
- */
-static inline int fsverity_prepare_setattr(struct dentry *dentry,
- struct iattr *attr)
-{
- if (IS_VERITY(d_inode(dentry)))
- return __fsverity_prepare_setattr(dentry, attr);
- return 0;
-}
+void fsverity_cleanup_inode(struct inode *inode);
+void fsverity_readahead(struct fsverity_info *vi, pgoff_t index,
+ unsigned long nr_pages);
+
+struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index);
+void generic_readahead_merkle_tree(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
#endif /* _LINUX_FSVERITY_H */
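On the read side, the explicit fsverity_info argument means callers do the lookup once; a hedged sketch (hypothetical read-completion fragment):

	/* Hedged sketch: look up the verity info once, then verify the bio. */
	struct fsverity_info *vi = fsverity_get_info(inode);

	if (vi)
		fsverity_verify_bio(vi, bio);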
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a3a8989e3268..1a4d36fc9085 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,16 +82,19 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+struct ftrace_func_entry;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym);
+ unsigned long *off, char **modname,
+ const unsigned char **modbuildid, char *sym);
#else
static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
- unsigned long *off, char **modname, char *sym)
+ unsigned long *off, char **modname,
+ const unsigned char **modbuildid, char *sym)
{
return 0;
}
@@ -359,7 +362,6 @@ enum {
FTRACE_OPS_FL_DIRECT = BIT(17),
FTRACE_OPS_FL_SUBOP = BIT(18),
FTRACE_OPS_FL_GRAPH = BIT(19),
- FTRACE_OPS_FL_JMP = BIT(20),
};
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -403,9 +405,17 @@ enum ftrace_ops_cmd {
* Negative on failure. The return value is dependent on the
* callback.
*/
-typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, unsigned long ip, enum ftrace_ops_cmd cmd);
#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_HASH_DEFAULT_BITS 10
+
+struct ftrace_hash *alloc_ftrace_hash(int size_bits);
+void free_ftrace_hash(struct ftrace_hash *hash);
+struct ftrace_func_entry *add_ftrace_hash_entry_direct(struct ftrace_hash *hash,
+ unsigned long ip, unsigned long direct);
+
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
struct ftrace_hash __rcu *notrace_hash;
@@ -535,6 +545,10 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash);
+int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash);
+int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock);
+
void ftrace_stub_direct_tramp(void);
#else
@@ -561,6 +575,21 @@ static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned l
return -ENODEV;
}
+static inline int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ return -ENODEV;
+}
+
+static inline int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ return -ENODEV;
+}
+
+static inline int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock)
+{
+ return -ENODEV;
+}
+
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
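A hedged sketch of the batch direct-call API above (hypothetical ips and trampoline; whether the caller retains ownership of the hash is an assumption, and the add-entry error checks are elided):

	/* Hedged sketch: attach one trampoline to two ips in a single update. */
	struct ftrace_hash *hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);

	if (!hash)
		return -ENOMEM;
	add_ftrace_hash_entry_direct(hash, ip1, (unsigned long)my_tramp);
	add_ftrace_hash_entry_direct(hash, ip2, (unsigned long)my_tramp);
	ret = update_ftrace_direct_add(ops, hash);
	free_ftrace_hash(hash);		/* assumption: caller keeps ownership */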
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
index 15627ceea9bc..386fa48c4a95 100644
--- a/include/linux/ftrace_regs.h
+++ b/include/linux/ftrace_regs.h
@@ -33,6 +33,31 @@ struct ftrace_regs;
#define ftrace_regs_get_frame_pointer(fregs) \
frame_pointer(&arch_ftrace_regs(fregs)->regs)
+static __always_inline void
+ftrace_partial_regs_update(struct ftrace_regs *fregs, struct pt_regs *regs) { }
+
+#else
+
+/*
+ * ftrace_partial_regs_update - update the original ftrace_regs from regs
+ * @fregs: The ftrace_regs to update from @regs
+ * @regs: The partial regs from ftrace_partial_regs() that was updated
+ *
+ * Some architectures have the partial regs living in the ftrace_regs
+ * structure, whereas other architectures need to make a different copy
+ * of the @regs. If a partial @regs is retrieved by ftrace_partial_regs() and
+ * if the code using @regs updates a field (like the instruction pointer or
+ * stack pointer) it may need to propagate that change to the original @fregs
+ * it retrieved the partial @regs from. Use this function to guarantee that
+ * update happens.
+ */
+static __always_inline void
+ftrace_partial_regs_update(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ ftrace_regs_set_instruction_pointer(fregs, instruction_pointer(regs));
+ ftrace_regs_set_return_value(fregs, regs_return_value(regs));
+}
+
#endif /* HAVE_ARCH_FTRACE_REGS */
/* This can be overridden by the architectures */
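A hedged usage sketch (assuming ftrace_partial_regs() from this interface; new_ip and local_regs are illustrative):

	/* Hedged sketch: edit the partial regs, then write the change back. */
	struct pt_regs local_regs;
	struct pt_regs *regs = ftrace_partial_regs(fregs, &local_regs);

	instruction_pointer_set(regs, new_ip);
	ftrace_partial_regs_update(fregs, regs);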
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
deleted file mode 100644
index c304dcdb4eac..000000000000
--- a/include/linux/getcpu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GETCPU_H
-#define _LINUX_GETCPU_H 1
-
-/* Cache for getcpu() to speed it up. Results might be a short time
- out of date, but will be faster.
-
- User programs should not refer to the contents of this structure.
- I repeat they should not refer to it. If they do they will break
- in future kernels.
-
- It is only a private cache for vgetcpu(). It will change in future kernels.
- The user program must store this information per thread (__thread)
- If you want 100% accurate information pass NULL instead. */
-struct getcpu_cache {
- unsigned long blob[128 / sizeof(long)];
-};
-
-#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b155929af5b1..6ecf6dda93e0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -407,9 +407,15 @@ extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
+/* A helper for checking if gfp includes all the specified flags */
+static inline bool gfp_has_flags(gfp_t gfp, gfp_t flags)
+{
+ return (gfp & flags) == flags;
+}
+
static inline bool gfp_has_io_fs(gfp_t gfp)
{
- return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+ return gfp_has_flags(gfp, __GFP_IO | __GFP_FS);
}
/*
@@ -430,39 +436,29 @@ typedef unsigned int __bitwise acr_flags_t;
#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) /* allocate for CMA */
/* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- acr_flags_t alloc_flags, gfp_t gfp_mask);
-#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
-
-extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask);
-#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
-
-#endif
+int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_frozen_range(...) \
+ alloc_hooks(alloc_contig_frozen_range_noprof(__VA_ARGS__))
+
+int alloc_contig_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_range(...) \
+ alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
+ gfp_t gfp_mask, int nid, nodemask_t *nodemask);
+#define alloc_contig_frozen_pages(...) \
+ alloc_hooks(alloc_contig_frozen_pages_noprof(__VA_ARGS__))
+
+struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...) \
+ alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
+
+void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages);
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
-
-#ifdef CONFIG_CONTIG_ALLOC
-static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
- int nid, nodemask_t *node)
-{
- struct page *page;
-
- if (WARN_ON(!order || !(gfp & __GFP_COMP)))
- return NULL;
-
- page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
-
- return page ? page_folio(page) : NULL;
-}
-#else
-static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
- int nid, nodemask_t *node)
-{
- return NULL;
-}
#endif
-/* This should be paired with folio_put() rather than free_contig_range(). */
-#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
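A short sketch of the gfp_has_flags() helper above (hypothetical predicate):

	/* Hedged sketch: one mask test instead of an open-coded comparison. */
	static bool can_do_fs_reclaim(gfp_t gfp)
	{
		return gfp_has_flags(gfp, __GFP_DIRECT_RECLAIM | __GFP_FS);
	}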
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 3de43b12209e..814bb2892f99 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -309,8 +309,10 @@ enum {
*
* %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
* watermark is applied to allow access to "atomic reserves".
- * The current implementation doesn't support NMI and few other strict
- * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ * The current implementation doesn't support NMI, nor contexts that disable
+ * preemption under PREEMPT_RT. This includes raw_spin_lock() and plain
+ * preempt_disable() - see "Memory allocation" in
+ * Documentation/core-api/real-time/differences.rst for more info.
*
* %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
@@ -321,6 +323,7 @@ enum {
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback. It is very
* likely to fail to allocate memory, even for very small allocations.
+ * The same restrictions on calling contexts apply as for %GFP_ATOMIC.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
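A sketch of the calling convention the updated wording implies (struct foo and the function are illustrative): GFP_NOWAIT must tolerate failure, and neither it nor GFP_ATOMIC may be used from NMI or, under PREEMPT_RT, from regions that disable preemption.

#include <linux/slab.h>

struct foo { int val; };

/* Illustrative: opportunistic allocation that must never stall.
 * GFP_NOWAIT can fail even for tiny sizes, so the caller needs a
 * fallback path (defer, retry from process context, or drop).
 */
static struct foo *foo_alloc_opportunistic(void)
{
	return kzalloc(sizeof(struct foo), GFP_NOWAIT);
}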
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index cafeb7a40ad1..0d8408582918 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -607,6 +607,42 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
flags, label);
}
+/**
+ * devm_fwnode_gpiod_get_optional - obtain an optional GPIO from firmware node
+ * @dev: GPIO consumer
+ * @fwnode: handle of the firmware node
+ * @con_id: function within the GPIO consumer
+ * @flags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * This function can be used for drivers that get their configuration
+ * from opaque firmware.
+ *
+ * GPIO descriptors returned from this function are automatically disposed on
+ * driver detach.
+ *
+ * Returns:
+ * The GPIO descriptor corresponding to the optional function @con_id of device
+ * @dev, NULL if no GPIO has been assigned to the requested function, or
+ * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
+ */
+static inline
+struct gpio_desc *devm_fwnode_gpiod_get_optional(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+
+ desc = devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+ return NULL;
+
+ return desc;
+}
+
struct acpi_gpio_params {
unsigned int crs_entry_index;
unsigned short line_index;
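A hypothetical probe path showing the intended calling convention of the new helper: NULL means the line is simply absent and is not an error, while IS_ERR() values (e.g. -EPROBE_DEFER) must still be propagated. The "reset" con_id and label are assumptions for illustration.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_probe(struct device *dev, struct fwnode_handle *fwnode)
{
	struct gpio_desc *reset;

	reset = devm_fwnode_gpiod_get_optional(dev, fwnode, "reset",
					       GPIOD_OUT_LOW, "example-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* a real acquisition failure */
	if (reset)			/* optional: may legitimately be NULL */
		gpiod_set_value_cansleep(reset, 1);
	return 0;
}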
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index abc20f9810fd..af03db851a1d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -197,15 +197,111 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
}
#endif
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
+#ifndef clear_user_page
+/**
+ * clear_user_page() - clear a page to be mapped to user space
+ * @addr: the address of the page
+ * @vaddr: the address of the user mapping
+ * @page: the page
+ *
+ * We condition the definition of clear_user_page() on the architecture
+ * not having a custom clear_user_highpage(). That's because if there
+ * is some special flushing needed for clear_user_highpage() then it
+ * is likely that clear_user_page() also needs some magic. And, since
+ * our only caller is the generic clear_user_highpage(), not defining
+ * it is not much of a loss.
+ */
+static inline void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+ clear_page(addr);
+}
+#endif
+
+/**
+ * clear_user_pages() - clear a page range to be mapped to user space
+ * @addr: start address
+ * @vaddr: start address of the user mapping
+ * @page: start page
+ * @npages: number of pages
+ *
+ * Assumes that the region (@addr, +@npages) has been validated
+ * already so this does no exception handling.
+ *
+ * If the architecture provides a clear_user_page(), use that;
+ * otherwise, we can safely use clear_pages().
+ */
+static inline void clear_user_pages(void *addr, unsigned long vaddr,
+ struct page *page, unsigned int npages)
+{
+#ifdef clear_user_page
+ do {
+ clear_user_page(addr, vaddr, page);
+ addr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+#else
+ /*
+ * Prefer clear_pages() to allow for architectural optimizations
+ * when operating on contiguous page ranges.
+ */
+ clear_pages(addr, npages);
+#endif
+}
+
+/**
+ * clear_user_highpage() - clear a page to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ *
+ * With !CONFIG_HIGHMEM this (and the copy_user_highpage() below) will
+ * be plain clear_user_page() (and copy_user_page()).
+ */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
kunmap_local(addr);
}
+#endif /* clear_user_highpage */
+
+/**
+ * clear_user_highpages() - clear a page range to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ * @npages: number of pages
+ *
+ * Assumes that all the pages in the region (@page, +@npages) are valid
+ * so this does no exception handling.
+ */
+static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
+ unsigned int npages)
+{
+#if defined(clear_user_highpage) || defined(CONFIG_HIGHMEM)
+ /*
+ * An architecture-defined clear_user_highpage() implies special
+ * handling is needed.
+ *
+ * So we use that, or the generic variant if CONFIG_HIGHMEM is
+ * enabled.
+ */
+ do {
+ clear_user_highpage(page, vaddr);
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+#else
+
+ /*
+ * Prefer clear_user_pages() to allow for architectural optimizations
+ * when operating on contiguous page ranges.
+ */
+ clear_user_pages(page_address(page), vaddr, page, npages);
#endif
+}
#ifndef vma_alloc_zeroed_movable_folio
/**
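A sketch of the contract spelled out in the kerneldoc above, assuming the caller has already validated the range; clear_user_highpages() picks the per-page loop or the contiguous clear_user_pages()/clear_pages() fast path by itself.

#include <linux/highmem.h>

/* Illustrative: zero @npages already-validated pages destined for a
 * user mapping starting at @vaddr. No exception handling is performed.
 */
static void example_zero_user_range(struct page *page, unsigned long vaddr,
				    unsigned int npages)
{
	clear_user_highpages(page, vaddr, npages);
}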
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
deleted file mode 100644
index 07414c241e65..000000000000
--- a/include/linux/hippidevice.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Definitions for the HIPPI handlers.
- *
- * Version: @(#)hippidevice.h 1.0.0 05/26/97
- *
- * Author: Jes Sorensen, <Jes.Sorensen@cern.ch>
- *
- * hippidevice.h is based on previous fddidevice.h work by
- * Ross Biro
- * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- * Alan Cox, <gw4pts@gw4pts.ampr.org>
- * Lawrence V. Stefani, <stefani@lkg.dec.com>
- */
-#ifndef _LINUX_HIPPIDEVICE_H
-#define _LINUX_HIPPIDEVICE_H
-
-#include <linux/if_hippi.h>
-
-#ifdef __KERNEL__
-
-struct neigh_parms;
-struct net_device;
-struct sk_buff;
-
-struct hippi_cb {
- __u32 ifield;
-};
-
-__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-int hippi_mac_addr(struct net_device *dev, void *p);
-int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-struct net_device *alloc_hippi_dev(int sizeof_priv);
-#endif
-
-#endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index ca1ec437a3ca..51a6dc2b97e9 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -447,12 +447,16 @@ struct hisi_qp_ops {
int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};
+struct instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
+};
+
struct hisi_qp {
u32 qp_id;
u16 sq_depth;
u16 cq_depth;
u8 alg_type;
- u8 req_type;
struct qm_dma qdma;
void *sqe;
@@ -462,7 +466,6 @@ struct hisi_qp {
struct hisi_qp_status qp_status;
struct hisi_qp_ops *hw_ops;
- void *qp_ctx;
void (*req_cb)(struct hisi_qp *qp, void *data);
void (*event_cb)(struct hisi_qp *qp);
@@ -471,6 +474,11 @@ struct hisi_qp {
bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
+
+ u32 ref_count;
+ spinlock_t qp_lock;
+ struct instance_backlog backlog;
+ const void **msg;
};
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
@@ -563,6 +571,7 @@ void hisi_qm_reset_done(struct pci_dev *pdev);
int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
bool op);
+int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
@@ -575,7 +584,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
void hisi_acc_free_sgl_pool(struct device *dev,
struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
- u8 alg_type, int node, struct hisi_qp **qps);
+ u8 *alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 9fa9c30a34e6..5e7a63143a4a 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -380,7 +380,7 @@ struct host1x_driver {
struct list_head list;
int (*probe)(struct host1x_device *device);
- int (*remove)(struct host1x_device *device);
+ void (*remove)(struct host1x_device *device);
void (*shutdown)(struct host1x_device *device);
};
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 0de12f14d6a4..74adbd4e7003 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -112,12 +112,6 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
-static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
-{
- timer->node.expires = tv64;
- timer->_softexpires = tv64;
-}
-
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = ktime_add_safe(timer->node.expires, time);
@@ -140,15 +134,6 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
return timer->_softexpires;
}
-static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
-{
- return timer->node.expires;
-}
-static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
-{
- return timer->_softexpires;
-}
-
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->node.expires);
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index aa49ffa130e5..02b010df6570 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -6,26 +6,6 @@
#include <linux/timerqueue.h>
#include <linux/seqlock.h>
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (HIGH_RES_NSEC)
-# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
-
-#else
-
-# define MONOTONIC_RES_NSEC LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-
-#endif
-
#ifdef CONFIG_64BIT
# define __hrtimer_clock_base_align ____cacheline_aligned
#else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 019a1c5281e4..94a03591990c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -171,11 +171,11 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
+extern int movable_gigantic_pages __read_mostly;
extern int sysctl_hugetlb_shm_group __read_mostly;
extern struct list_head huge_boot_pages[MAX_NUMNODES];
void hugetlb_bootmem_alloc(void);
-bool hugetlb_bootmem_allocated(void);
extern nodemask_t hugetlb_bootmem_nodes;
void hugetlb_bootmem_set_nodes(void);
@@ -240,8 +240,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
@@ -279,6 +280,8 @@ void fixup_hugetlb_reservations(struct vm_area_struct *vma);
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+unsigned int arch_hugetlb_cma_order(void);
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
@@ -300,13 +303,17 @@ static inline struct address_space *hugetlb_folio_mapping_lock_write(
return NULL;
}
-static inline int huge_pmd_unshare(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline int huge_pmd_unshare(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
return 0;
}
+static inline void huge_pmd_unshare_flush(struct mmu_gather *tlb,
+ struct vm_area_struct *vma)
+{
+}
+
static inline void adjust_range_if_pmd_sharing_possible(
struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
@@ -924,7 +931,7 @@ static inline bool hugepage_movable_supported(struct hstate *h)
if (!hugepage_migration_supported(h))
return false;
- if (hstate_is_gigantic(h))
+ if (hstate_is_gigantic(h) && !movable_gigantic_pages)
return false;
return true;
}
@@ -1298,11 +1305,6 @@ static inline bool hugetlbfs_pagecache_present(
static inline void hugetlb_bootmem_alloc(void)
{
}
-
-static inline bool hugetlb_bootmem_allocated(void)
-{
- return false;
-}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -1316,9 +1318,9 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
}
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
-extern void __init hugetlb_cma_reserve(int order);
+extern void __init hugetlb_cma_reserve(void);
#else
-static inline __init void hugetlb_cma_reserve(int order)
+static inline __init void hugetlb_cma_reserve(void)
{
}
#endif
@@ -1326,7 +1328,7 @@ static inline __init void hugetlb_cma_reserve(int order)
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
- return page_count(virt_to_page(pte)) > 1;
+ return ptdesc_pmd_is_shared(virt_to_ptdesc(pte));
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index b424555753b1..b77bc55a4cf3 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -15,6 +15,7 @@
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -48,6 +49,7 @@ struct hwrng {
/* internal. */
struct list_head list;
struct kref ref;
+ struct work_struct cleanup_work;
struct completion cleanup_done;
struct completion dying;
};
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 9fcb6410a584..971d53349b6f 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -25,7 +25,7 @@
* @I3C_ERROR_M2: M2 error
*
* These are the standard error codes as defined by the I3C specification.
- * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * When -EIO is returned by the i3c_device_do_i3c_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
* &struct_i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
@@ -79,9 +79,6 @@ struct i3c_xfer {
enum i3c_error_code err;
};
-/* keep back compatible */
-#define i3c_priv_xfer i3c_xfer
-
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
@@ -308,15 +305,23 @@ static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
i3c_i2c_driver_unregister, \
__i2cdrv)
+#if IS_ENABLED(CONFIG_I3C)
int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
int nxfers, enum i3c_xfer_mode mode);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
+#else
+static inline int
+i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
+{
+ return -EOPNOTSUPP;
+}
-static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_xfer *xfers,
- int nxfers)
+static inline u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev)
{
- return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+ return 0;
}
+#endif
int i3c_device_do_setdasa(struct i3c_device *dev);
@@ -358,6 +363,5 @@ int i3c_device_request_ibi(struct i3c_device *dev,
void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
-u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
#endif /* I3C_DEV_H */
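A hedged consumer sketch for the reworked API; the field names follow the former i3c_priv_xfer layout (rnw/len/data), which is an assumption here. With CONFIG_I3C disabled the new stubs make this compile away to -EOPNOTSUPP.

#include <linux/i3c/device.h>

static int example_i3c_read(struct i3c_device *dev, void *buf, u16 len)
{
	struct i3c_xfer xfer = {
		.rnw = true,
		.len = len,
		.data.in = buf,
	};

	/* SDR is always legal; i3c_device_get_supported_xfer_mode() can
	 * be consulted first when an HDR mode is preferred.
	 */
	return i3c_device_do_xfers(dev, &xfer, 1, I3C_SDR);
}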
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 58d01ed4cce7..592b646f6134 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -462,6 +462,8 @@ struct i3c_bus {
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
* @set_speed: adjust I3C open drain mode timing.
+ * @set_dev_nack_retry: configure device NACK retry count for the master
+ * controller.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -491,6 +493,8 @@ struct i3c_master_controller_ops {
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
+ int (*set_dev_nack_retry)(struct i3c_master_controller *master,
+ unsigned long dev_nack_retry_cnt);
};
/**
@@ -505,6 +509,8 @@ struct i3c_master_controller_ops {
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
* @hotjoin: true if the master support hotjoin
+ * @rpm_allowed: true if Runtime PM allowed
+ * @rpm_ibi_allowed: true if IBI and Hot-Join allowed while runtime suspended
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
@@ -514,6 +520,7 @@ struct i3c_master_controller_ops {
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
* be done from a sleep-able context
+ * @dev_nack_retry_count: retry count when a slave device NACKs
*
* A &struct i3c_master_controller has to be registered to the I3C subsystem
* through i3c_master_register(). None of &struct i3c_master_controller fields
@@ -528,12 +535,15 @@ struct i3c_master_controller {
unsigned int secondary : 1;
unsigned int init_done : 1;
unsigned int hotjoin: 1;
+ unsigned int rpm_allowed: 1;
+ unsigned int rpm_ibi_allowed: 1;
struct {
struct list_head i3c;
struct list_head i2c;
} boardinfo;
struct i3c_bus bus;
struct workqueue_struct *wq;
+ unsigned int dev_nack_retry_count;
};
/**
@@ -595,6 +605,7 @@ int i3c_master_get_free_addr(struct i3c_master_controller *master,
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr);
int i3c_master_do_daa(struct i3c_master_controller *master);
+int i3c_master_do_daa_ext(struct i3c_master_controller *master, bool rstdaa);
struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
size_t len, bool force_bounce,
enum dma_data_direction dir);
diff --git a/include/linux/ieee80211-eht.h b/include/linux/ieee80211-eht.h
index f9782e46c5e5..f8e9f5d36d2a 100644
--- a/include/linux/ieee80211-eht.h
+++ b/include/linux/ieee80211-eht.h
@@ -558,6 +558,17 @@ struct ieee80211_mle_tdls_common_info {
#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+#define IEEE80211_EML_CTRL_EMLSR_MODE BIT(0)
+#define IEEE80211_EML_CTRL_EMLMR_MODE BIT(1)
+#define IEEE80211_EML_CTRL_EMLSR_PARAM_UPDATE BIT(2)
+#define IEEE80211_EML_CTRL_INDEV_COEX_ACT BIT(3)
+
+#define IEEE80211_EML_EMLSR_PAD_DELAY 0x07
+#define IEEE80211_EML_EMLSR_TRANS_DELAY 0x38
+
+#define IEEE80211_EML_EMLMR_RX_MCS_MAP 0xf0
+#define IEEE80211_EML_EMLMR_TX_MCS_MAP 0x0f
+
/* no fixed fields in PRIO_ACCESS */
/**
@@ -1179,4 +1190,4 @@ static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
_data + ieee80211_mle_common_size(_data),\
_len - ieee80211_mle_common_size(_data))
-#endif /* LINUX_IEEE80211_H */
+#endif /* LINUX_IEEE80211_EHT_H */
diff --git a/include/linux/ieee80211-s1g.h b/include/linux/ieee80211-s1g.h
index 5b9ed2dcc00e..22dde4cbc1b0 100644
--- a/include/linux/ieee80211-s1g.h
+++ b/include/linux/ieee80211-s1g.h
@@ -572,4 +572,4 @@ static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
}
}
-#endif /* LINUX_IEEE80211_H */
+#endif /* LINUX_IEEE80211_S1G_H */
diff --git a/include/linux/ieee80211-uhr.h b/include/linux/ieee80211-uhr.h
new file mode 100644
index 000000000000..132acced7d79
--- /dev/null
+++ b/include/linux/ieee80211-uhr.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 UHR definitions
+ *
+ * Copyright (c) 2025-2026 Intel Corporation
+ */
+#ifndef LINUX_IEEE80211_UHR_H
+#define LINUX_IEEE80211_UHR_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_UHR_OPER_PARAMS_DPS_ENA 0x0001
+#define IEEE80211_UHR_OPER_PARAMS_NPCA_ENA 0x0002
+#define IEEE80211_UHR_OPER_PARAMS_DBE_ENA 0x0004
+#define IEEE80211_UHR_OPER_PARAMS_PEDCA_ENA 0x0008
+
+struct ieee80211_uhr_operation {
+ __le16 params;
+ u8 basic_mcs_nss_set[4];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_UHR_NPCA_PARAMS_PRIMARY_CHAN_OFFS 0x0000000F
+#define IEEE80211_UHR_NPCA_PARAMS_MIN_DUR_THRESH 0x000000F0
+#define IEEE80211_UHR_NPCA_PARAMS_SWITCH_DELAY 0x00003F00
+#define IEEE80211_UHR_NPCA_PARAMS_SWITCH_BACK_DELAY 0x000FC000
+#define IEEE80211_UHR_NPCA_PARAMS_INIT_QSRC 0x00300000
+#define IEEE80211_UHR_NPCA_PARAMS_MOPLEN 0x00400000
+#define IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES 0x00800000
+
+struct ieee80211_uhr_npca_info {
+ __le32 params;
+ __le16 dis_subch_bmap[];
+} __packed;
+
+static inline bool ieee80211_uhr_oper_size_ok(const u8 *data, u8 len,
+ bool beacon)
+{
+ const struct ieee80211_uhr_operation *oper = (const void *)data;
+ u8 needed = sizeof(*oper);
+
+ if (len < needed)
+ return false;
+
+ /* nothing else present in beacons */
+ if (beacon)
+ return true;
+
+ /* FIXME: DPS, DBE, P-EDCA (consider order, also relative to NPCA) */
+
+ if (oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_NPCA_ENA)) {
+ const struct ieee80211_uhr_npca_info *npca =
+ (const void *)oper->variable;
+
+ needed += sizeof(*npca);
+
+ if (len < needed)
+ return false;
+
+ if (npca->params & cpu_to_le32(IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES))
+ needed += sizeof(npca->dis_subch_bmap[0]);
+ }
+
+ return len >= needed;
+}
+
+/*
+ * Note: cannot call this on the element coming from a beacon,
+ * must ensure ieee80211_uhr_oper_size_ok(..., false) first
+ */
+static inline const struct ieee80211_uhr_npca_info *
+ieee80211_uhr_npca_info(const struct ieee80211_uhr_operation *oper)
+{
+ if (!(oper->params & cpu_to_le16(IEEE80211_UHR_OPER_PARAMS_NPCA_ENA)))
+ return NULL;
+
+ /* FIXME: DPS */
+
+ return (const void *)oper->variable;
+}
+
+static inline const __le16 *
+ieee80211_uhr_npca_dis_subch_bitmap(const struct ieee80211_uhr_operation *oper)
+{
+ const struct ieee80211_uhr_npca_info *npca;
+
+ npca = ieee80211_uhr_npca_info(oper);
+ if (!npca)
+ return NULL;
+ if (!(npca->params & cpu_to_le32(IEEE80211_UHR_NPCA_PARAMS_DIS_SUBCH_BMAP_PRES)))
+ return NULL;
+ return npca->dis_subch_bmap;
+}
+
+#define IEEE80211_UHR_MAC_CAP0_DPS_SUPP 0x01
+#define IEEE80211_UHR_MAC_CAP0_DPS_ASSIST_SUPP 0x02
+#define IEEE80211_UHR_MAC_CAP0_DPS_AP_STATIC_HCM_SUPP 0x04
+#define IEEE80211_UHR_MAC_CAP0_NPCA_SUPP 0x10
+#define IEEE80211_UHR_MAC_CAP0_ENH_BSR_SUPP 0x20
+#define IEEE80211_UHR_MAC_CAP0_ADD_MAP_TID_SUPP 0x40
+#define IEEE80211_UHR_MAC_CAP0_EOTSP_SUPP 0x80
+
+#define IEEE80211_UHR_MAC_CAP1_DSO_SUPP 0x01
+#define IEEE80211_UHR_MAC_CAP1_PEDCA_SUPP 0x02
+#define IEEE80211_UHR_MAC_CAP1_DBE_SUPP 0x04
+#define IEEE80211_UHR_MAC_CAP1_UL_LLI_SUPP 0x08
+#define IEEE80211_UHR_MAC_CAP1_P2P_LLI_SUPP 0x10
+#define IEEE80211_UHR_MAC_CAP1_PUO_SUPP 0x20
+#define IEEE80211_UHR_MAC_CAP1_AP_PUO_SUPP 0x40
+#define IEEE80211_UHR_MAC_CAP1_DUO_SUPP 0x80
+
+#define IEEE80211_UHR_MAC_CAP2_OMC_UL_MU_DIS_RX_SUPP 0x01
+#define IEEE80211_UHR_MAC_CAP2_AOM_SUPP 0x02
+#define IEEE80211_UHR_MAC_CAP2_IFCS_LOC_SUPP 0x04
+#define IEEE80211_UHR_MAC_CAP2_UHR_TRS_SUPP 0x08
+#define IEEE80211_UHR_MAC_CAP2_TXSPG_SUPP 0x10
+#define IEEE80211_UHR_MAC_CAP2_TXOP_RET_IN_TXSPG 0x20
+#define IEEE80211_UHR_MAC_CAP2_UHR_OM_PU_TO_LOW 0xC0
+
+#define IEEE80211_UHR_MAC_CAP3_UHR_OM_PU_TO_HIGH 0x03
+#define IEEE80211_UHR_MAC_CAP3_PARAM_UPD_ADV_NOTIF_INTV 0x1C
+#define IEEE80211_UHR_MAC_CAP3_UPD_IND_TIM_INTV_LOW 0xE0
+
+#define IEEE80211_UHR_MAC_CAP4_UPD_IND_TIM_INTV_HIGH 0x03
+#define IEEE80211_UHR_MAC_CAP4_BOUNDED_ESS 0x04
+#define IEEE80211_UHR_MAC_CAP4_BTM_ASSURANCE 0x08
+#define IEEE80211_UHR_MAC_CAP4_CO_BF_SUPP 0x10
+
+#define IEEE80211_UHR_MAC_CAP_DBE_MAX_BW 0x07
+#define IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES 0x08
+#define IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES 0x10
+
+struct ieee80211_uhr_cap_mac {
+ u8 mac_cap[5];
+} __packed;
+
+struct ieee80211_uhr_cap {
+ struct ieee80211_uhr_cap_mac mac;
+ /* DBE, PHY capabilities */
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_SND_NDP_LE80 0x01
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_DL_MU_LE80 0x02
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_SND_NDP_160 0x04
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_DL_MU_160 0x08
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_SND_NDP_320 0x10
+#define IEEE80211_UHR_PHY_CAP_MAX_NSS_RX_DL_MU_320 0x20
+#define IEEE80211_UHR_PHY_CAP_ELR_RX 0x40
+#define IEEE80211_UHR_PHY_CAP_ELR_TX 0x80
+
+struct ieee80211_uhr_cap_phy {
+ u8 cap;
+} __packed;
+
+static inline bool ieee80211_uhr_capa_size_ok(const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_uhr_cap *cap = (const void *)data;
+ size_t needed = sizeof(*cap) + sizeof(struct ieee80211_uhr_cap_phy);
+
+ if (len < needed)
+ return false;
+
+ /*
+ * A non-AP STA does not include the DBE Capability Parameters field
+ * in the UHR MAC Capabilities Information field.
+ */
+ if (from_ap && cap->mac.mac_cap[1] & IEEE80211_UHR_MAC_CAP1_DBE_SUPP) {
+ u8 dbe;
+
+ needed += 1;
+ if (len < needed)
+ return false;
+
+ dbe = cap->variable[0];
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES)
+ needed += 3;
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES)
+ needed += 3;
+ }
+
+ return len >= needed;
+}
+
+static inline const struct ieee80211_uhr_cap_phy *
+ieee80211_uhr_phy_cap(const struct ieee80211_uhr_cap *cap, bool from_ap)
+{
+ u8 offs = 0;
+
+ if (from_ap && cap->mac.mac_cap[1] & IEEE80211_UHR_MAC_CAP1_DBE_SUPP) {
+ u8 dbe = cap->variable[0];
+
+ offs += 1;
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_160_PRES)
+ offs += 3;
+
+ if (dbe & IEEE80211_UHR_MAC_CAP_DBE_EHT_MCS_MAP_320_PRES)
+ offs += 3;
+ }
+
+ return (const void *)&cap->variable[offs];
+}
+
+#define IEEE80211_SMD_INFO_CAPA_DL_DATA_FWD 0x01
+#define IEEE80211_SMD_INFO_CAPA_MAX_NUM_PREP 0x0E
+#define IEEE80211_SMD_INFO_CAPA_TYPE 0x10
+#define IEEE80211_SMD_INFO_CAPA_PTK_PER_AP_MLD 0x20
+
+struct ieee80211_smd_info {
+ u8 id[ETH_ALEN];
+ u8 capa;
+ __le16 timeout;
+} __packed;
+
+#endif /* LINUX_IEEE80211_UHR_H */
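A parsing sketch that honours the ordering rule stated above ieee80211_uhr_npca_info(): size-validate the element first, then descend into the NPCA info (element extraction is assumed to have happened elsewhere).

#include <linux/ieee80211.h>

static const struct ieee80211_uhr_npca_info *
example_get_npca(const u8 *elem_data, u8 elem_len)
{
	/* Never skip this check for non-beacon sources. */
	if (!ieee80211_uhr_oper_size_ok(elem_data, elem_len, false))
		return NULL;

	return ieee80211_uhr_npca_info((const void *)elem_data);
}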
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 96439de55f07..0aa2fb8f88de 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2025 Intel Corporation
+ * Copyright (c) 2018 - 2026 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -1186,6 +1186,12 @@ struct ieee80211_mgmt {
u8 action_code;
u8 variable[];
} __packed epcs;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 control;
+ u8 variable[];
+ } __packed eml_omn;
} u;
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -1200,8 +1206,9 @@ struct ieee80211_mgmt {
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
+#define BSS_MEMBERSHIP_SELECTOR_UHR_PHY 120
-#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY
+#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_UHR_PHY
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1351,6 +1358,7 @@ struct ieee80211_tdls_data {
#define WLAN_AUTH_FILS_SK 4
#define WLAN_AUTH_FILS_SK_PFS 5
#define WLAN_AUTH_FILS_PK 6
+#define WLAN_AUTH_EPPKE 9
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1801,6 +1809,15 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
+ WLAN_EID_EXT_UHR_OPER = 151,
+ WLAN_EID_EXT_UHR_CAPA = 152,
+ WLAN_EID_EXT_MACP = 153,
+ WLAN_EID_EXT_SMD = 154,
+ WLAN_EID_EXT_BSS_SMD_TRANS_PARAMS = 155,
+ WLAN_EID_EXT_CHAN_USAGE = 156,
+ WLAN_EID_EXT_UHR_MODE_CHG = 157,
+ WLAN_EID_EXT_UHR_PARAM_UPD = 158,
+ WLAN_EID_EXT_TXPI = 159,
};
/* Action category code */
@@ -2744,6 +2761,22 @@ static inline bool for_each_element_completed(const struct element *element,
#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4)
#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
+/* EBPCC = Enhanced BSS Parameter Change Count */
+#define IEEE80211_ENH_CRIT_UPD_EBPCC 0x0F
+#define IEEE80211_ENH_CRIT_UPD_TYPE 0x70
+#define IEEE80211_ENH_CRIT_UPD_TYPE_NO_UHR 0
+#define IEEE80211_ENH_CRIT_UPD_TYPE_UHR 1
+#define IEEE80211_ENH_CRIT_UPD_ALL 0x80
+
+/**
+ * struct ieee80211_enh_crit_upd - enhanced critical update (UHR)
+ * @v: value of the enhanced critical update data,
+ * see %IEEE80211_ENH_CRIT_UPD_* to parse the bits
+ */
+struct ieee80211_enh_crit_upd {
+ u8 v;
+} __packed;
+
/*
* reduced neighbor report, based on Draft P802.11ax_D6.1,
* section 9.4.2.170 and accepted contributions.
@@ -2762,6 +2795,7 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10
#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+#define IEEE80211_RNR_TBTT_PARAMS_SAME_SMD 0x80
#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127
#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
@@ -2814,12 +2848,14 @@ struct ieee80211_tbtt_info_ge_11 {
u8 bss_params;
s8 psd_20;
struct ieee80211_rnr_mld_params mld_params;
+ struct ieee80211_enh_crit_upd enh_crit_upd;
} __packed;
#include "ieee80211-ht.h"
#include "ieee80211-vht.h"
#include "ieee80211-he.h"
#include "ieee80211-eht.h"
+#include "ieee80211-uhr.h"
#include "ieee80211-mesh.h"
#include "ieee80211-s1g.h"
#include "ieee80211-p2p.h"
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index f7f34eb15e06..e6272f9c5e42 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -594,8 +594,17 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
}
}
+struct vlan_type_depth {
+ __be16 type;
+ u16 depth;
+};
+
+struct vlan_type_depth __vlan_get_protocol_offset(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset);
+
/**
- * __vlan_get_protocol_offset() - get protocol EtherType.
+ * vlan_get_protocol_offset_inline() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
* @mac_offset: MAC offset
@@ -604,40 +613,24 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
- __be16 type,
- int mac_offset,
- int *depth)
+static inline
+__be16 vlan_get_protocol_offset_inline(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset,
+ int *depth)
{
- unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
-
- /* if type is 802.1Q/AD then the header should already be
- * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
- * ETH_HLEN otherwise
- */
if (eth_type_vlan(type)) {
- if (vlan_depth) {
- if (WARN_ON(vlan_depth < VLAN_HLEN))
- return 0;
- vlan_depth -= VLAN_HLEN;
- } else {
- vlan_depth = ETH_HLEN;
- }
- do {
- struct vlan_hdr vhdr, *vh;
+ struct vlan_type_depth res;
- vh = skb_header_pointer(skb, mac_offset + vlan_depth,
- sizeof(vhdr), &vhdr);
- if (unlikely(!vh || !--parse_depth))
- return 0;
+ res = __vlan_get_protocol_offset(skb, type, mac_offset);
- type = vh->h_vlan_encapsulated_proto;
- vlan_depth += VLAN_HLEN;
- } while (eth_type_vlan(type));
+ if (depth && res.type)
+ *depth = res.depth;
+ return res.type;
}
if (depth)
- *depth = vlan_depth;
+ *depth = skb->mac_len;
return type;
}
@@ -645,7 +638,7 @@ static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
- return __vlan_get_protocol_offset(skb, type, 0, depth);
+ return vlan_get_protocol_offset_inline(skb, type, 0, depth);
}
/**
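An out-of-line caller sketch for the renamed inline (the function below is hypothetical): the semantics are unchanged, stacked 802.1Q/802.1AD tags are still walked, but the loop now lives in the uninlined __vlan_get_protocol_offset().

#include <linux/if_vlan.h>

static __be16 example_l3_proto(const struct sk_buff *skb)
{
	int depth = 0;

	/* On success @depth holds the offset of the L3 header;
	 * a return of 0 means the VLAN chain could not be parsed.
	 */
	return vlan_get_protocol_offset_inline(skb, skb->protocol, 0, &depth);
}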
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 4247497f3f8b..b87841a355f8 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -14,6 +14,7 @@
* @mlock: lock used to prevent simultaneous device state changes
* @mlock_key: lockdep class for iio_dev lock
* @info_exist_lock: lock to prevent use during removal
+ * @info_exist_key: lockdep class for info_exist lock
* @trig_readonly: mark the current trigger immutable
* @event_interface: event chrdevs associated with interrupt lines
* @attached_buffers: array of buffers statically attached by the driver
@@ -47,6 +48,7 @@ struct iio_dev_opaque {
struct mutex mlock;
struct lock_class_key mlock_key;
struct mutex info_exist_lock;
+ struct lock_class_key info_exist_key;
bool trig_readonly;
struct iio_event_interface *event_interface;
struct iio_buffer **attached_buffers;
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 8e29cb4e6a01..abf8923f8fc5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -69,6 +69,7 @@ static inline int ima_measure_critical_data(const char *event_label,
#ifdef CONFIG_HAVE_IMA_KEXEC
int __init ima_free_kexec_buffer(void);
int __init ima_get_kexec_buffer(void **addr, size_t *size);
+int ima_validate_range(phys_addr_t phys, size_t size);
#endif
#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 5730ba6b1cfa..dccbeb25f701 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -38,11 +38,11 @@ struct in_device {
struct ip_mc_list *mc_tomb;
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
- unsigned long mr_maxdelay;
unsigned long mr_qi; /* Query Interval */
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
+ u32 mr_maxdelay;
u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
index 92045d18cbfc..28776ee28d8e 100644
--- a/include/linux/init_syscalls.h
+++ b/include/linux/init_syscalls.h
@@ -17,3 +17,4 @@ int __init init_mkdir(const char *pathname, umode_t mode);
int __init init_rmdir(const char *pathname);
int __init init_utimes(char *filename, struct timespec64 *ts);
int __init init_dup(struct file *file);
+int __init init_pivot_root(const char *new_root, const char *put_old);
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index f1a1f4c92ded..7e5d26c8136f 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -3,8 +3,6 @@
#ifndef __LINUX_INITRD_H
#define __LINUX_INITRD_H
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
/* starting block # of image */
extern int rd_image_start;
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 711a1f0d1a73..a1b4cf81adc2 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
@@ -55,6 +56,19 @@ static __always_inline void instrument_read_write(const volatile void *v, size_t
kcsan_check_read_write(v, size);
}
+static __always_inline void instrument_atomic_check_alignment(const volatile void *v, size_t size)
+{
+#ifndef __DISABLE_EXPORTS
+ if (IS_ENABLED(CONFIG_DEBUG_ATOMIC)) {
+ unsigned int mask = size - 1;
+
+ if (IS_ENABLED(CONFIG_DEBUG_ATOMIC_LARGEST_ALIGN))
+ mask &= sizeof(struct { long x; } __aligned_largest) - 1;
+ WARN_ON_ONCE((unsigned long)v & mask);
+ }
+#endif
+}
+
/**
* instrument_atomic_read - instrument atomic read access
* @v: address of access
@@ -67,6 +81,7 @@ static __always_inline void instrument_atomic_read(const volatile void *v, size_
{
kasan_check_read(v, size);
kcsan_check_atomic_read(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
@@ -81,6 +96,7 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
{
kasan_check_write(v, size);
kcsan_check_atomic_write(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
@@ -95,6 +111,7 @@ static __always_inline void instrument_atomic_read_write(const volatile void *v,
{
kasan_check_write(v, size);
kcsan_check_atomic_read_write(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
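A deliberately contrived sketch of what the new alignment check catches when CONFIG_DEBUG_ATOMIC is enabled: an atomic whose address is not naturally aligned for its size trips WARN_ON_ONCE() via the instrumentation hooks.

#include <linux/atomic.h>

struct packed_ctr {
	char pad;
	atomic_t ctr;	/* misaligned on purpose inside a packed struct */
} __packed;

static void example_misaligned_atomic(struct packed_ctr *p)
{
	/* The instrumented atomic_inc() reaches
	 * instrument_atomic_read_write(), which now warns because
	 * &p->ctr is not 4-byte aligned.
	 */
	atomic_inc(&p->ctr);
}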
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 266f2b39213a..6cd26ffb0505 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -181,9 +181,8 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
extern int __must_check
-__request_percpu_irq(unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *devname,
- const cpumask_t *affinity, void __percpu *percpu_dev_id);
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler, const char *devname,
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -193,17 +192,8 @@ static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
- return __request_percpu_irq(irq, handler, 0,
- devname, NULL, percpu_dev_id);
-}
-
-static inline int __must_check
-request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
- const char *devname, const cpumask_t *affinity,
- void __percpu *percpu_dev_id)
-{
- return __request_percpu_irq(irq, handler, 0,
- devname, affinity, percpu_dev_id);
+ return request_percpu_irq_affinity(irq, handler, devname,
+ NULL, percpu_dev_id);
}
extern int __must_check
@@ -228,7 +218,7 @@ static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
- return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+ return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags | IRQF_COND_ONESHOT,
devname, dev_id);
}
@@ -871,12 +861,6 @@ static inline void init_irq_proc(void)
}
#endif
-#ifdef CONFIG_IRQ_TIMINGS
-void irq_timings_enable(void);
-void irq_timings_disable(void);
-u64 irq_timings_next_event(u64 now);
-#endif
-
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);
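A caller sketch for the promoted API (irq number, handler and dev_id are placeholders): what used to be spelled __request_percpu_irq(irq, handler, 0, name, mask, dev_id) now drops the flags argument entirely.

#include <linux/interrupt.h>

static int example_request(unsigned int irq, irq_handler_t handler,
			   const struct cpumask *mask,
			   void __percpu *dev_id)
{
	return request_percpu_irq_affinity(irq, handler, "example-percpu",
					   mask, dev_id);
}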
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 85fe4e6b275c..d1aa4edfc2a5 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -12,6 +12,7 @@ void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
bool io_is_uring_fops(struct file *file);
+int __io_uring_fork(struct task_struct *tsk);
static inline void io_uring_files_cancel(void)
{
@@ -25,9 +26,16 @@ static inline void io_uring_task_cancel(void)
}
static inline void io_uring_free(struct task_struct *tsk)
{
- if (tsk->io_uring)
+ if (tsk->io_uring || tsk->io_uring_restrict)
__io_uring_free(tsk);
}
+static inline int io_uring_fork(struct task_struct *tsk)
+{
+ if (tsk->io_uring_restrict)
+ return __io_uring_fork(tsk);
+
+ return 0;
+}
#else
static inline void io_uring_task_cancel(void)
{
@@ -46,6 +54,10 @@ static inline bool io_is_uring_fops(struct file *file)
{
return false;
}
+static inline int io_uring_fork(struct task_struct *tsk)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index a3e8ddc9b380..3e4a82a6f817 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -219,12 +219,26 @@ struct io_rings {
struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
+struct io_bpf_filter;
+struct io_bpf_filters {
+ refcount_t refs; /* ref for ->bpf_filters */
+ spinlock_t lock; /* protects ->bpf_filters modifications */
+ struct io_bpf_filter __rcu **filters;
+ struct rcu_head rcu_head;
+};
+
struct io_restriction {
DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ struct io_bpf_filters *bpf_filters;
+ /* ->bpf_filters needs COW on modification */
+ bool bpf_filters_cow;
u8 sqe_flags_allowed;
u8 sqe_flags_required;
- bool registered;
+ /* IORING_OP_* restrictions exist */
+ bool op_registered;
+ /* IORING_REGISTER_* restrictions exist */
+ bool reg_registered;
};
struct io_submit_link {
@@ -259,7 +273,8 @@ struct io_ring_ctx {
struct {
unsigned int flags;
unsigned int drain_next: 1;
- unsigned int restricted: 1;
+ unsigned int op_restricted: 1;
+ unsigned int reg_restricted: 1;
unsigned int off_timeout_used: 1;
unsigned int drain_active: 1;
unsigned int has_evfd: 1;
@@ -274,6 +289,8 @@ struct io_ring_ctx {
struct task_struct *submitter_task;
struct io_rings *rings;
+ /* cache of ->restrictions.bpf_filters->filters */
+ struct io_bpf_filter __rcu **bpf_filters;
struct percpu_ref refs;
clockid_t clockid;
@@ -316,7 +333,7 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there.
*/
bool poll_multi_queue;
- struct io_wq_work_list iopoll_list;
+ struct list_head iopoll_list;
struct io_file_table file_table;
struct io_rsrc_data buf_table;
@@ -444,6 +461,9 @@ struct io_ring_ctx {
struct list_head defer_list;
unsigned nr_drained;
+ /* protected by ->completion_lock */
+ unsigned nr_req_allocated;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
struct list_head napi_list; /* track busy poll napi_id */
spinlock_t napi_lock; /* napi_list lock */
@@ -456,10 +476,6 @@ struct io_ring_ctx {
DECLARE_HASHTABLE(napi_ht, 4);
#endif
- /* protected by ->completion_lock */
- unsigned evfd_last_cq_tail;
- unsigned nr_req_allocated;
-
/*
* Protection for resize vs mmap races - both the mmap and resize
* side will need to grab this lock, to prevent either side from
@@ -714,15 +730,21 @@ struct io_kiocb {
atomic_t refs;
bool cancel_seq_set;
- struct io_task_work io_task_work;
+
+ union {
+ struct io_task_work io_task_work;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ };
+
union {
/*
* for polled requests, i.e. IORING_OP_POLL_ADD and async armed
* poll
*/
struct hlist_node hash_node;
- /* For IOPOLL setup queues, with hybrid polling */
- u64 iopoll_start;
+ /* IOPOLL completion handling */
+ struct list_head iopoll_node;
/* for private io_kiocb freeing */
struct rcu_head rcu_head;
};
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6bb941707d12..99b7209dabd7 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -345,9 +345,9 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);
void iomap_read_folio(const struct iomap_ops *ops,
- struct iomap_read_folio_ctx *ctx);
+ struct iomap_read_folio_ctx *ctx, void *private);
void iomap_readahead(const struct iomap_ops *ops,
- struct iomap_read_folio_ctx *ctx);
+ struct iomap_read_folio_ctx *ctx, void *private);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
@@ -566,6 +566,15 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+/*
+ * Bounce buffer instead of using zero copy access.
+ *
+ * This is needed if the device needs stable data to checksum or generate
+ * parity. The file system must hook into the I/O submission and offload
+ * completions to user context for reads when this is set.
+ */
+#define IOMAP_DIO_BOUNCE (1 << 4)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags, void *private, size_t done_before);
@@ -599,7 +608,7 @@ static inline void iomap_bio_read_folio(struct folio *folio,
.cur_folio = folio,
};
- iomap_read_folio(ops, &ctx);
+ iomap_read_folio(ops, &ctx, NULL);
}
static inline void iomap_bio_readahead(struct readahead_control *rac,
@@ -610,7 +619,7 @@ static inline void iomap_bio_readahead(struct readahead_control *rac,
.rac = rac,
};
- iomap_readahead(ops, &ctx);
+ iomap_readahead(ops, &ctx, NULL);
}
#endif /* CONFIG_BLOCK */
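A filesystem-side sketch of the new @private argument (the ops structure is an assumed extern; the context initialization mirrors iomap_bio_read_folio() above): callers that need no cookie simply pass NULL, as the bio wrappers now do.

#include <linux/iomap.h>

extern const struct iomap_ops example_iomap_ops;	/* assumed fs ops */

static int example_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_read_folio_ctx ctx = {
		.cur_folio = folio,
	};

	/* Third argument is new: an opaque per-call cookie. */
	iomap_read_folio(&example_iomap_ops, &ctx, NULL);
	return 0;
}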
diff --git a/include/linux/iommu-debug-pagealloc.h b/include/linux/iommu-debug-pagealloc.h
new file mode 100644
index 000000000000..46c3c1f70150
--- /dev/null
+++ b/include/linux/iommu-debug-pagealloc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 - Google Inc
+ * Author: Mostafa Saleh <smostafa@google.com>
+ * IOMMU API debug page alloc sanitizer
+ */
+
+#ifndef __LINUX_IOMMU_DEBUG_PAGEALLOC_H
+#define __LINUX_IOMMU_DEBUG_PAGEALLOC_H
+
+#ifdef CONFIG_IOMMU_DEBUG_PAGEALLOC
+DECLARE_STATIC_KEY_FALSE(iommu_debug_initialized);
+
+extern struct page_ext_operations page_iommu_debug_ops;
+
+void __iommu_debug_check_unmapped(const struct page *page, int numpages);
+
+static inline void iommu_debug_check_unmapped(const struct page *page, int numpages)
+{
+ if (static_branch_unlikely(&iommu_debug_initialized))
+ __iommu_debug_check_unmapped(page, numpages);
+}
+
+#else
+static inline void iommu_debug_check_unmapped(const struct page *page,
+ int numpages)
+{
+}
+
+#endif /* CONFIG_IOMMU_DEBUG_PAGEALLOC */
+
+#endif /* __LINUX_IOMMU_DEBUG_PAGEALLOC_H */
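A hypothetical hook illustrating the static-branch pattern the header sets up: callers use the inline wrapper, which costs nothing until iommu_debug_initialized is switched on, and only then funnels into the out-of-line check.

#include <linux/iommu-debug-pagealloc.h>
#include <linux/mm.h>

/* Illustrative free-path hook: verify the pages are not still mapped
 * in any IOMMU before they are handed back to the page allocator.
 */
static void example_check_before_free(struct page *page, int numpages)
{
	iommu_debug_check_unmapped(page, numpages);
}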
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8c66284a91a8..54b8b48c762e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -910,6 +910,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+struct iommu_domain *iommu_driver_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
@@ -1187,6 +1188,10 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
+
+/* PCI device reset functions */
+int pci_dev_reset_iommu_prepare(struct pci_dev *pdev);
+void pci_dev_reset_iommu_done(struct pci_dev *pdev);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1510,6 +1515,15 @@ static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
}
static inline void iommu_free_global_pasid(ioasid_t pasid) {}
+
+static inline int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline void pci_dev_reset_iommu_done(struct pci_dev *pdev)
+{
+}
#endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IRQ_MSI_IOMMU
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 9afa30f9346f..5533a5debf3f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -10,6 +10,7 @@
#define _LINUX_IOPORT_H
#ifndef __ASSEMBLY__
+#include <linux/args.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/minmax.h>
@@ -165,8 +166,12 @@ enum {
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
-#define DEFINE_RES(_start, _size, _flags) \
+#define __DEFINE_RES0() \
+ DEFINE_RES_NAMED(0, 0, NULL, IORESOURCE_UNSET)
+#define __DEFINE_RES3(_start, _size, _flags) \
DEFINE_RES_NAMED(_start, _size, NULL, _flags)
+#define DEFINE_RES(...) \
+ CONCATENATE(__DEFINE_RES, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
@@ -232,6 +237,7 @@ struct resource_constraint {
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
+extern struct resource soft_reserve_resource;
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
@@ -338,7 +344,7 @@ static inline bool resource_union(const struct resource *r1, const struct resour
* Check if this resource is added to a resource tree or detached. Caller is
* responsible for not racing assignment.
*/
-static inline bool resource_assigned(struct resource *res)
+static inline bool resource_assigned(const struct resource *res)
{
return res->parent;
}
@@ -418,6 +424,10 @@ walk_system_ram_res_rev(u64 start, u64 end, void *arg,
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
+extern int walk_soft_reserve_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
+region_intersects_soft_reserve(resource_size_t start, size_t size);
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
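With the COUNT_ARGS() dispatch, DEFINE_RES() now has two arities; a sketch with illustrative addresses:

#include <linux/ioport.h>

/* Zero-argument form: an unset placeholder resource. */
static struct resource example_res_unset = DEFINE_RES();

/* Three-argument form: behaves exactly as before. */
static struct resource example_res_mem =
	DEFINE_RES(0xfed40000, 0x1000, IORESOURCE_MEM);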
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7294e4e89b79..443053a76dcf 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -126,6 +126,28 @@ static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
skb_network_header_len(skb);
}
+static inline unsigned int
+ipv6_payload_len(const struct sk_buff *skb, const struct ipv6hdr *ip6)
+{
+ u32 len = ntohs(ip6->payload_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len :
+ skb->len - skb_network_offset(skb) - sizeof(struct ipv6hdr);
+}
+
+static inline unsigned int skb_ipv6_payload_len(const struct sk_buff *skb)
+{
+ return ipv6_payload_len(skb, ipv6_hdr(skb));
+}
+
+#define IPV6_MAXPLEN 65535
+
+static inline void ipv6_set_payload_len(struct ipv6hdr *ip6, unsigned int len)
+{
+ ip6->payload_len = len <= IPV6_MAXPLEN ? htons(len) : 0;
+}
+
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
@@ -155,7 +177,6 @@ struct inet6_skb_parm {
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
-#define IP6SKB_FAKEJUMBO 512
#define IP6SKB_MULTIPATH 1024
#define IP6SKB_MCROUTE 2048
};
@@ -205,17 +226,14 @@ struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;
-struct inet6_cork {
- struct ipv6_txoptions *opt;
- u8 hop_limit;
- u8 tclass;
- u8 dontfrag:1;
-};
-
/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
/* Used in tx path (inet6_csk_route_socket(), ip6_xmit()) */
struct in6_addr saddr;
+ union {
+ struct in6_addr daddr;
+ struct in6_addr final;
+ };
__be32 flow_label;
u32 dst_cookie;
struct ipv6_txoptions __rcu *opt;
@@ -267,7 +285,6 @@ struct ipv6_pinfo {
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
- struct inet6_cork cork;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
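A sketch of the intended pairing of the new helpers (the function is illustrative): reads go through skb_ipv6_payload_len(), which recovers the true length for GSO TCP frames whose on-wire payload_len is 0, and writes go through ipv6_set_payload_len(), which stores 0 for lengths beyond IPV6_MAXPLEN.

#include <linux/ipv6.h>

static void example_fixup_plen(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int cur = skb_ipv6_payload_len(skb);

	if (cur != new_len)
		ipv6_set_payload_len(ipv6_hdr(skb), new_len);
}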
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4a9f1d7b08c3..951acbdb9f84 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -459,6 +459,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* checks against the supplied affinity mask are not
* required. This is used for CPU hotplug where the
* target CPU is not yet set in the cpu_online_mask.
+ * @irq_pre_redirect: Optional function to be invoked before redirecting
+ * an interrupt via irq_work. Only used when CONFIG_SMP is enabled.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
@@ -503,6 +505,7 @@ struct irq_chip {
void (*irq_eoi)(struct irq_data *data);
int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+ void (*irq_pre_redirect)(struct irq_data *data);
int (*irq_retrigger)(struct irq_data *data);
int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
int (*irq_set_wake)(struct irq_data *data, unsigned int on);
@@ -595,9 +598,6 @@ enum {
#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
-struct irqaction;
-extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
@@ -658,7 +658,7 @@ extern void handle_fasteoi_nmi(struct irq_desc *desc);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
-extern int irq_chip_pm_put(struct irq_data *data);
+extern void irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
@@ -687,6 +687,13 @@ extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
+#ifdef CONFIG_SMP
+void irq_chip_pre_redirect_parent(struct irq_data *data);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+int irq_chip_redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force);
#endif
/* Disable or mask interrupts during a kernel kexec */
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
index 68ddcdb1cec5..b78488df6c98 100644
--- a/include/linux/irqchip/arm-gic-v5.h
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -43,6 +43,7 @@
/*
* IRS registers and tables structures
*/
+#define GICV5_IRS_IDR0 0x0000
#define GICV5_IRS_IDR1 0x0004
#define GICV5_IRS_IDR2 0x0008
#define GICV5_IRS_IDR5 0x0014
@@ -63,6 +64,8 @@
#define GICV5_IRS_IST_STATUSR 0x0194
#define GICV5_IRS_MAP_L2_ISTR 0x01c0
+#define GICV5_IRS_IDR0_VIRT BIT(6)
+
#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20)
#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16)
@@ -265,6 +268,12 @@
#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+#define GICV5_GSI_IC_TYPE GENMASK(31, 29)
+#define GICV5_GSI_IWB_TYPE 0x7
+
+#define GICV5_GSI_IWB_FRAME_ID GENMASK(28, 16)
+#define GICV5_GSI_IWB_WIRE GENMASK(15, 0)
+
/*
* Global Data structures and functions
*/
@@ -278,6 +287,7 @@ struct gicv5_chip_data {
u8 cpuif_pri_bits;
u8 cpuif_id_bits;
u8 irs_pri_bits;
+ bool virt_capable;
struct {
__le64 *l1ist_addr;
u32 l2_size;
@@ -344,6 +354,7 @@ void __init gicv5_init_lpi_domain(void);
void __init gicv5_free_lpi_domain(void);
int gicv5_irs_of_probe(struct device_node *parent);
+int gicv5_irs_acpi_probe(void);
void gicv5_irs_remove(void);
int gicv5_irs_enable(void);
void gicv5_irs_its_probe(void);
@@ -391,4 +402,5 @@ int gicv5_alloc_lpi(void);
void gicv5_free_lpi(u32 lpi);
void __init gicv5_its_of_probe(struct device_node *parent);
+void __init gicv5_its_acpi_probe(void);
#endif
diff --git a/include/linux/irqchip/irq-renesas-rzt2h.h b/include/linux/irqchip/irq-renesas-rzt2h.h
new file mode 100644
index 000000000000..853fd5ee0b22
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzt2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/T2H Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZT2H
+#define __LINUX_IRQ_RENESAS_RZT2H
+
+#include <linux/platform_device.h>
+
+#define RZT2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
+
+#ifdef CONFIG_RENESAS_RZT2H_ICU
+void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no);
+#else
+static inline void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZT2H */
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
index 7f3ff5c5ea53..4b348836de7a 100644
--- a/include/linux/irqchip/riscv-imsic.h
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -68,6 +68,9 @@ struct imsic_global_config {
/* Number of guest interrupt identities */
u32 nr_guest_ids;
+ /* Number of guest interrupt files per core */
+ u32 nr_guest_files;
+
/* Per-CPU IMSIC addresses */
struct imsic_local_config __percpu *local;
};
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 17902861de76..dae9a9b93665 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -2,9 +2,10 @@
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
-#include <linux/rcupdate.h>
+#include <linux/irq_work.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
/*
* Core internal functions to deal with irq descriptors
@@ -30,6 +31,17 @@ struct irqstat {
};
/**
+ * struct irq_redirect - interrupt redirection metadata
+ * @work: irq_work item used to execute the handler on a different CPU
+ * @target_cpu: CPU to run the irq handler on when the current CPU is not part
+ * of the irq affinity mask
+ */
+struct irq_redirect {
+ struct irq_work work;
+ unsigned int target_cpu;
+};
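/*
 * A minimal usage sketch (illustration only, assuming CONFIG_SMP): how a
 * flow handler could bounce handling to a CPU from the affinity mask,
 * provided desc->redirect.work was initialized with the handler callback
 * and target_cpu was picked beforehand. The function itself is
 * hypothetical; only struct irq_redirect comes from this patch.
 */
static void example_redirect_irq(struct irq_desc *desc)
{
	/* Run the prepared irq_work item on the designated target CPU. */
	irq_work_queue_on(&desc->redirect.work, desc->redirect.target_cpu);
}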
+
+/**
* struct irq_desc - interrupt descriptor
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
@@ -46,6 +58,7 @@ struct irqstat {
* @threads_handled: stats field for deferred spurious detection of threaded handlers
* @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
+ * @redirect: Facility for redirecting interrupts via irq_work
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
@@ -83,6 +96,7 @@ struct irq_desc {
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
+ struct irq_redirect redirect;
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -185,6 +199,7 @@ int generic_handle_irq_safe(unsigned int irq);
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
+bool generic_handle_demux_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 62f81bbeb490..73c25d40846c 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -257,7 +257,8 @@ static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
- const char *name, phys_addr_t *pa);
+ const char *name, phys_addr_t *pa,
+ struct fwnode_handle *parent);
enum {
IRQCHIP_FWNODE_REAL,
@@ -267,18 +268,39 @@ enum {
static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_parented_fwnode(const char *name,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL, parent);
}
static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
- NULL);
+ NULL, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_id_parented_fwnode(const char *name, int id,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
+ NULL, parent);
}
static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa, NULL);
+}
+
+static inline struct fwnode_handle *irq_domain_alloc_parented_fwnode(phys_addr_t *pa,
+ struct fwnode_handle *parent)
+{
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa, parent);
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f5eaf76198f3..a53a00d36228 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1815,7 +1815,4 @@ static inline int jbd2_handle_buffer_credits(handle_t *handle)
#endif /* __KERNEL__ */
-#define EFSBADCRC EBADMSG /* Bad CRC detected */
-#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
-
#endif /* _LINUX_JBD2_H */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9c6ac4b62eb9..338a1921a50a 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -641,6 +641,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
}
+void __kasan_vrealloc(const void *start, unsigned long old_size,
+ unsigned long new_size);
+
+static __always_inline void kasan_vrealloc(const void *start,
+ unsigned long old_size,
+ unsigned long new_size)
+{
+ if (kasan_enabled())
+ __kasan_vrealloc(start, old_size, new_size);
+}
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -670,6 +681,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
kasan_vmalloc_flags_t flags)
{ }
+static inline void kasan_vrealloc(const void *start, unsigned long old_size,
+ unsigned long new_size) { }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5b46924fdff5..e5570a16cbb1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -21,7 +21,6 @@
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/bitops.h>
-#include <linux/hex.h>
#include <linux/kstrtox.h>
#include <linux/log2.h>
#include <linux/math.h>
@@ -32,7 +31,7 @@
#include <linux/build_bug.h>
#include <linux/sprintf.h>
#include <linux/static_call_types.h>
-#include <linux/instruction_pointer.h>
+#include <linux/trace_printk.h>
#include <linux/util_macros.h>
#include <linux/wordpart.h>
@@ -40,8 +39,6 @@
#include <uapi/linux/kernel.h>
-#define STACK_MAGIC 0xdeadbeef
-
struct completion;
struct user;
@@ -192,215 +189,9 @@ enum system_states {
};
extern enum system_states system_state;
-/*
- * General tracing related utility functions - trace_printk(),
- * tracing_on/tracing_off and tracing_start()/tracing_stop
- *
- * Use tracing_on/tracing_off when you want to quickly turn on or off
- * tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/tracing/tracing_on
- * file, which gives a means for the kernel and userspace to interact.
- * Place a tracing_off() in the kernel where you want tracing to end.
- * From user space, examine the trace, and then echo 1 > tracing_on
- * to continue tracing.
- *
- * tracing_stop/tracing_start has slightly more overhead. It is used
- * by things like suspend to ram where disabling the recording of the
- * trace is not enough, but tracing must actually stop because things
- * like calling smp_processor_id() may crash the system.
- *
- * Most likely, you want to use tracing_on/tracing_off.
- */
-
-enum ftrace_dump_mode {
- DUMP_NONE,
- DUMP_ALL,
- DUMP_ORIG,
- DUMP_PARAM,
-};
-
-#ifdef CONFIG_TRACING
-void tracing_on(void);
-void tracing_off(void);
-int tracing_is_on(void);
-void tracing_snapshot(void);
-void tracing_snapshot_alloc(void);
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-
-static inline __printf(1, 2)
-void ____trace_printk_check_format(const char *fmt, ...)
-{
-}
-#define __trace_printk_check_format(fmt, args...) \
-do { \
- if (0) \
- ____trace_printk_check_format(fmt, ##args); \
-} while (0)
-
-/**
- * trace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __trace_printk is an internal function for trace_printk() and
- * the @ip is passed in via the trace_printk() macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving trace_printks scattered around in
- * your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used.)
- *
- * A little optimization trick is done here. If there's only one
- * argument, there's no need to scan the string for printf formats.
- * The trace_puts() will suffice. But how can we take advantage of
- * using trace_puts() when trace_printk() has only one argument?
- * By stringifying the args and checking the size we can tell
- * whether or not there are args. __stringify((__VA_ARGS__)) will
- * turn into "()\0" with a size of 3 when there are no args, anything
- * else will be bigger. All we need to do is define a string to this,
- * and then take its size and compare to 3. If it's bigger, use
- * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
- * let gcc optimize the rest.
- */
-
-#define trace_printk(fmt, ...) \
-do { \
- char _______STR[] = __stringify((__VA_ARGS__)); \
- if (sizeof(_______STR) > 3) \
- do_trace_printk(fmt, ##__VA_ARGS__); \
- else \
- trace_puts(fmt); \
-} while (0)
-
-#define do_trace_printk(fmt, args...) \
-do { \
- static const char *trace_printk_fmt __used \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(fmt) ? fmt : NULL; \
- \
- __trace_printk_check_format(fmt, ##args); \
- \
- if (__builtin_constant_p(fmt)) \
- __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
- else \
- __trace_printk(_THIS_IP_, fmt, ##args); \
-} while (0)
-
-extern __printf(2, 3)
-int __trace_bprintk(unsigned long ip, const char *fmt, ...);
-
-extern __printf(2, 3)
-int __trace_printk(unsigned long ip, const char *fmt, ...);
-
-/**
- * trace_puts - write a string into the ftrace buffer
- * @str: the string to record
- *
- * Note: __trace_bputs is an internal function for trace_puts and
- * the @ip is passed in via the trace_puts macro.
- *
- * This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" effects,
- * where the processing of the print format is still too much.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving trace_puts scattered around in
- * your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used.)
- *
- * Returns: 0 if nothing was written, positive # if string was.
- * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
- */
-
-#define trace_puts(str) ({ \
- static const char *trace_printk_fmt __used \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(str) ? str : NULL; \
- \
- if (__builtin_constant_p(str)) \
- __trace_bputs(_THIS_IP_, trace_printk_fmt); \
- else \
- __trace_puts(_THIS_IP_, str, strlen(str)); \
-})
-extern int __trace_bputs(unsigned long ip, const char *str);
-extern int __trace_puts(unsigned long ip, const char *str, int size);
-
-extern void trace_dump_stack(int skip);
-
-/*
- * The double __builtin_constant_p is because gcc will give us an error
- * if we try to allocate the static variable to fmt if it is not a
- * constant. Even with the outer if statement.
- */
-#define ftrace_vprintk(fmt, vargs) \
-do { \
- if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt __used \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(fmt) ? fmt : NULL; \
- \
- __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
- } else \
- __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
-} while (0)
-
-extern __printf(2, 0) int
-__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
-
-extern __printf(2, 0) int
-__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-
-extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
-#else
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void trace_dump_stack(int skip) { }
-
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
-static inline int tracing_is_on(void) { return 0; }
-static inline void tracing_snapshot(void) { }
-static inline void tracing_snapshot_alloc(void) { }
-
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...)
-{
- return 0;
-}
-static __printf(1, 0) inline int
-ftrace_vprintk(const char *fmt, va_list ap)
-{
- return 0;
-}
-static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
-#endif /* CONFIG_TRACING */
-
/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE
# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
-/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
-#define VERIFY_OCTAL_PERMISSIONS(perms) \
- (BUILD_BUG_ON_ZERO((perms) < 0) + \
- BUILD_BUG_ON_ZERO((perms) > 0777) + \
- /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
- BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
- BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
- /* USER_WRITABLE >= GROUP_WRITABLE */ \
- BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
- /* OTHER_WRITABLE? Generally considered a bad idea. */ \
- BUILD_BUG_ON_ZERO((perms) & 2) + \
- (perms))
#endif
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 5f7b9de97e8d..ac4129d1d741 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -11,49 +11,26 @@ struct kho_scratch {
phys_addr_t size;
};
+struct kho_vmalloc;
+
struct folio;
struct page;
-#define DECLARE_KHOSER_PTR(name, type) \
- union { \
- phys_addr_t phys; \
- type ptr; \
- } name
-#define KHOSER_STORE_PTR(dest, val) \
- ({ \
- typeof(val) v = val; \
- typecheck(typeof((dest).ptr), v); \
- (dest).phys = virt_to_phys(v); \
- })
-#define KHOSER_LOAD_PTR(src) \
- ({ \
- typeof(src) s = src; \
- (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
- })
-
-struct kho_vmalloc_chunk;
-struct kho_vmalloc {
- DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
- unsigned int total_pages;
- unsigned short flags;
- unsigned short order;
-};
-
#ifdef CONFIG_KEXEC_HANDOVER
bool kho_is_enabled(void);
bool is_kho_boot(void);
int kho_preserve_folio(struct folio *folio);
void kho_unpreserve_folio(struct folio *folio);
-int kho_preserve_pages(struct page *page, unsigned int nr_pages);
-void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_pages(struct page *page, unsigned long nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned long nr_pages);
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
void *kho_alloc_preserve(size_t size);
void kho_unpreserve_free(void *mem);
void kho_restore_free(void *mem);
struct folio *kho_restore_folio(phys_addr_t phys);
-struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages);
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
int kho_add_subtree(const char *name, void *fdt);
void kho_remove_subtree(void *fdt);
diff --git a/include/linux/kho/abi/kexec_handover.h b/include/linux/kho/abi/kexec_handover.h
new file mode 100644
index 000000000000..2201a0d2c159
--- /dev/null
+++ b/include/linux/kho/abi/kexec_handover.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
+ * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
+ * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
+ * Copyright (C) 2025 Google LLC, Jason Miu <jasonmiu@google.com>
+ */
+
+#ifndef _LINUX_KHO_ABI_KEXEC_HANDOVER_H
+#define _LINUX_KHO_ABI_KEXEC_HANDOVER_H
+
+#include <linux/types.h>
+
+/**
+ * DOC: Kexec Handover ABI
+ *
+ * Kexec Handover uses the ABI defined below for passing preserved data from
+ * one kernel to the next.
+ * The ABI uses Flattened Device Tree (FDT) format. The first kernel creates an
+ * FDT which is then passed to the next kernel during a kexec handover.
+ *
+ * This interface is a contract. Any modification to the FDT structure, node
+ * properties, compatible string, or the layout of the data structures
+ * referenced here constitutes a breaking change. Such changes require
+ * incrementing the version number in KHO_FDT_COMPATIBLE to prevent a new kernel
+ * from misinterpreting data from an older kernel. Changes are allowed provided
+ * the compatibility version is incremented. However, backward/forward
+ * compatibility is only guaranteed for kernels supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The FDT serves as a central registry for physical
+ * addresses of preserved data structures and sub-FDTs. The first kernel
+ * populates this FDT with references to memory regions and other FDTs that
+ * need to persist across the kexec transition. The subsequent kernel then
+ * parses this FDT to locate and restore the preserved data.::
+ *
+ * / {
+ * compatible = "kho-v1";
+ *
+ * preserved-memory-map = <0x...>;
+ *
+ * <subnode-name-1> {
+ * fdt = <0x...>;
+ * };
+ *
+ * <subnode-name-2> {
+ * fdt = <0x...>;
+ * };
+ * ... ...
+ * <subnode-name-N> {
+ * fdt = <0x...>;
+ * };
+ * };
+ *
+ * Root KHO Node (/):
+ * - compatible: "kho-v1"
+ *
+ * Identifies the overall KHO ABI version.
+ *
+ * - preserved-memory-map: u64
+ *
+ * Physical memory address pointing to the root of the
+ * preserved memory map data structure.
+ *
+ * Subnodes (<subnode-name-N>):
+ * Subnodes can also be added to the root node to
+ * describe other preserved data blobs. The <subnode-name-N>
+ * is provided by the subsystem that uses KHO for preserving its
+ * data.
+ *
+ * - fdt: u64
+ *
+ * Physical address pointing to a subnode FDT blob that is also
+ * being preserved.
+ */
+
+/* The compatible string for the KHO FDT root node. */
+#define KHO_FDT_COMPATIBLE "kho-v1"
+
+/* The FDT property for the preserved memory map. */
+#define KHO_FDT_MEMORY_MAP_PROP_NAME "preserved-memory-map"
+
+/* The FDT property for sub-FDTs. */
+#define KHO_FDT_SUB_TREE_PROP_NAME "fdt"
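/*
 * A minimal consumer sketch (illustration only): locating the preserved
 * memory map in a handed-over KHO FDT with libfdt (<linux/libfdt.h>).
 * The function name and error handling are assumptions; the compatible
 * string and property name are the definitions above.
 */
static int example_parse_kho_fdt(const void *fdt, u64 *pmm_phys)
{
	const fdt64_t *prop;
	int len;

	/* The root node must carry the versioned compatible string. */
	if (fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE))
		return -EINVAL;

	prop = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
	if (!prop || len != sizeof(u64))
		return -EINVAL;

	/* FDT properties are stored big-endian. */
	*pmm_phys = fdt64_to_cpu(*prop);
	return 0;
}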
+
+/**
+ * DOC: Kexec Handover ABI for vmalloc Preservation
+ *
+ * The Kexec Handover ABI for preserving vmalloc'ed memory is defined by
+ * a set of structures and helper macros. The layout of these structures is a
+ * stable contract between kernels and is versioned by the KHO_FDT_COMPATIBLE
+ * string.
+ *
+ * The preservation is managed through a main descriptor &struct kho_vmalloc,
+ * which points to a linked list of &struct kho_vmalloc_chunk structures. These
+ * chunks contain the physical addresses of the preserved pages, allowing the
+ * next kernel to reconstruct the vmalloc area with the same content and layout.
+ * Helper macros are also defined for storing and loading pointers within
+ * these structures.
+ */
+
+/* Helper macro to define a union for a serializable pointer. */
+#define DECLARE_KHOSER_PTR(name, type) \
+ union { \
+ u64 phys; \
+ type ptr; \
+ } name
+
+/* Stores the physical address of a serializable pointer. */
+#define KHOSER_STORE_PTR(dest, val) \
+ ({ \
+ typeof(val) v = val; \
+ typecheck(typeof((dest).ptr), v); \
+ (dest).phys = virt_to_phys(v); \
+ })
+
+/* Loads the stored physical address back to a pointer. */
+#define KHOSER_LOAD_PTR(src) \
+ ({ \
+ typeof(src) s = src; \
+ (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
+ })
+
+/*
+ * This header is embedded at the beginning of each `kho_vmalloc_chunk`
+ * and contains a pointer to the next chunk in the linked list,
+ * stored as a physical address for handover.
+ */
+struct kho_vmalloc_hdr {
+ DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
+};
+
+#define KHO_VMALLOC_SIZE \
+ ((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
+ sizeof(u64))
+
+/*
+ * Each chunk is a single page and is part of a linked list that describes
+ * a preserved vmalloc area. It contains the header with the link to the next
+ * chunk and a zero terminated array of physical addresses of the pages that
+ * make up the preserved vmalloc area.
+ */
+struct kho_vmalloc_chunk {
+ struct kho_vmalloc_hdr hdr;
+ u64 phys[KHO_VMALLOC_SIZE];
+};
+
+static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
+
+/*
+ * Describes a preserved vmalloc memory area, including the
+ * total number of pages, allocation flags, page order, and a pointer to the
+ * first chunk of physical page addresses.
+ */
+struct kho_vmalloc {
+ DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+ unsigned int total_pages;
+ unsigned short flags;
+ unsigned short order;
+};
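/*
 * A minimal walker sketch (illustration only): counting the pages that a
 * preserved vmalloc area carries. Only the structures and macros above
 * are used; the function itself is an assumption.
 */
static unsigned long example_count_preserved_pages(const struct kho_vmalloc *p)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(p->first);
	unsigned long nr = 0;
	unsigned int i;

	while (chunk) {
		/* The phys[] array is zero terminated within each chunk. */
		for (i = 0; i < KHO_VMALLOC_SIZE && chunk->phys[i]; i++)
			nr++;
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
	}
	return nr;
}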
+
+#endif /* _LINUX_KHO_ABI_KEXEC_HANDOVER_H */
diff --git a/include/linux/kho/abi/luo.h b/include/linux/kho/abi/luo.h
index bb099c92e469..46750a0ddf88 100644
--- a/include/linux/kho/abi/luo.h
+++ b/include/linux/kho/abi/luo.h
@@ -8,10 +8,10 @@
/**
* DOC: Live Update Orchestrator ABI
*
- * This header defines the stable Application Binary Interface used by the
- * Live Update Orchestrator to pass state from a pre-update kernel to a
- * post-update kernel. The ABI is built upon the Kexec HandOver framework
- * and uses a Flattened Device Tree to describe the preserved data.
+ * The Live Update Orchestrator uses the stable Application Binary Interface
+ * defined below to pass state from a pre-update kernel to a post-update
+ * kernel. The ABI is built upon the Kexec HandOver framework and uses a
+ * Flattened Device Tree to describe the preserved data.
*
* This interface is a contract. Any modification to the FDT structure, node
* properties, compatible strings, or the layout of the `__packed` serialization
@@ -37,6 +37,11 @@
* compatible = "luo-session-v1";
* luo-session-header = <phys_addr_of_session_header_ser>;
* };
+ *
+ * luo-flb {
+ * compatible = "luo-flb-v1";
+ * luo-flb-header = <phys_addr_of_flb_header_ser>;
+ * };
* };
*
* Main LUO Node (/):
@@ -56,6 +61,17 @@
* is the header for a contiguous block of memory containing an array of
* `struct luo_session_ser`, one for each preserved session.
*
+ * File-Lifecycle-Bound Node (luo-flb):
+ * This node describes all preserved global objects whose lifecycle is bound
+ * to that of the preserved files (e.g., shared IOMMU state).
+ *
+ * - compatible: "luo-flb-v1"
+ * Identifies the FLB ABI version.
+ * - luo-flb-header: u64
+ * The physical address of a `struct luo_flb_header_ser`. This structure is
+ * the header for a contiguous block of memory containing an array of
+ * `struct luo_flb_ser`, one for each preserved global object.
+ *
* Serialization Structures:
* The FDT properties point to memory regions containing arrays of simple,
* `__packed` structures. These structures contain the actual preserved state.
@@ -74,6 +90,16 @@
* Metadata for a single preserved file. Contains the `compatible` string to
* find the correct handler in the new kernel, a user-provided `token` for
* identification, and an opaque `data` handle for the handler to use.
+ *
+ * - struct luo_flb_header_ser:
+ * Header for the FLB array. Contains the total page count of the
+ * preserved memory block and the number of `struct luo_flb_ser` entries
+ * that follow.
+ *
+ * - struct luo_flb_ser:
+ * Metadata for a single preserved global object. Contains its `name`
+ * (compatible string), an opaque `data` handle, and the `count`
+ * number of files depending on it.
*/
#ifndef _LINUX_KHO_ABI_LUO_H
@@ -163,4 +189,59 @@ struct luo_session_ser {
struct luo_file_set_ser file_set_ser;
} __packed;
+/* The max size is set so it can be reliably used during serialization */
+#define LIVEUPDATE_FLB_COMPAT_LENGTH 48
+
+#define LUO_FDT_FLB_NODE_NAME "luo-flb"
+#define LUO_FDT_FLB_COMPATIBLE "luo-flb-v1"
+#define LUO_FDT_FLB_HEADER "luo-flb-header"
+
+/**
+ * struct luo_flb_header_ser - Header for the serialized FLB data block.
+ * @pgcnt: The total number of pages occupied by the entire preserved memory
+ * region, including this header and the subsequent array of
+ * &struct luo_flb_ser entries.
+ * @count: The number of &struct luo_flb_ser entries that follow this header
+ * in the memory block.
+ *
+ * This structure is located at the physical address specified by the
+ * `LUO_FDT_FLB_HEADER` FDT property. It provides the new kernel with the
+ * necessary information to find and iterate over the array of preserved
+ * File-Lifecycle-Bound objects and to manage the underlying memory.
+ *
+ * If this structure is modified, LUO_FDT_FLB_COMPATIBLE must be updated.
+ */
+struct luo_flb_header_ser {
+ u64 pgcnt;
+ u64 count;
+} __packed;
+
+/**
+ * struct luo_flb_ser - Represents the serialized state of a single FLB object.
+ * @name: The unique compatibility string of the FLB object, used to find the
+ * corresponding &struct liveupdate_flb handler in the new kernel.
+ * @data: The opaque u64 handle returned by the FLB's .preserve() operation
+ * in the old kernel. This handle encapsulates the entire state needed
+ * for restoration.
+ * @count: The reference count at the time of serialization; i.e., the number
+ * of preserved files that depended on this FLB. This is used by the
+ * new kernel to correctly manage the FLB's lifecycle.
+ *
+ * An array of these structures is created in a preserved memory region and
+ * passed to the new kernel. Each entry allows the LUO core to restore one
+ * global, shared object.
+ *
+ * If this structure is modified, LUO_FDT_FLB_COMPATIBLE must be updated.
+ */
+struct luo_flb_ser {
+ char name[LIVEUPDATE_FLB_COMPAT_LENGTH];
+ u64 data;
+ u64 count;
+} __packed;
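/*
 * A minimal walker sketch (illustration only): the luo_flb_ser entries
 * follow the header contiguously, so the new kernel can iterate them as
 * a plain array. The function is an assumption; the layout comes from
 * the structures above.
 */
static void example_walk_flbs(struct luo_flb_header_ser *hdr)
{
	struct luo_flb_ser *ser = (struct luo_flb_ser *)(hdr + 1);
	u64 i;

	for (i = 0; i < hdr->count; i++)
		pr_debug("flb %s: data=%#llx count=%llu\n", ser[i].name,
			 (unsigned long long)ser[i].data,
			 (unsigned long long)ser[i].count);
}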
+
+/* Kernel Live Update Test ABI */
+#ifdef CONFIG_LIVEUPDATE_TEST
+#define LIVEUPDATE_TEST_FLB_COMPATIBLE(i) "liveupdate-test-flb-v" #i
+#endif
+
#endif /* _LINUX_KHO_ABI_LUO_H */
diff --git a/include/linux/kho/abi/memblock.h b/include/linux/kho/abi/memblock.h
new file mode 100644
index 000000000000..27b042f470e1
--- /dev/null
+++ b/include/linux/kho/abi/memblock.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KHO_ABI_MEMBLOCK_H
+#define _LINUX_KHO_ABI_MEMBLOCK_H
+
+/**
+ * DOC: memblock kexec handover ABI
+ *
+ * Memblock can serialize its current memory reservations created with the
+ * reserve_mem command line option across kexec through KHO.
+ * The post-KHO kernel can then consume these reservations and they are
+ * guaranteed to have the same physical address.
+ *
+ * The state is serialized using Flattened Device Tree (FDT) format. Any
+ * modification to the FDT structure, node properties, or the compatible
+ * strings constitutes a breaking change. Such changes require incrementing the
+ * version number in the relevant `_COMPATIBLE` string to prevent a new kernel
+ * from misinterpreting data from an old kernel.
+ *
+ * Changes are allowed provided the compatibility version is incremented.
+ * However, backward/forward compatibility is only guaranteed for kernels
+ * supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The entire memblock state is encapsulated within a single KHO entry named
+ * "memblock".
+ * This entry contains an FDT with the following layout:
+ *
+ * .. code-block:: none
+ *
+ * / {
+ * compatible = "memblock-v1";
+ *
+ * n1 {
+ * compatible = "reserve-mem-v1";
+ * start = <0xc06b 0x4000000>;
+ * size = <0x04 0x00>;
+ * };
+ * };
+ *
+ * Main memblock node (/):
+ *
+ * - compatible: "memblock-v1"
+ *
+ * Identifies the overall memblock ABI version.
+ *
+ * reserved_mem node:
+ * These nodes describe all reserve_mem regions. The node name is the name
+ * defined by the user for a reserve_mem region.
+ *
+ * - compatible: "reserve-mem-v1"
+ *
+ * Identifies the ABI version of reserve_mem descriptions.
+ *
+ * - start: u64
+ *
+ * Physical address of the reserved memory region.
+ *
+ * - size: u64
+ *
+ * Size in bytes of the reserved memory region.
+ */
+
+/* Top level memblock FDT node name. */
+#define MEMBLOCK_KHO_FDT "memblock"
+
+/* The compatible string for the memblock FDT root node. */
+#define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1"
+
+/* The compatible string for the reserve_mem FDT nodes. */
+#define RESERVE_MEM_KHO_NODE_COMPATIBLE "reserve-mem-v1"
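/*
 * A minimal consumer sketch (illustration only): reading one reserve_mem
 * subnode with libfdt. Names and error handling are assumptions; the
 * compatible string and the start/size properties are described above.
 */
static int example_read_reserve_mem(const void *fdt, int node,
				    u64 *start, u64 *size)
{
	const fdt64_t *prop;

	if (fdt_node_check_compatible(fdt, node, RESERVE_MEM_KHO_NODE_COMPATIBLE))
		return -EINVAL;

	prop = fdt_getprop(fdt, node, "start", NULL);
	if (!prop)
		return -EINVAL;
	*start = fdt64_to_cpu(*prop);

	prop = fdt_getprop(fdt, node, "size", NULL);
	if (!prop)
		return -EINVAL;
	*size = fdt64_to_cpu(*prop);

	return 0;
}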
+
+#endif /* _LINUX_KHO_ABI_MEMBLOCK_H */
diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h
index da7d063474a1..68cb6303b846 100644
--- a/include/linux/kho/abi/memfd.h
+++ b/include/linux/kho/abi/memfd.h
@@ -12,13 +12,13 @@
#define _LINUX_KHO_ABI_MEMFD_H
#include <linux/types.h>
-#include <linux/kexec_handover.h>
+#include <linux/kho/abi/kexec_handover.h>
/**
* DOC: memfd Live Update ABI
*
- * This header defines the ABI for preserving the state of a memfd across a
- * kexec reboot using the LUO.
+ * memfd uses the ABI defined below for preserving its state across a kexec
+ * reboot using the LUO.
*
* The state is serialized into a packed structure `struct memfd_luo_ser`
* which is handed over to the next kernel via the KHO mechanism.
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index eb1946a70cff..d7a9053ff4fe 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -17,8 +17,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
-extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
- bool install_pmd);
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -42,10 +42,9 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
}
-static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr, bool install_pmd)
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
- return 0;
}
static inline void khugepaged_min_free_kbytes_update(void)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 88e82ab1367c..9bc6abe57572 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -81,6 +81,7 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *mutex)
+ __cond_acquires(true, mutex)
{
if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
@@ -102,6 +103,7 @@ static inline int kref_put_mutex(struct kref *kref,
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
+ __cond_acquires(true, lock)
{
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8d27403888ce..c92c1149ee6e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -100,6 +100,7 @@ void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_exit(long result) __noreturn;
void kthread_complete_and_exit(struct completion *, long) __noreturn;
+int kthreads_update_housekeeping(void);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d93f75b05ae2..d42a95cbcfbc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -645,7 +645,9 @@ static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *mem
struct kvm_s390_adapter_int {
u64 ind_addr;
+ u64 ind_gaddr;
u64 summary_addr;
+ u64 summary_gaddr;
u64 ind_offset;
u32 summary_offset;
u32 adapter_id;
@@ -1381,6 +1383,7 @@ bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
bool writable);
@@ -1398,6 +1401,13 @@ static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
return __kvm_vcpu_map(vcpu, gpa, map, false);
}
+static inline void kvm_vcpu_map_mark_dirty(struct kvm_vcpu *vcpu,
+ struct kvm_host_map *map)
+{
+ if (kvm_vcpu_mapped(map))
+ kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+}
+
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -1410,7 +1420,6 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data
int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
-void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
/**
* kvm_gpc_init - initialize gfn_to_pfn_cache.
@@ -1749,10 +1758,17 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
-void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void __kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void),
+ void (*mediated_pmi_handler)(void));
+
+static inline void kvm_register_perf_callbacks(void)
+{
+ __kvm_register_perf_callbacks(NULL, NULL);
+}
+
void kvm_unregister_perf_callbacks(void);
#else
-static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_register_perf_callbacks(void) {}
static inline void kvm_unregister_perf_callbacks(void) {}
#endif /* CONFIG_GUEST_PERF_EVENTS */
@@ -2566,7 +2582,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
* @gfn: starting GFN to be populated
* @src: userspace-provided buffer containing data to copy into GFN range
* (passed to @post_populate, and incremented on each iteration
- * if not NULL)
+ * if not NULL). Must be page-aligned.
* @npages: number of pages to copy from userspace-buffer
* @post_populate: callback to issue for each gmem page that backs the GPA
* range
@@ -2581,7 +2597,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
* Returns the number of pages that were populated.
*/
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, int order, void *opaque);
+ struct page *page, void *opaque);
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 39534fafa36a..00346ce3af5e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -46,13 +46,12 @@
/*
* Quirk flags bits.
- * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ * ata_device->quirks is an u64, so __ATA_QUIRK_MAX must not exceed 64.
*/
enum ata_quirks {
__ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
__ATA_QUIRK_NODMA, /* DMA problems */
__ATA_QUIRK_NONCQ, /* Don't use NCQ */
- __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
__ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
__ATA_QUIRK_DISABLE, /* Disable it */
__ATA_QUIRK_HPA_SIZE, /* Native size off by one */
@@ -74,8 +73,7 @@ enum ata_quirks {
__ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
__ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
__ATA_QUIRK_NOTRIM, /* Do not use TRIM */
- __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
- __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */
+ __ATA_QUIRK_MAX_SEC, /* Limit max sectors */
__ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
__ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
__ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */
@@ -91,38 +89,36 @@ enum ata_quirks {
* Some quirks may be drive/controller pair dependent.
*/
enum {
- ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
- ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
- ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
- ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
- ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
- ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
- ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
- ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
- ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
- ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
- ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
- ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
- ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
- ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
- ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
- ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
- ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
- ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
- ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
- ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
- ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
- ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
- ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
- ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
- ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
- ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191),
- ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
- ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
- ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
- ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
- ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
- ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+ ATA_QUIRK_DIAGNOSTIC = BIT_ULL(__ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = BIT_ULL(__ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = BIT_ULL(__ATA_QUIRK_NONCQ),
+ ATA_QUIRK_BROKEN_HPA = BIT_ULL(__ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = BIT_ULL(__ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = BIT_ULL(__ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = BIT_ULL(__ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = BIT_ULL(__ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = BIT_ULL(__ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = BIT_ULL(__ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = BIT_ULL(__ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = BIT_ULL(__ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = BIT_ULL(__ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = BIT_ULL(__ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = BIT_ULL(__ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = BIT_ULL(__ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = BIT_ULL(__ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = BIT_ULL(__ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = BIT_ULL(__ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = BIT_ULL(__ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = BIT_ULL(__ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = BIT_ULL(__ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = BIT_ULL(__ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC = BIT_ULL(__ATA_QUIRK_MAX_SEC),
+ ATA_QUIRK_MAX_TRIM_128M = BIT_ULL(__ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = BIT_ULL(__ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_LPM_ON_ATI = BIT_ULL(__ATA_QUIRK_NO_LPM_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = BIT_ULL(__ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = BIT_ULL(__ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = BIT_ULL(__ATA_QUIRK_NO_FUA),
};
enum {
@@ -723,7 +719,7 @@ struct ata_cdl {
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int quirks; /* List of broken features */
+ u64 quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -903,6 +899,9 @@ struct ata_port {
u64 qc_active;
int nr_active_links; /* #links with active qcs */
+ struct work_struct deferred_qc_work;
+ struct ata_queued_cmd *deferred_qc;
+
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -1150,7 +1149,8 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
#else
#define ATA_SCSI_COMPAT_IOCTL /* empty */
#endif
-extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+extern enum scsi_qc_status ata_scsi_queuecmd(struct Scsi_Host *h,
+ struct scsi_cmnd *cmd);
#if IS_REACHABLE(CONFIG_ATA)
bool ata_scsi_dma_need_drain(struct request *rq);
#else
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index e37699b7e839..1e727a2cb4c1 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -33,14 +33,6 @@ struct linux_logo {
extern const struct linux_logo logo_linux_mono;
extern const struct linux_logo logo_linux_vga16;
extern const struct linux_logo logo_linux_clut224;
-extern const struct linux_logo logo_dec_clut224;
-extern const struct linux_logo logo_mac_clut224;
-extern const struct linux_logo logo_parisc_clut224;
-extern const struct linux_logo logo_sgi_clut224;
-extern const struct linux_logo logo_sun_clut224;
-extern const struct linux_logo logo_superh_mono;
-extern const struct linux_logo logo_superh_vga16;
-extern const struct linux_logo logo_superh_clut224;
extern const struct linux_logo logo_spe_clut224;
extern const struct linux_logo *fb_find_logo(int depth);
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ae1b541446c9..df9eebe6afca 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -144,11 +144,13 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
}
static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ __acquires(__bitlock(0, b))
{
bit_spin_lock(0, (unsigned long *)b);
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ __releases(__bitlock(0, b))
{
__bit_spin_unlock(0, (unsigned long *)b);
}
diff --git a/include/linux/list_private.h b/include/linux/list_private.h
new file mode 100644
index 000000000000..19b01d16beda
--- /dev/null
+++ b/include/linux/list_private.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef _LINUX_LIST_PRIVATE_H
+#define _LINUX_LIST_PRIVATE_H
+
+/**
+ * DOC: Private List Primitives
+ *
+ * Provides a set of list primitives identical in function to those in
+ * ``<linux/list.h>``, but designed for cases where the embedded
+ * ``&struct list_head`` is a private member.
+ */
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+
+#define __list_private_offset(type, member) \
+ ((size_t)(&ACCESS_PRIVATE(((type *)0), member)))
+
+/**
+ * list_private_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the identifier passed to ACCESS_PRIVATE.
+ */
+#define list_private_entry(ptr, type, member) ({ \
+ const struct list_head *__mptr = (ptr); \
+ (type *)((char *)__mptr - __list_private_offset(type, member)); \
+})
+
+/**
+ * list_private_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the identifier passed to ACCESS_PRIVATE.
+ */
+#define list_private_first_entry(ptr, type, member) \
+ list_private_entry((ptr)->next, type, member)
+
+/**
+ * list_private_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the identifier passed to ACCESS_PRIVATE.
+ */
+#define list_private_last_entry(ptr, type, member) \
+ list_private_entry((ptr)->prev, type, member)
+
+/**
+ * list_private_next_entry - get the next element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_next_entry(pos, member) \
+ list_private_entry(ACCESS_PRIVATE(pos, member).next, typeof(*(pos)), member)
+
+/**
+ * list_private_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the last element (returns the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_private_next_entry_circular(pos, head, member) \
+ (list_is_last(&ACCESS_PRIVATE(pos, member), head) ? \
+ list_private_first_entry(head, typeof(*(pos)), member) : \
+ list_private_next_entry(pos, member))
+
+/**
+ * list_private_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_prev_entry(pos, member) \
+ list_private_entry(ACCESS_PRIVATE(pos, member).prev, typeof(*(pos)), member)
+
+/**
+ * list_private_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the first element (returns the last element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_private_prev_entry_circular(pos, head, member) \
+ (list_is_first(&ACCESS_PRIVATE(pos, member), head) ? \
+ list_private_last_entry(head, typeof(*(pos)), member) : \
+ list_private_prev_entry(pos, member))
+
+/**
+ * list_private_entry_is_head - test if the entry points to the head of the list
+ * @pos: the type * to cursor
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_entry_is_head(pos, head, member) \
+ list_is_head(&ACCESS_PRIVATE(pos, member), (head))
+
+/**
+ * list_private_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_for_each_entry(pos, head, member) \
+ for (pos = list_private_first_entry(head, typeof(*pos), member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_next_entry(pos, member))
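/*
 * A minimal usage sketch (illustration only): a struct whose list linkage
 * is private is declared with __private and walked with the primitives
 * above. The struct and function names are hypothetical.
 */
struct example_item {
	int value;
	struct list_head __private node;
};

static int example_sum(struct list_head *head)
{
	struct example_item *item;
	int sum = 0;

	list_private_for_each_entry(item, head, node)
		sum += item->value;

	return sum;
}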
+
+/**
+ * list_private_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_private_last_entry(head, typeof(*pos), member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_prev_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_private_for_each_entry_continue(pos, head, member) \
+ for (pos = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_next_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_private_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_private_prev_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_prev_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_from - iterate over list of given type from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing from current position.
+ */
+#define list_private_for_each_entry_from(pos, head, member) \
+ for (; !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_next_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_from_reverse - iterate backwards over list of given type
+ * from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, continuing from current position.
+ */
+#define list_private_for_each_entry_from_reverse(pos, head, member) \
+ for (; !list_private_entry_is_head(pos, head, member); \
+ pos = list_private_prev_entry(pos, member))
+
+/**
+ * list_private_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_private_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_private_first_entry(head, typeof(*pos), member), \
+ n = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_next_entry(n, member))
+
+/**
+ * list_private_for_each_entry_safe_continue - continue list iteration safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing after current point,
+ * safe against removal of list entry.
+ */
+#define list_private_for_each_entry_safe_continue(pos, n, head, member) \
+ for (pos = list_private_next_entry(pos, member), \
+ n = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_next_entry(n, member))
+
+/**
+ * list_private_for_each_entry_safe_from - iterate over list from current point safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type from current point, safe against
+ * removal of list entry.
+ */
+#define list_private_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_private_next_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_next_entry(n, member))
+
+/**
+ * list_private_for_each_entry_safe_reverse - iterate backwards over list safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, safe against removal
+ * of list entry.
+ */
+#define list_private_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_private_last_entry(head, typeof(*pos), member), \
+ n = list_private_prev_entry(pos, member); \
+ !list_private_entry_is_head(pos, head, member); \
+ pos = n, n = list_private_prev_entry(n, member))
+
+/**
+ * list_private_safe_reset_next - reset a stale list_private_for_each_entry_safe loop
+ * @pos: the loop cursor used in the list_private_for_each_entry_safe loop
+ * @n: temporary storage used in list_private_for_each_entry_safe
+ * @member: the name of the list_head within the struct.
+ *
+ * list_private_safe_reset_next is not safe to use in general if the list may
+ * be modified concurrently (e.g. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (@pos) is pinned in the list,
+ * and list_private_safe_reset_next is called after re-taking the lock and
+ * before completing the current iteration of the loop body.
+ */
+#define list_private_safe_reset_next(pos, n, member) \
+ n = list_private_next_entry(pos, member)
+
+#endif /* _LINUX_LIST_PRIVATE_H */
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 772919e8096a..ba9e3988c07c 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -175,6 +175,9 @@ int klp_enable_patch(struct klp_patch *);
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
+void *klp_find_section_by_name(const struct module *mod, const char *name,
+ size_t *sec_size);
+
void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);
diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h
index a7f6ee5b6771..fe82a6c3005f 100644
--- a/include/linux/liveupdate.h
+++ b/include/linux/liveupdate.h
@@ -11,10 +11,13 @@
#include <linux/compiler.h>
#include <linux/kho/abi/luo.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/types.h>
#include <uapi/linux/liveupdate.h>
struct liveupdate_file_handler;
+struct liveupdate_flb;
+struct liveupdate_session;
struct file;
/**
@@ -99,6 +102,118 @@ struct liveupdate_file_handler {
* registered file handlers.
*/
struct list_head __private list;
+ /* A list of FLB dependencies. */
+ struct list_head __private flb_list;
+};
+
+/**
+ * struct liveupdate_flb_op_args - Arguments for FLB operation callbacks.
+ * @flb: The global FLB instance for which this call is performed.
+ * @data: For .preserve(): [OUT] The callback sets this field.
+ * For .unpreserve(): [IN] The handle from .preserve().
+ * For .retrieve(): [IN] The handle from .preserve().
+ * @obj: For .preserve(): [OUT] Sets this to the live object.
+ * For .retrieve(): [OUT] Sets this to the live object.
+ * For .finish(): [IN] The live object from .retrieve().
+ *
+ * This structure bundles all parameters for the FLB operation callbacks.
+ */
+struct liveupdate_flb_op_args {
+ struct liveupdate_flb *flb;
+ u64 data;
+ void *obj;
+};
+
+/**
+ * struct liveupdate_flb_ops - Callbacks for global File-Lifecycle-Bound data.
+ * @preserve: Called when the first file using this FLB is preserved.
+ * The callback must save its state and return a single,
+ * self-contained u64 handle by setting the 'argp->data'
+ * field and 'argp->obj'.
+ * @unpreserve: Called when the last file using this FLB is unpreserved
+ * (aborted before reboot). Receives the handle via
+ * 'argp->data' and live object via 'argp->obj'.
+ * @retrieve: Called on-demand in the new kernel, the first time a
+ * component requests access to the shared object. It receives
+ * the preserved handle via 'argp->data' and must reconstruct
+ * the live object, returning it by setting the 'argp->obj'
+ * field.
+ * @finish: Called in the new kernel when the last file using this FLB
+ * is finished. Receives the live object via 'argp->obj' for
+ * cleanup.
+ * @owner: Module reference
+ *
+ * Operations that manage global shared data with file bound lifecycle,
+ * triggered by the first file that uses it and concluded by the last file that
+ * uses it, across all sessions.
+ */
+struct liveupdate_flb_ops {
+ int (*preserve)(struct liveupdate_flb_op_args *argp);
+ void (*unpreserve)(struct liveupdate_flb_op_args *argp);
+ int (*retrieve)(struct liveupdate_flb_op_args *argp);
+ void (*finish)(struct liveupdate_flb_op_args *argp);
+ struct module *owner;
+};
+
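+/*
+ * Minimal sketch of an ops implementation. The foo_* helpers and the state
+ * layout are hypothetical; only the callback contract below is defined here.
+ *
+ *	static int foo_flb_preserve(struct liveupdate_flb_op_args *argp)
+ *	{
+ *		struct foo_state *st = foo_serialize_state();
+ *
+ *		if (!st)
+ *			return -ENOMEM;
+ *		argp->data = foo_state_to_handle(st);	// self-contained u64 handle
+ *		argp->obj = st;				// live object for this kernel
+ *		return 0;
+ *	}
+ *
+ *	static const struct liveupdate_flb_ops foo_flb_ops = {
+ *		.preserve	= foo_flb_preserve,
+ *		.unpreserve	= foo_flb_unpreserve,
+ *		.retrieve	= foo_flb_retrieve,
+ *		.finish		= foo_flb_finish,
+ *		.owner		= THIS_MODULE,
+ *	};
+ */
+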
+/*
+ * struct luo_flb_private_state - Private FLB state structure.
+ * @count: The number of preserved files currently depending on this FLB.
+ * This is used to trigger the preserve/unpreserve/finish ops on the
+ * first/last file.
+ * @data: The opaque u64 handle returned by .preserve() or passed to
+ * .retrieve().
+ * @obj: The live kernel object returned by .preserve() or .retrieve().
+ * @lock: A mutex that protects all fields within this structure, providing
+ * the synchronization service for the FLB's ops.
+ * @finished: True once the FLB's finish() callback has run.
+ * @retrieved: True once the FLB's retrieve() callback has run.
+ */
+struct luo_flb_private_state {
+ long count;
+ u64 data;
+ void *obj;
+ struct mutex lock;
+ bool finished;
+ bool retrieved;
+};
+
+/*
+ * struct luo_flb_private - Keeps the incoming and outgoing states separate.
+ * @list: A global list of registered FLBs.
+ * @outgoing: The runtime state for the pre-reboot
+ * (preserve/unpreserve) lifecycle.
+ * @incoming: The runtime state for the post-reboot (retrieve/finish)
+ * lifecycle.
+ * @users: The number of file handlers this FLB is registered with.
+ * @initialized: true when private fields have been initialized.
+ */
+struct luo_flb_private {
+ struct list_head list;
+ struct luo_flb_private_state outgoing;
+ struct luo_flb_private_state incoming;
+ int users;
+ bool initialized;
+};
+
+/**
+ * struct liveupdate_flb - A global definition for a shared data object.
+ * @ops: Callback functions
+ * @compatible: The compatibility string (e.g., "iommu-core-v1")
+ * that uniquely identifies the FLB type this handler
+ * supports. This is matched against the compatible string
+ * associated with individual &struct liveupdate_flb
+ * instances.
+ *
+ * This struct is the "template" that a driver registers to define a shared,
+ * file-lifecycle-bound object. The actual runtime state (the live object,
+ * refcount, etc.) is managed privately by the LUO core.
+ */
+struct liveupdate_flb {
+ const struct liveupdate_flb_ops *ops;
+ const char compatible[LIVEUPDATE_FLB_COMPAT_LENGTH];
+
+ /* private: */
+ struct luo_flb_private __private private;
};
#ifdef CONFIG_LIVEUPDATE
@@ -112,6 +227,14 @@ int liveupdate_reboot(void);
int liveupdate_register_file_handler(struct liveupdate_file_handler *fh);
int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh);
+int liveupdate_register_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb);
+int liveupdate_unregister_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb);
+
+int liveupdate_flb_get_incoming(struct liveupdate_flb *flb, void **objp);
+int liveupdate_flb_get_outgoing(struct liveupdate_flb *flb, void **objp);
+
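+/*
+ * Typical flow (sketch; error handling abbreviated and the foo_* names are
+ * assumptions for the example):
+ *
+ *	// Old kernel: tie the FLB to a file handler before preservation.
+ *	err = liveupdate_register_flb(&foo_file_handler, &foo_flb);
+ *
+ *	// New kernel: the first getter call triggers .retrieve() on demand.
+ *	err = liveupdate_flb_get_incoming(&foo_flb, &obj);
+ *	if (!err)
+ *		foo_restore_from(obj);
+ */
+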
#else /* CONFIG_LIVEUPDATE */
static inline bool liveupdate_enabled(void)
@@ -134,5 +257,29 @@ static inline int liveupdate_unregister_file_handler(struct liveupdate_file_hand
return -EOPNOTSUPP;
}
+static inline int liveupdate_register_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_unregister_flb(struct liveupdate_file_handler *fh,
+ struct liveupdate_flb *flb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_flb_get_incoming(struct liveupdate_flb *flb,
+ void **objp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_flb_get_outgoing(struct liveupdate_flb *flb,
+ void **objp)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_LIVEUPDATE */
#endif /* _LINUX_LIVEUPDATE_H */
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index b0e6ab329b00..b8830148a859 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
+#define local_lock(lock) __local_lock(__this_cpu_local_lock(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock) __local_lock_irq(__this_cpu_local_lock(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(this_cpu_ptr(lock), flags)
+ __local_lock_irqsave(__this_cpu_local_lock(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock) __local_unlock(__this_cpu_local_lock(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock) __local_unlock_irq(__this_cpu_local_lock(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+ __local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
/**
* local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
* locking constraints it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock) __local_trylock(__this_cpu_local_lock(lock))
#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
@@ -81,27 +81,44 @@
* HARDIRQ context on PREEMPT_RT.
*/
#define local_trylock_irqsave(lock, flags) \
- __local_trylock_irqsave(this_cpu_ptr(lock), flags)
-
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
- local_lock(_T),
- local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
- local_lock_irq(_T),
- local_unlock_irq(_T))
+ __local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
+
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+ local_lock(_T->lock),
+ local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+ local_lock_irq(_T->lock),
+ local_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
local_lock_irqsave(_T->lock, _T->flags),
local_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
#define local_lock_nested_bh(_lock) \
- __local_lock_nested_bh(this_cpu_ptr(_lock))
+ __local_lock_nested_bh(__this_cpu_local_lock(_lock))
#define local_unlock_nested_bh(_lock) \
- __local_unlock_nested_bh(this_cpu_ptr(_lock))
-
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
- local_lock_nested_bh(_T),
- local_unlock_nested_bh(_T))
+ __local_unlock_nested_bh(__this_cpu_local_lock(_lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+ local_lock_nested_bh(_T->lock),
+ local_unlock_nested_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)
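+
+/*
+ * Sketch of scoped-guard usage with a per-CPU local lock (illustrative; the
+ * lock and counter names are assumptions):
+ *
+ *	static DEFINE_PER_CPU(local_lock_t, foo_lock) = INIT_LOCAL_LOCK(foo_lock);
+ *
+ *	guard(local_lock)(&foo_lock);	// released automatically at scope exit
+ *	__this_cpu_inc(foo_counter);
+ */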
#endif
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8f82b4eb542f..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -4,25 +4,30 @@
#endif
#include <linux/percpu-defs.h>
+#include <linux/irqflags.h>
#include <linux/lockdep.h>
+#include <linux/debug_locks.h>
+#include <asm/current.h>
#ifndef CONFIG_PREEMPT_RT
-typedef struct {
+context_lock_struct(local_lock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
u8 acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
@@ -84,7 +89,10 @@ do { \
local_lock_debug_init(lock); \
} while (0)
-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock) \
+do { \
+ __local_lock_init((local_lock_t *)lock); \
+} while (0)
#define __spinlock_nested_bh_init(lock) \
do { \
@@ -117,22 +125,25 @@ do { \
do { \
preempt_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
preempt_disable(); \
@@ -146,10 +157,10 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
local_irq_save(flags); \
@@ -163,7 +174,7 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
/* preemption or migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -186,18 +197,21 @@ do { \
#define __local_unlock(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
@@ -206,13 +220,20 @@ do { \
do { \
lockdep_assert_in_softirq(); \
local_lock_acquire((lock)); \
+ __acquire(lock); \
} while (0)
#define __local_unlock_nested_bh(lock) \
- local_lock_release((lock))
+ do { \
+ __release(lock); \
+ local_lock_release((lock)); \
+ } while (0)
#else /* !CONFIG_PREEMPT_RT */
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
/*
* On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
* critical section while staying preemptible.
@@ -267,7 +288,7 @@ do { \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, context_unsafe(({ \
int __locked; \
\
if (in_nmi() | in_hardirq()) { \
@@ -279,17 +300,40 @@ do { \
migrate_enable(); \
} \
__locked; \
- })
+ })))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
typecheck(unsigned long, flags); \
flags = 0; \
__local_trylock(lock); \
- })
+ }))
/* migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(__lock) \
(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
#endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base
+ * variable, and hide the fact that we actually pass the per-CPU instance to
+ * the lock/unlock functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#endif /* !CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base) this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */
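+
+/*
+ * Net effect: e.g. local_lock(&foo_lock) expands to
+ * __local_lock(__this_cpu_local_lock(&foo_lock)), so the context analysis
+ * tracks the per-CPU base variable while the this_cpu_ptr() instance is
+ * what actually gets locked at run time.
+ */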
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index dd634103b014..621566345406 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -282,16 +282,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
#define lockdep_assert_held(l) \
- lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+ do { lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_not_held(l) \
lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
#define lockdep_assert_held_write(l) \
- lockdep_assert(lockdep_is_held_type(l, 0))
+ do { lockdep_assert(lockdep_is_held_type(l, 0)); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_held_read(l) \
- lockdep_assert(lockdep_is_held_type(l, 1))
+ do { lockdep_assert(lockdep_is_held_type(l, 1)); __assume_shared_ctx_lock(l); } while (0)
#define lockdep_assert_held_once(l) \
lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
@@ -389,10 +389,10 @@ extern int lockdep_is_held(const void *);
#define lockdep_assert(c) do { } while (0)
#define lockdep_assert_once(c) do { } while (0)
-#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held(l) __assume_ctx_lock(l)
#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) __assume_ctx_lock(l)
+#define lockdep_assert_held_read(l) __assume_shared_ctx_lock(l)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)
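+
+/*
+ * Sketch of the combined effect (the function and field names here are
+ * illustrative only):
+ *
+ *	static void foo_update(struct foo *f)
+ *	{
+ *		lockdep_assert_held(&f->lock);	// runtime check + __assume_ctx_lock()
+ *		f->counter++;			// no context-analysis warning
+ *	}
+ */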
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 815d871fadfc..6ded24cdb4a8 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -49,9 +49,7 @@ static inline void lockref_init(struct lockref *lockref)
void lockref_get(struct lockref *lockref);
int lockref_put_return(struct lockref *lockref);
bool lockref_get_not_zero(struct lockref *lockref);
-bool lockref_put_or_lock(struct lockref *lockref);
-#define lockref_put_or_lock(_lockref) \
- (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+bool lockref_put_or_lock(struct lockref *lockref) __cond_acquires(false, &lockref->lock);
void lockref_mark_dead(struct lockref *lockref);
bool lockref_get_not_dead(struct lockref *lockref);
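+
+/*
+ * The __cond_acquires(false, ...) annotation encodes the existing contract:
+ * a false return means the count was not dropped and lockref->lock is now
+ * held. Sketch (the cleanup helper is hypothetical):
+ *
+ *	if (!lockref_put_or_lock(&ref)) {
+ *		do_last_ref_cleanup(&ref);	// ref.lock held here
+ *		spin_unlock(&ref.lock);
+ *	}
+ */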
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 2eac3fc9303d..e17ceb32e0c9 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -44,7 +44,7 @@ int __ilog2_u64(u64 n)
static __always_inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
- return (n != 0 && ((n & (n - 1)) == 0));
+ return n - 1 < (n ^ (n - 1));
}
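+
+/*
+ * Note on the branchless form above: for a power of two, n ^ (n - 1) sets
+ * the single set bit of n plus all bits below it, which strictly exceeds
+ * n - 1. For n == 0, n - 1 wraps to ULONG_MAX and the comparison fails; for
+ * other non-powers-of-two the shared top bit cancels in the XOR, leaving a
+ * value strictly smaller than n - 1.
+ */
+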
/**
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index b92008641242..d48bf0ad26f4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -73,7 +73,7 @@ struct lsm_static_calls_table {
/**
* struct lsm_id - Identify a Linux Security Module.
- * @lsm: name of the LSM, must be approved by the LSM maintainers
+ * @name: name of the LSM, must be approved by the LSM maintainers
* @id: LSM ID number from uapi/linux/lsm.h
*
* Contains the information that identifies the LSM.
@@ -164,7 +164,7 @@ enum lsm_order {
* @initcall_core: LSM callback for core_initcall() setup, optional
* @initcall_subsys: LSM callback for subsys_initcall() setup, optional
* @initcall_fs: LSM callback for fs_initcall setup, optional
- * @nitcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
* @initcall_late: LSM callback for late_initcall() setup, optional
*/
struct lsm_info {
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index e1555e06e7e5..07c1bfbdb8c4 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -70,14 +70,33 @@ struct cmdq_cb_data {
struct cmdq_pkt *pkt;
};
+struct cmdq_mbox_priv {
+ u8 shift_pa;
+ dma_addr_t mminfra_offset;
+};
+
struct cmdq_pkt {
void *va_base;
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
+ struct cmdq_mbox_priv priv; /* for generating instruction */
};
/**
+ * cmdq_get_mbox_priv() - get the private data of mailbox channel
+ * @chan: mailbox channel
+ * @priv: pointer to store the private data of mailbox channel
+ *
+ * While generating GCE instructions into the command buffer, private data
+ * of the GCE hardware, such as the shift bits of the physical address,
+ * may need to be referenced.
+ *
+ * This function should be called before generating any GCE instructions.
+ */
+void cmdq_get_mbox_priv(struct mbox_chan *chan, struct cmdq_mbox_priv *priv);
+
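+/*
+ * Sketch (channel and packet setup elided; the names match the structs
+ * declared above):
+ *
+ *	struct cmdq_mbox_priv priv;
+ *
+ *	cmdq_get_mbox_priv(chan, &priv);
+ *	pkt->priv = priv;	// consulted while emitting instructions
+ */
+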
+/**
* cmdq_get_shift_pa() - get the shift bits of physical address
* @chan: mailbox channel
*
diff --git a/include/linux/mailbox/mtk-vcp-mailbox.h b/include/linux/mailbox/mtk-vcp-mailbox.h
new file mode 100644
index 000000000000..16e59d6780a7
--- /dev/null
+++ b/include/linux/mailbox/mtk-vcp-mailbox.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ */
+
+#ifndef __MTK_VCP_MAILBOX_H__
+#define __MTK_VCP_MAILBOX_H__
+
+#define MTK_VCP_MBOX_SLOT_MAX_SIZE 0x100 /* mbox max slot size */
+
+/**
+ * struct mtk_ipi_info - mailbox message info for mtk-vcp-mailbox
+ * @msg: The share buffer between IPC and mailbox driver
+ * @len: Message length
+ * @id: This is for identification purposes and not actually used
+ * by the mailbox hardware.
+ * @index: The signal number of the mailbox message.
+ * @slot_ofs: Data slot offset.
+ * @irq_status: Captures incoming signals for the RX path.
+ *
+ * It is used for communication between the IPC layer and the mailbox driver.
+ */
+struct mtk_ipi_info {
+ void *msg;
+ u32 len;
+ u32 id;
+ u32 index;
+ u32 slot_ofs;
+ u32 irq_status;
+};
+
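+/*
+ * Example descriptor (sketch; the buffer and the numeric values are purely
+ * illustrative):
+ *
+ *	struct mtk_ipi_info info = {
+ *		.msg		= shared_buf,
+ *		.len		= MTK_VCP_MBOX_SLOT_MAX_SIZE,
+ *		.id		= 0,
+ *		.index		= 3,
+ *		.slot_ofs	= 0x40,
+ *	};
+ */
+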
+#endif /* __MTK_VCP_MAILBOX_H__ */
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 66f98a3da8d8..7b8aad47121e 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -129,13 +129,6 @@ struct maple_arange_64 {
struct maple_metadata meta;
};
-struct maple_alloc {
- unsigned long total;
- unsigned char node_count;
- unsigned int request_count;
- struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
-};
-
struct maple_topiary {
struct maple_pnode *parent;
struct maple_enode *next; /* Overlaps the pivot */
@@ -306,7 +299,6 @@ struct maple_node {
};
struct maple_range_64 mr64;
struct maple_arange_64 ma64;
- struct maple_alloc alloc;
};
};
@@ -536,7 +528,6 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
-int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long max);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 42d6d47e445b..5d1203b9af20 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -29,7 +29,6 @@ struct mdio_device {
struct device dev;
struct mii_bus *bus;
- char modalias[MDIO_NAME_SIZE];
int (*bus_match)(struct device *dev, const struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
@@ -648,6 +647,19 @@ static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
mask, set);
}
+static inline int __mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
+ u16 regnum)
+{
+ return __mdiobus_c45_read(mdiodev->bus, mdiodev->addr, devad, regnum);
+}
+
+static inline int __mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
+ u16 regnum, u16 val)
+{
+ return __mdiobus_c45_write(mdiodev->bus, mdiodev->addr, devad, regnum,
+ val);
+}
+
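+/*
+ * As with other double-underscore MDIO accessors, these variants assume the
+ * caller already holds the bus lock, e.g.:
+ *
+ *	mutex_lock(&mdiodev->bus->mdio_lock);
+ *	val = __mdiodev_c45_read(mdiodev, MDIO_MMD_PMAPMD, reg);
+ *	mutex_unlock(&mdiodev->bus->mdio_lock);
+ */
+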
static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
u32 regnum, u16 mask, u16 set)
{
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 221118b5a16e..6ec5e9ac0699 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -598,9 +598,9 @@ extern void *alloc_large_system_hash(const char *tablename,
*/
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
+extern bool hashdist; /* Distribute hashes across NUMA nodes? */
#else
-#define hashdist (0)
+#define hashdist (false)
#endif
#ifdef CONFIG_MEMTEST
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0651865a4564..1baee139999f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -65,7 +65,7 @@ struct mem_cgroup_reclaim_cookie {
#define MEM_CGROUP_ID_SHIFT 16
-struct mem_cgroup_id {
+struct mem_cgroup_private_id {
int id;
refcount_t ref;
};
@@ -191,7 +191,7 @@ struct mem_cgroup {
struct cgroup_subsys_state css;
/* Private memcg ID. Used to ID objects that outlive the cgroup */
- struct mem_cgroup_id id;
+ struct mem_cgroup_private_id id;
/* Accounted resources */
struct page_counter memory; /* Both v1 & v2 */
@@ -557,13 +557,15 @@ static inline bool mem_cgroup_disabled(void)
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
- unsigned long *low)
+ unsigned long *low,
+ unsigned long *usage)
{
- *min = *low = 0;
+ *min = *low = *usage = 0;
if (mem_cgroup_disabled())
return;
+ *usage = page_counter_read(&memcg->memory);
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
@@ -819,23 +821,21 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*)(struct task_struct *, void *), void *arg);
-static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
return 0;
return memcg->id.id;
}
-struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id);
-#ifdef CONFIG_SHRINKER_DEBUG
-static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
{
- return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+ return memcg ? cgroup_id(memcg->css.cgroup) : 0;
}
-struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
-#endif
+struct mem_cgroup *mem_cgroup_get_from_id(u64 id);
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
@@ -893,7 +893,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
return true;
- return !!(memcg->css.flags & CSS_ONLINE);
+ return css_is_online(&memcg->css);
}
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
@@ -919,8 +919,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -949,7 +947,11 @@ static inline void mod_memcg_page_state(struct page *page,
rcu_read_unlock();
}
+unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
+bool memcg_stat_item_valid(int idx);
+bool memcg_vm_event_item_valid(enum vm_event_item idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx);
@@ -1037,6 +1039,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return id;
}
+void mem_cgroup_flush_workqueue(void);
+
extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */
@@ -1102,9 +1106,10 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
- unsigned long *low)
+ unsigned long *low,
+ unsigned long *usage)
{
- *min = *low = 0;
+ *min = *low = *usage = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1277,29 +1282,27 @@ static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
{
}
-static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
return 0;
}
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+static inline struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
{
WARN_ON_ONCE(id);
/* XXX: This should always return root_mem_cgroup */
return NULL;
}
-#ifdef CONFIG_SHRINKER_DEBUG
-static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
{
return 0;
}
-static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+static inline struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
{
return NULL;
}
-#endif
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
@@ -1328,11 +1331,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return 0;
}
-static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
-{
- return 0;
-}
-
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
@@ -1373,6 +1371,21 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
return 0;
}
+static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
+{
+ return 0;
+}
+
+static inline bool memcg_stat_item_valid(int idx)
+{
+ return false;
+}
+
+static inline bool memcg_vm_event_item_valid(enum vm_event_item idx)
+{
+ return false;
+}
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
@@ -1436,6 +1449,8 @@ static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
return 0;
}
+static inline void mem_cgroup_flush_workqueue(void) { }
+
static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */
@@ -1727,7 +1742,7 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_virt(void *p);
static inline void count_objcg_events(struct obj_cgroup *objcg,
enum vm_event_item idx,
@@ -1799,7 +1814,7 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return -1;
}
-static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+static inline struct mem_cgroup *mem_cgroup_from_virt(void *p)
{
return NULL;
}
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index cc74de3dbcfe..c328a7b356d0 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -17,6 +17,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
* to by vm_flags_ptr.
*/
int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
+struct file *memfd_alloc_file(const char *name, unsigned int flags);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -31,6 +32,11 @@ static inline int memfd_check_seals_mmap(struct file *file,
{
return 0;
}
+
+static inline struct file *memfd_alloc_file(const char *name, unsigned int flags)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif
#endif /* __LINUX_MEMFD_H */
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
index 7b5e11cf905f..d333dcdbeae7 100644
--- a/include/linux/memory-failure.h
+++ b/include/linux/memory-failure.h
@@ -4,8 +4,6 @@
#include <linux/interval_tree.h>
-struct pfn_address_space;
-
struct pfn_address_space {
struct interval_tree_node node;
struct address_space *mapping;
@@ -13,7 +11,18 @@ struct pfn_address_space {
unsigned long pfn, pgoff_t *pgoff);
};
+#ifdef CONFIG_MEMORY_FAILURE
int register_pfn_address_space(struct pfn_address_space *pfn_space);
void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+#else
+static inline int register_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+}
+#endif /* CONFIG_MEMORY_FAILURE */
#endif /* _LINUX_MEMORY_FAILURE_H */
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 713ec0435b48..e3c2ccf872a8 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -224,7 +224,8 @@ static inline bool is_fsdax_page(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page, unsigned int order);
+void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
+ unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -234,9 +235,11 @@ bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
-static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+static inline void zone_device_folio_init(struct folio *folio,
+ struct dev_pagemap *pgmap,
+ unsigned int order)
{
- zone_device_page_init(&folio->page, order);
+ zone_device_page_init(&folio->page, pgmap, order);
if (order)
folio_set_large_rmappable(folio);
}
diff --git a/include/linux/mfd/rohm-bd72720.h b/include/linux/mfd/rohm-bd72720.h
new file mode 100644
index 000000000000..ae7343bcab06
--- /dev/null
+++ b/include/linux/mfd/rohm-bd72720.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2025 ROHM Semiconductors.
+ *
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ */
+
+#ifndef _MFD_BD72720_H
+#define _MFD_BD72720_H
+
+#include <linux/regmap.h>
+
+enum {
+ BD72720_BUCK1,
+ BD72720_BUCK2,
+ BD72720_BUCK3,
+ BD72720_BUCK4,
+ BD72720_BUCK5,
+ BD72720_BUCK6,
+ BD72720_BUCK7,
+ BD72720_BUCK8,
+ BD72720_BUCK9,
+ BD72720_BUCK10,
+ BD72720_BUCK11,
+ BD72720_LDO1,
+ BD72720_LDO2,
+ BD72720_LDO3,
+ BD72720_LDO4,
+ BD72720_LDO5,
+ BD72720_LDO6,
+ BD72720_LDO7,
+ BD72720_LDO8,
+ BD72720_LDO9,
+ BD72720_LDO10,
+ BD72720_LDO11,
+ BD72720_REGULATOR_AMOUNT,
+};
+
+/* BD72720 interrupts */
+#define BD72720_INT_LONGPUSH_MASK BIT(0)
+#define BD72720_INT_MIDPUSH_MASK BIT(1)
+#define BD72720_INT_SHORTPUSH_MASK BIT(2)
+#define BD72720_INT_PUSH_MASK BIT(3)
+#define BD72720_INT_HALL_DET_MASK BIT(4)
+#define BD72720_INT_HALL_TGL_MASK BIT(5)
+#define BD72720_INT_WDOG_MASK BIT(6)
+#define BD72720_INT_SWRESET_MASK BIT(7)
+#define BD72720_INT_SEQ_DONE_MASK BIT(0)
+#define BD72720_INT_PGFAULT_MASK BIT(4)
+#define BD72720_INT_BUCK1_DVS_MASK BIT(0)
+#define BD72720_INT_BUCK2_DVS_MASK BIT(1)
+#define BD72720_INT_BUCK3_DVS_MASK BIT(2)
+#define BD72720_INT_BUCK4_DVS_MASK BIT(3)
+#define BD72720_INT_BUCK5_DVS_MASK BIT(4)
+#define BD72720_INT_BUCK6_DVS_MASK BIT(5)
+#define BD72720_INT_BUCK7_DVS_MASK BIT(6)
+#define BD72720_INT_BUCK8_DVS_MASK BIT(7)
+#define BD72720_INT_BUCK9_DVS_MASK BIT(0)
+#define BD72720_INT_BUCK10_DVS_MASK BIT(1)
+#define BD72720_INT_LDO1_DVS_MASK BIT(4)
+#define BD72720_INT_LDO2_DVS_MASK BIT(5)
+#define BD72720_INT_LDO3_DVS_MASK BIT(6)
+#define BD72720_INT_LDO4_DVS_MASK BIT(7)
+#define BD72720_INT_VBUS_RMV_MASK BIT(0)
+#define BD72720_INT_VBUS_DET_MASK BIT(1)
+#define BD72720_INT_VBUS_MON_RES_MASK BIT(2)
+#define BD72720_INT_VBUS_MON_DET_MASK BIT(3)
+#define BD72720_INT_VSYS_MON_RES_MASK BIT(0)
+#define BD72720_INT_VSYS_MON_DET_MASK BIT(1)
+#define BD72720_INT_VSYS_UV_RES_MASK BIT(2)
+#define BD72720_INT_VSYS_UV_DET_MASK BIT(3)
+#define BD72720_INT_VSYS_LO_RES_MASK BIT(4)
+#define BD72720_INT_VSYS_LO_DET_MASK BIT(5)
+#define BD72720_INT_VSYS_OV_RES_MASK BIT(6)
+#define BD72720_INT_VSYS_OV_DET_MASK BIT(7)
+#define BD72720_INT_BAT_ILIM_MASK BIT(0)
+#define BD72720_INT_CHG_DONE_MASK BIT(1)
+#define BD72720_INT_EXTEMP_TOUT_MASK BIT(2)
+#define BD72720_INT_CHG_WDT_EXP_MASK BIT(3)
+#define BD72720_INT_BAT_MNT_OUT_MASK BIT(4)
+#define BD72720_INT_BAT_MNT_IN_MASK BIT(5)
+#define BD72720_INT_CHG_TRNS_MASK BIT(7)
+#define BD72720_INT_VBAT_MON_RES_MASK BIT(0)
+#define BD72720_INT_VBAT_MON_DET_MASK BIT(1)
+#define BD72720_INT_VBAT_SHT_RES_MASK BIT(2)
+#define BD72720_INT_VBAT_SHT_DET_MASK BIT(3)
+#define BD72720_INT_VBAT_LO_RES_MASK BIT(4)
+#define BD72720_INT_VBAT_LO_DET_MASK BIT(5)
+#define BD72720_INT_VBAT_OV_RES_MASK BIT(6)
+#define BD72720_INT_VBAT_OV_DET_MASK BIT(7)
+#define BD72720_INT_BAT_RMV_MASK BIT(0)
+#define BD72720_INT_BAT_DET_MASK BIT(1)
+#define BD72720_INT_DBAT_DET_MASK BIT(2)
+#define BD72720_INT_BAT_TEMP_TRNS_MASK BIT(3)
+#define BD72720_INT_LOBTMP_RES_MASK BIT(4)
+#define BD72720_INT_LOBTMP_DET_MASK BIT(5)
+#define BD72720_INT_OVBTMP_RES_MASK BIT(6)
+#define BD72720_INT_OVBTMP_DET_MASK BIT(7)
+#define BD72720_INT_OCUR1_RES_MASK BIT(0)
+#define BD72720_INT_OCUR1_DET_MASK BIT(1)
+#define BD72720_INT_OCUR2_RES_MASK BIT(2)
+#define BD72720_INT_OCUR2_DET_MASK BIT(3)
+#define BD72720_INT_OCUR3_RES_MASK BIT(4)
+#define BD72720_INT_OCUR3_DET_MASK BIT(5)
+#define BD72720_INT_CC_MON1_DET_MASK BIT(0)
+#define BD72720_INT_CC_MON2_DET_MASK BIT(1)
+#define BD72720_INT_CC_MON3_DET_MASK BIT(2)
+#define BD72720_INT_GPIO1_IN_MASK BIT(4)
+#define BD72720_INT_GPIO2_IN_MASK BIT(5)
+#define BD72720_INT_VF125_RES_MASK BIT(0)
+#define BD72720_INT_VF125_DET_MASK BIT(1)
+#define BD72720_INT_VF_RES_MASK BIT(2)
+#define BD72720_INT_VF_DET_MASK BIT(3)
+#define BD72720_INT_RTC0_MASK BIT(4)
+#define BD72720_INT_RTC1_MASK BIT(5)
+#define BD72720_INT_RTC2_MASK BIT(6)
+
+enum {
+ /*
+ * The IRQs excluding GPIO1 and GPIO2 are ordered in the same way as the
+ * respective IRQ bits in the status and mask registers.
+ *
+ * The BD72720_INT_GPIO1_IN and BD72720_INT_GPIO2_IN are IRQs which can
+ * be used by other devices. Let's have GPIO1 and GPIO2 as the first IRQs
+ * here so we can use regmap-IRQ with the standard device tree xlate,
+ * while devices connected to the BD72720 IRQ input pins can refer to
+ * the first two interrupt numbers in their device tree. If we placed
+ * BD72720_INT_GPIO1_IN and BD72720_INT_GPIO2_IN after the CC_MON_DET
+ * interrupts (like they are in the registers), the devices using
+ * BD72720 as an IRQ parent would have to refer to the interrupts
+ * starting at an offset which might not be trivial to understand.
+ */
+ BD72720_INT_GPIO1_IN,
+ BD72720_INT_GPIO2_IN,
+ BD72720_INT_LONGPUSH,
+ BD72720_INT_MIDPUSH,
+ BD72720_INT_SHORTPUSH,
+ BD72720_INT_PUSH,
+ BD72720_INT_HALL_DET,
+ BD72720_INT_HALL_TGL,
+ BD72720_INT_WDOG,
+ BD72720_INT_SWRESET,
+ BD72720_INT_SEQ_DONE,
+ BD72720_INT_PGFAULT,
+ BD72720_INT_BUCK1_DVS,
+ BD72720_INT_BUCK2_DVS,
+ BD72720_INT_BUCK3_DVS,
+ BD72720_INT_BUCK4_DVS,
+ BD72720_INT_BUCK5_DVS,
+ BD72720_INT_BUCK6_DVS,
+ BD72720_INT_BUCK7_DVS,
+ BD72720_INT_BUCK8_DVS,
+ BD72720_INT_BUCK9_DVS,
+ BD72720_INT_BUCK10_DVS,
+ BD72720_INT_LDO1_DVS,
+ BD72720_INT_LDO2_DVS,
+ BD72720_INT_LDO3_DVS,
+ BD72720_INT_LDO4_DVS,
+ BD72720_INT_VBUS_RMV,
+ BD72720_INT_VBUS_DET,
+ BD72720_INT_VBUS_MON_RES,
+ BD72720_INT_VBUS_MON_DET,
+ BD72720_INT_VSYS_MON_RES,
+ BD72720_INT_VSYS_MON_DET,
+ BD72720_INT_VSYS_UV_RES,
+ BD72720_INT_VSYS_UV_DET,
+ BD72720_INT_VSYS_LO_RES,
+ BD72720_INT_VSYS_LO_DET,
+ BD72720_INT_VSYS_OV_RES,
+ BD72720_INT_VSYS_OV_DET,
+ BD72720_INT_BAT_ILIM,
+ BD72720_INT_CHG_DONE,
+ BD72720_INT_EXTEMP_TOUT,
+ BD72720_INT_CHG_WDT_EXP,
+ BD72720_INT_BAT_MNT_OUT,
+ BD72720_INT_BAT_MNT_IN,
+ BD72720_INT_CHG_TRNS,
+ BD72720_INT_VBAT_MON_RES,
+ BD72720_INT_VBAT_MON_DET,
+ BD72720_INT_VBAT_SHT_RES,
+ BD72720_INT_VBAT_SHT_DET,
+ BD72720_INT_VBAT_LO_RES,
+ BD72720_INT_VBAT_LO_DET,
+ BD72720_INT_VBAT_OV_RES,
+ BD72720_INT_VBAT_OV_DET,
+ BD72720_INT_BAT_RMV,
+ BD72720_INT_BAT_DET,
+ BD72720_INT_DBAT_DET,
+ BD72720_INT_BAT_TEMP_TRNS,
+ BD72720_INT_LOBTMP_RES,
+ BD72720_INT_LOBTMP_DET,
+ BD72720_INT_OVBTMP_RES,
+ BD72720_INT_OVBTMP_DET,
+ BD72720_INT_OCUR1_RES,
+ BD72720_INT_OCUR1_DET,
+ BD72720_INT_OCUR2_RES,
+ BD72720_INT_OCUR2_DET,
+ BD72720_INT_OCUR3_RES,
+ BD72720_INT_OCUR3_DET,
+ BD72720_INT_CC_MON1_DET,
+ BD72720_INT_CC_MON2_DET,
+ BD72720_INT_CC_MON3_DET,
+ BD72720_INT_VF125_RES,
+ BD72720_INT_VF125_DET,
+ BD72720_INT_VF_RES,
+ BD72720_INT_VF_DET,
+ BD72720_INT_RTC0,
+ BD72720_INT_RTC1,
+ BD72720_INT_RTC2,
+};
+
+/*
+ * BD72720 Registers:
+ * The BD72720 has two sets of registers behind two different I2C slave
+ * addresses: the "common" registers behind 0x4b, the charger registers
+ * behind 0x4c.
+ */
+/* Registers behind I2C slave 0x4b */
+enum {
+ BD72720_REG_PRODUCT_ID,
+ BD72720_REG_MANUFACTURER_ID,
+ BD72720_REG_PMIC_REV_NUM,
+ BD72720_REG_NVM_REV_NUM,
+ BD72720_REG_BOOTSRC = 0x10,
+ BD72720_REG_RESETSRC_1,
+ BD72720_REG_RESETSRC_2,
+ BD72720_REG_RESETSRC_3,
+ BD72720_REG_RESETSRC_4,
+ BD72720_REG_RESETSRC_5,
+ BD72720_REG_RESETSRC_6,
+ BD72720_REG_RESETSRC_7,
+ BD72720_REG_POWER_STATE,
+ BD72720_REG_PS_CFG,
+ BD72720_REG_PS_CTRL_1,
+ BD72720_REG_PS_CTRL_2,
+ BD72720_REG_RCVCFG,
+ BD72720_REG_RCVNUM,
+ BD72720_REG_CRDCFG,
+ BD72720_REG_REX_CTRL,
+
+ BD72720_REG_BUCK1_ON,
+ BD72720_REG_BUCK1_MODE,
+ /* Deep idle vsel */
+ BD72720_REG_BUCK1_VSEL_DI,
+ /* Idle vsel */
+ BD72720_REG_BUCK1_VSEL_I,
+ /* Suspend vsel */
+ BD72720_REG_BUCK1_VSEL_S,
+ /* Run boot vsel */
+ BD72720_REG_BUCK1_VSEL_RB,
+ /* Run0 ... run3 vsel */
+ BD72720_REG_BUCK1_VSEL_RB0,
+ BD72720_REG_BUCK1_VSEL_RB1,
+ BD72720_REG_BUCK1_VSEL_RB2,
+ BD72720_REG_BUCK1_VSEL_RB3,
+
+ BD72720_REG_BUCK2_ON,
+ BD72720_REG_BUCK2_MODE,
+ BD72720_REG_BUCK2_VSEL_DI,
+ BD72720_REG_BUCK2_VSEL_I,
+ BD72720_REG_BUCK2_VSEL_S,
+ /* Run vsel */
+ BD72720_REG_BUCK2_VSEL_R,
+
+ BD72720_REG_BUCK3_ON,
+ BD72720_REG_BUCK3_MODE,
+ BD72720_REG_BUCK3_VSEL_DI,
+ BD72720_REG_BUCK3_VSEL_I,
+ BD72720_REG_BUCK3_VSEL_S,
+ BD72720_REG_BUCK3_VSEL_R,
+
+ BD72720_REG_BUCK4_ON,
+ BD72720_REG_BUCK4_MODE,
+ BD72720_REG_BUCK4_VSEL_DI,
+ BD72720_REG_BUCK4_VSEL_I,
+ BD72720_REG_BUCK4_VSEL_S,
+ BD72720_REG_BUCK4_VSEL_R,
+
+ BD72720_REG_BUCK5_ON,
+ BD72720_REG_BUCK5_MODE,
+ BD72720_REG_BUCK5_VSEL,
+
+ BD72720_REG_BUCK6_ON,
+ BD72720_REG_BUCK6_MODE,
+ BD72720_REG_BUCK6_VSEL,
+
+ BD72720_REG_BUCK7_ON,
+ BD72720_REG_BUCK7_MODE,
+ BD72720_REG_BUCK7_VSEL,
+
+ BD72720_REG_BUCK8_ON,
+ BD72720_REG_BUCK8_MODE,
+ BD72720_REG_BUCK8_VSEL,
+
+ BD72720_REG_BUCK9_ON,
+ BD72720_REG_BUCK9_MODE,
+ BD72720_REG_BUCK9_VSEL,
+
+ BD72720_REG_BUCK10_ON,
+ BD72720_REG_BUCK10_MODE,
+ BD72720_REG_BUCK10_VSEL,
+
+ BD72720_REG_LDO1_ON,
+ BD72720_REG_LDO1_MODE1,
+ BD72720_REG_LDO1_MODE2,
+ BD72720_REG_LDO1_VSEL_DI,
+ BD72720_REG_LDO1_VSEL_I,
+ BD72720_REG_LDO1_VSEL_S,
+ BD72720_REG_LDO1_VSEL_RB,
+ BD72720_REG_LDO1_VSEL_R0,
+ BD72720_REG_LDO1_VSEL_R1,
+ BD72720_REG_LDO1_VSEL_R2,
+ BD72720_REG_LDO1_VSEL_R3,
+
+ BD72720_REG_LDO2_ON,
+ BD72720_REG_LDO2_MODE,
+ BD72720_REG_LDO2_VSEL_DI,
+ BD72720_REG_LDO2_VSEL_I,
+ BD72720_REG_LDO2_VSEL_S,
+ BD72720_REG_LDO2_VSEL_R,
+
+ BD72720_REG_LDO3_ON,
+ BD72720_REG_LDO3_MODE,
+ BD72720_REG_LDO3_VSEL_DI,
+ BD72720_REG_LDO3_VSEL_I,
+ BD72720_REG_LDO3_VSEL_S,
+ BD72720_REG_LDO3_VSEL_R,
+
+ BD72720_REG_LDO4_ON,
+ BD72720_REG_LDO4_MODE,
+ BD72720_REG_LDO4_VSEL_DI,
+ BD72720_REG_LDO4_VSEL_I,
+ BD72720_REG_LDO4_VSEL_S,
+ BD72720_REG_LDO4_VSEL_R,
+
+ BD72720_REG_LDO5_ON,
+ BD72720_REG_LDO5_MODE,
+ BD72720_REG_LDO5_VSEL,
+
+ BD72720_REG_LDO6_ON,
+ BD72720_REG_LDO6_MODE,
+ BD72720_REG_LDO6_VSEL,
+
+ BD72720_REG_LDO7_ON,
+ BD72720_REG_LDO7_MODE,
+ BD72720_REG_LDO7_VSEL,
+
+ BD72720_REG_LDO8_ON,
+ BD72720_REG_LDO8_MODE,
+ BD72720_REG_LDO8_VSEL,
+
+ BD72720_REG_LDO9_ON,
+ BD72720_REG_LDO9_MODE,
+ BD72720_REG_LDO9_VSEL,
+
+ BD72720_REG_LDO10_ON,
+ BD72720_REG_LDO10_MODE,
+ BD72720_REG_LDO10_VSEL,
+
+ BD72720_REG_LDO11_ON,
+ BD72720_REG_LDO11_MODE,
+ BD72720_REG_LDO11_VSEL,
+
+ BD72720_REG_GPIO1_ON = 0x8b,
+ BD72720_REG_GPIO2_ON,
+ BD72720_REG_GPIO3_ON,
+ BD72720_REG_GPIO4_ON,
+ BD72720_REG_GPIO5_ON,
+
+ BD72720_REG_GPIO1_CTRL,
+ BD72720_REG_GPIO2_CTRL,
+#define BD72720_GPIO_IRQ_TYPE_MASK GENMASK(6, 4)
+#define BD72720_GPIO_IRQ_TYPE_FALLING 0x0
+#define BD72720_GPIO_IRQ_TYPE_RISING 0x1
+#define BD72720_GPIO_IRQ_TYPE_BOTH 0x2
+#define BD72720_GPIO_IRQ_TYPE_HIGH 0x3
+#define BD72720_GPIO_IRQ_TYPE_LOW 0x4
+ BD72720_REG_GPIO3_CTRL,
+ BD72720_REG_GPIO4_CTRL,
+ BD72720_REG_GPIO5_CTRL,
+#define BD72720_GPIO_DRIVE_MASK BIT(1)
+#define BD72720_GPIO_HIGH BIT(0)
+
+ BD72720_REG_EPDEN_CTRL,
+ BD72720_REG_GATECNT_CTRL,
+ BD72720_REG_LED_CTRL,
+
+ BD72720_REG_PWRON_CFG1,
+ BD72720_REG_PWRON_CFG2,
+
+ BD72720_REG_OUT32K,
+ BD72720_REG_CONF,
+ BD72720_REG_HALL_STAT,
+
+ BD72720_REG_RTC_SEC = 0xa0,
+#define BD72720_REG_RTC_START BD72720_REG_RTC_SEC
+ BD72720_REG_RTC_MIN,
+ BD72720_REG_RTC_HOUR,
+ BD72720_REG_RTC_WEEK,
+ BD72720_REG_RTC_DAY,
+ BD72720_REG_RTC_MON,
+ BD72720_REG_RTC_YEAR,
+
+ BD72720_REG_RTC_ALM0_SEC,
+#define BD72720_REG_RTC_ALM_START BD72720_REG_RTC_ALM0_SEC
+ BD72720_REG_RTC_ALM0_MIN,
+ BD72720_REG_RTC_ALM0_HOUR,
+ BD72720_REG_RTC_ALM0_WEEK,
+ BD72720_REG_RTC_ALM0_MON,
+ BD72720_REG_RTC_ALM0_YEAR,
+
+ BD72720_REG_RTC_ALM1_SEC,
+ BD72720_REG_RTC_ALM1_MIN,
+ BD72720_REG_RTC_ALM1_HOUR,
+ BD72720_REG_RTC_ALM1_WEEK,
+ BD72720_REG_RTC_ALM1_MON,
+ BD72720_REG_RTC_ALM1_YEAR,
+
+ BD72720_REG_RTC_ALM0_EN,
+ BD72720_REG_RTC_ALM1_EN,
+ BD72720_REG_RTC_ALM2,
+
+ BD72720_REG_INT_LVL1_EN = 0xc0,
+#define BD72720_MASK_LVL1_EN_ALL GENMASK(7, 0)
+ BD72720_REG_INT_PS1_EN,
+ BD72720_REG_INT_PS2_EN,
+ BD72720_REG_INT_DVS1_EN,
+ BD72720_REG_INT_DVS2_EN,
+ BD72720_REG_INT_VBUS_EN,
+ BD72720_REG_INT_VSYS_EN,
+ BD72720_REG_INT_CHG_EN,
+ BD72720_REG_INT_BAT1_EN,
+ BD72720_REG_INT_BAT2_EN,
+ BD72720_REG_INT_IBAT_EN,
+ BD72720_REG_INT_ETC1_EN,
+ BD72720_REG_INT_ETC2_EN,
+
+ /*
+ * The _STAT registers report the IRQ line state and are used to ack the
+ * IRQ. The _SRC registers below indicate the current state of the
+ * function connected to the line.
+ */
+ BD72720_REG_INT_LVL1_STAT,
+ BD72720_REG_INT_PS1_STAT,
+ BD72720_REG_INT_PS2_STAT,
+ BD72720_REG_INT_DVS1_STAT,
+ BD72720_REG_INT_DVS2_STAT,
+ BD72720_REG_INT_VBUS_STAT,
+ BD72720_REG_INT_VSYS_STAT,
+ BD72720_REG_INT_CHG_STAT,
+ BD72720_REG_INT_BAT1_STAT,
+ BD72720_REG_INT_BAT2_STAT,
+ BD72720_REG_INT_IBAT_STAT,
+ BD72720_REG_INT_ETC1_STAT,
+ BD72720_REG_INT_ETC2_STAT,
+
+ BD72720_REG_INT_LVL1_SRC,
+ BD72720_REG_INT_PS1_SRC,
+ BD72720_REG_INT_PS2_SRC,
+ BD72720_REG_INT_DVS1_SRC,
+ BD72720_REG_INT_DVS2_SRC,
+ BD72720_REG_INT_VBUS_SRC,
+#define BD72720_MASK_DCIN_DET BIT(1)
+ BD72720_REG_INT_VSYS_SRC,
+ BD72720_REG_INT_CHG_SRC,
+ BD72720_REG_INT_BAT1_SRC,
+ BD72720_REG_INT_BAT2_SRC,
+ BD72720_REG_INT_IBAT_SRC,
+ BD72720_REG_INT_ETC1_SRC,
+ BD72720_REG_INT_ETC2_SRC,
+};
+
+/* Register masks */
+#define BD72720_MASK_DEEP_IDLE_EN BIT(0)
+#define BD72720_MASK_IDLE_EN BIT(1)
+#define BD72720_MASK_SUSPEND_EN BIT(2)
+#define BD72720_MASK_RUN_B_EN BIT(3)
+#define BD72720_MASK_RUN_0_EN BIT(4)
+#define BD72720_MASK_RUN_1_EN BIT(5)
+#define BD72720_MASK_RUN_2_EN BIT(6)
+#define BD72720_MASK_RUN_3_EN BIT(7)
+
+#define BD72720_MASK_RAMP_UP_DELAY GENMASK(7, 6)
+#define BD72720_MASK_BUCK_VSEL GENMASK(7, 0)
+#define BD72720_MASK_LDO12346_VSEL GENMASK(6, 0)
+#define BD72720_MASK_LDO_VSEL GENMASK(7, 0)
+
+#define BD72720_I2C4C_ADDR_OFFSET 0x100
+
+/* Registers behind I2C slave 0x4c */
+enum {
+ BD72720_REG_CHG_STATE = BD72720_I2C4C_ADDR_OFFSET,
+ BD72720_REG_CHG_LAST_STATE,
+ BD72720_REG_CHG_VBUS_STAT,
+ BD72720_REG_CHG_VSYS_STAT,
+ BD72720_REG_CHG_BAT_TEMP_STAT,
+ BD72720_REG_CHG_WDT_STAT,
+ BD72720_REG_CHG_ILIM_STAT,
+ BD72720_REG_CHG_CHG_STAT,
+ BD72720_REG_CHG_EN,
+ BD72720_REG_CHG_INIT,
+ BD72720_REG_CHG_CTRL,
+ BD72720_REG_CHG_SET_1,
+ BD72720_REG_CHG_SET_2,
+ BD72720_REG_CHG_SET_3,
+ BD72720_REG_CHG_VPRE,
+ BD72720_REG_CHG_VBAT_1,
+ BD72720_REG_CHG_VBAT_2,
+ BD72720_REG_CHG_VBAT_3,
+ BD72720_REG_CHG_VBAT_4,
+ BD72720_REG_CHG_BAT_SET_1,
+ BD72720_REG_CHG_BAT_SET_2,
+ BD72720_REG_CHG_BAT_SET_3,
+ BD72720_REG_CHG_IPRE,
+ BD72720_REG_CHG_IFST_TERM,
+ BD72720_REG_CHG_VSYS_REG,
+ BD72720_REG_CHG_VBUS_SET,
+ BD72720_REG_CHG_WDT_PRE,
+ BD72720_REG_CHG_WDT_FST,
+ BD72720_REG_CHG_LED_CTRL,
+ BD72720_REG_CHG_CFG_1,
+ BD72720_REG_CHG_IFST_1,
+ BD72720_REG_CHG_IFST_2,
+ BD72720_REG_CHG_IFST_3,
+ BD72720_REG_CHG_IFST_4,
+ BD72720_REG_CHG_S_CFG_1,
+ BD72720_REG_CHG_S_CFG_2,
+ BD72720_REG_RS_VBUS,
+ BD72720_REG_RS_IBUS,
+ BD72720_REG_RS_VSYS,
+ BD72720_REG_VSYS_STATE_STAT, /* 0x27 + offset */
+
+ BD72720_REG_VM_VBAT_U = BD72720_I2C4C_ADDR_OFFSET + 0x30,
+ BD72720_REG_VM_VBAT_L,
+ BD72720_REG_VM_OCV_PRE_U,
+ BD72720_REG_VM_OCV_PRE_L,
+ BD72720_REG_VM_OCV_PST_U,
+ BD72720_REG_VM_OCV_PST_L,
+ BD72720_REG_VM_OCV_PWRON_U,
+ BD72720_REG_VM_OCV_PWRON_L,
+ BD72720_REG_VM_DVBAT_IMP_U,
+ BD72720_REG_VM_DVBAT_IMP_L,
+ BD72720_REG_VM_SA_VBAT_U,
+ BD72720_REG_VM_SA_VBAT_L,
+ BD72720_REG_VM_SA_VBAT_MIN_U,
+ BD72720_REG_VM_SA_VBAT_MIN_L,
+ BD72720_REG_VM_SA_VBAT_MAX_U,
+ BD72720_REG_VM_SA_VBAT_MAX_L,
+ BD72720_REG_REX_SA_VBAT_U,
+ BD72720_REG_REX_SA_VBAT_L,
+ BD72720_REG_VM_VSYS_U,
+ BD72720_REG_VM_VSYS_L,
+ BD72720_REG_VM_SA_VSYS_U,
+ BD72720_REG_VM_SA_VSYS_L,
+ BD72720_REG_VM_SA_VSYS_MIN_U,
+ BD72720_REG_VM_SA_VSYS_MIN_L,
+ BD72720_REG_VM_SA_VSYS_MAX_U,
+ BD72720_REG_VM_SA_VSYS_MAX_L,
+ BD72720_REG_VM_SA2_VSYS_U,
+ BD72720_REG_VM_SA2_VSYS_L,
+ BD72720_REG_VM_VBUS_U,
+#define BD72720_MASK_VDCIN_U GENMASK(3, 0)
+ BD72720_REG_VM_VBUS_L,
+ BD72720_REG_VM_BATID_U,
+ BD72720_REG_VM_BATID_L,
+ BD72720_REG_VM_BATID_NOLOAD_U,
+ BD72720_REG_VM_BATID_NOLOAD_L,
+ BD72720_REG_VM_BATID_OFS_U,
+ BD72720_REG_VM_BATID_OFS_L,
+ BD72720_REG_VM_VTH_U,
+ BD72720_REG_VM_VTH_L,
+ BD72720_REG_VM_VTH_CORR_U,
+ BD72720_REG_VM_VTH_CORR_L,
+ BD72720_REG_VM_BTMP_U,
+ BD72720_REG_VM_BTMP_L,
+ BD72720_REG_VM_BTMP_IMP_U,
+ BD72720_REG_VM_BTMP_IMP_L,
+ BD72720_REG_VM_VF_U,
+ BD72720_REG_VM_VF_L,
+ BD72720_REG_VM_BATID_TH_U,
+ BD72720_REG_VM_BATID_TH_L,
+ BD72720_REG_VM_BTMP_OV_THR,
+ BD72720_REG_VM_BTMP_OV_DUR,
+ BD72720_REG_VM_BTMP_LO_THR,
+ BD72720_REG_VM_BTMP_LO_DUR,
+ BD72720_REG_ALM_VBAT_TH_U,
+ BD72720_REG_ALM_VBAT_TH_L,
+ BD72720_REG_ALM_VSYS_TH,
+ BD72720_REG_ALM_VBUS_TH,
+ BD72720_REG_ALM_VF_TH,
+ BD72720_REG_VSYS_MAX,
+ BD72720_REG_VSYS_MIN,
+ BD72720_REG_VM_VSYS_SA_MINMAX_CTRL,
+ BD72720_REG_VM_SA_CFG, /* 0x6c + offset */
+
+ BD72720_REG_CC_CURCD_U = BD72720_I2C4C_ADDR_OFFSET + 0x70,
+ BD72720_REG_CC_CURCD_L,
+ BD72720_REG_CC_CURCD_IMP_U,
+ BD72720_REG_CC_CURCD_IMP_L,
+ BD72720_REG_CC_SA_CURCD_U,
+ BD72720_REG_CC_SA_CURCD_L,
+ BD72720_REG_CC_OCUR_MON,
+ BD72720_REG_CC_CCNTD_3,
+ BD72720_REG_CC_CCNTD_2,
+ BD72720_REG_CC_CCNTD_1,
+ BD72720_REG_CC_CCNTD_0,
+ BD72720_REG_REX_CCNTD_3,
+ BD72720_REG_REX_CCNTD_2,
+ BD72720_REG_REX_CCNTD_1,
+ BD72720_REG_REX_CCNTD_0,
+ BD72720_REG_FULL_CCNTD_3,
+ BD72720_REG_FULL_CCNTD_2,
+ BD72720_REG_FULL_CCNTD_1,
+ BD72720_REG_FULL_CCNTD_0,
+ BD72720_REG_CCNTD_CHG_3,
+ BD72720_REG_CCNTD_CHG_2,
+ BD72720_REG_CC_STAT,
+ BD72720_REG_CC_CTRL,
+ BD72720_REG_CC_OCUR_THR_1,
+ BD72720_REG_CC_OCUR_THR_2,
+ BD72720_REG_CC_OCUR_THR_3,
+ BD72720_REG_REX_CURCD_TH,
+ BD72720_REG_CC_BATCAP1_TH_U,
+ BD72720_REG_CC_BATCAP1_TH_L,
+ BD72720_REG_CC_BATCAP2_TH_U,
+ BD72720_REG_CC_BATCAP2_TH_L,
+ BD72720_REG_CC_BATCAP3_TH_U,
+ BD72720_REG_CC_BATCAP3_TH_L,
+ BD72720_REG_CC_CCNTD_CTRL,
+ BD72720_REG_CC_SA_CFG, /* 0x92 + offset */
+ BD72720_REG_IMPCHK_CTRL = BD72720_I2C4C_ADDR_OFFSET + 0xa0,
+};
+
+#endif /* _MFD_BD72720_H */
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 579e8dcfcca4..0a284919a6c3 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -16,6 +16,7 @@ enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_BD71837,
ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD72720,
ROHM_CHIP_TYPE_BD96801,
ROHM_CHIP_TYPE_BD96802,
ROHM_CHIP_TYPE_BD96805,
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index d785e101fe79..4480c631110a 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -40,6 +40,7 @@ enum sec_device_type {
S2DOS05,
S2MPA01,
S2MPG10,
+ S2MPG11,
S2MPS11X,
S2MPS13X,
S2MPS14X,
@@ -69,7 +70,6 @@ struct sec_pmic_dev {
int device_type;
int irq;
- struct regmap_irq_chip_data *irq_data;
};
struct sec_platform_data {
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 8402a5f8e18a..6eab95de6fa8 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -166,6 +166,111 @@ enum s2mpg10_irq {
S2MPG10_IRQ_NR,
};
+enum s2mpg11_common_irq {
+ /* Top-level (common) block */
+ S2MPG11_COMMON_IRQ_PMIC,
+ S2MPG11_COMMON_IRQ_UNUSED,
+};
+
+enum s2mpg11_irq {
+ /* PMIC */
+ S2MPG11_IRQ_PWRONF,
+ S2MPG11_IRQ_PWRONR,
+ S2MPG11_IRQ_PIF_TIMEOUT_MIF,
+ S2MPG11_IRQ_PIF_TIMEOUTS,
+ S2MPG11_IRQ_WTSR,
+ S2MPG11_IRQ_SPD_ABNORMAL_STOP,
+ S2MPG11_IRQ_SPD_PARITY_ERR,
+#define S2MPG11_IRQ_PWRONF_MASK BIT(0)
+#define S2MPG11_IRQ_PWRONR_MASK BIT(1)
+#define S2MPG11_IRQ_PIF_TIMEOUT_MIF_MASK BIT(3)
+#define S2MPG11_IRQ_PIF_TIMEOUTS_MASK BIT(4)
+#define S2MPG11_IRQ_WTSR_MASK BIT(5)
+#define S2MPG11_IRQ_SPD_ABNORMAL_STOP_MASK BIT(6)
+#define S2MPG11_IRQ_SPD_PARITY_ERR_MASK BIT(7)
+
+ S2MPG11_IRQ_140C,
+ S2MPG11_IRQ_120C,
+ S2MPG11_IRQ_TSD,
+ S2MPG11_IRQ_WRST,
+ S2MPG11_IRQ_NTC_CYCLE_DONE,
+ S2MPG11_IRQ_PMETER_OVERF,
+#define S2MPG11_IRQ_INT140C_MASK BIT(0)
+#define S2MPG11_IRQ_INT120C_MASK BIT(1)
+#define S2MPG11_IRQ_TSD_MASK BIT(2)
+#define S2MPG11_IRQ_WRST_MASK BIT(5)
+#define S2MPG11_IRQ_NTC_CYCLE_DONE_MASK BIT(6)
+#define S2MPG11_IRQ_PMETER_OVERF_MASK BIT(7)
+
+ S2MPG11_IRQ_OCP_B1S,
+ S2MPG11_IRQ_OCP_B2S,
+ S2MPG11_IRQ_OCP_B3S,
+ S2MPG11_IRQ_OCP_B4S,
+ S2MPG11_IRQ_OCP_B5S,
+ S2MPG11_IRQ_OCP_B6S,
+ S2MPG11_IRQ_OCP_B7S,
+ S2MPG11_IRQ_OCP_B8S,
+#define S2MPG11_IRQ_OCP_B1S_MASK BIT(0)
+#define S2MPG11_IRQ_OCP_B2S_MASK BIT(1)
+#define S2MPG11_IRQ_OCP_B3S_MASK BIT(2)
+#define S2MPG11_IRQ_OCP_B4S_MASK BIT(3)
+#define S2MPG11_IRQ_OCP_B5S_MASK BIT(4)
+#define S2MPG11_IRQ_OCP_B6S_MASK BIT(5)
+#define S2MPG11_IRQ_OCP_B7S_MASK BIT(6)
+#define S2MPG11_IRQ_OCP_B8S_MASK BIT(7)
+
+ S2MPG11_IRQ_OCP_B9S,
+ S2MPG11_IRQ_OCP_B10S,
+ S2MPG11_IRQ_OCP_BDS,
+ S2MPG11_IRQ_OCP_BAS,
+ S2MPG11_IRQ_OCP_BBS,
+ S2MPG11_IRQ_WLWP_ACC,
+ S2MPG11_IRQ_SPD_SRP_PKT_RST,
+#define S2MPG11_IRQ_OCP_B9S_MASK BIT(0)
+#define S2MPG11_IRQ_OCP_B10S_MASK BIT(1)
+#define S2MPG11_IRQ_OCP_BDS_MASK BIT(2)
+#define S2MPG11_IRQ_OCP_BAS_MASK BIT(3)
+#define S2MPG11_IRQ_OCP_BBS_MASK BIT(4)
+#define S2MPG11_IRQ_WLWP_ACC_MASK BIT(5)
+#define S2MPG11_IRQ_SPD_SRP_PKT_RST_MASK BIT(7)
+
+ S2MPG11_IRQ_PWR_WARN_CH0,
+ S2MPG11_IRQ_PWR_WARN_CH1,
+ S2MPG11_IRQ_PWR_WARN_CH2,
+ S2MPG11_IRQ_PWR_WARN_CH3,
+ S2MPG11_IRQ_PWR_WARN_CH4,
+ S2MPG11_IRQ_PWR_WARN_CH5,
+ S2MPG11_IRQ_PWR_WARN_CH6,
+ S2MPG11_IRQ_PWR_WARN_CH7,
+#define S2MPG11_IRQ_PWR_WARN_CH0_MASK BIT(0)
+#define S2MPG11_IRQ_PWR_WARN_CH1_MASK BIT(1)
+#define S2MPG11_IRQ_PWR_WARN_CH2_MASK BIT(2)
+#define S2MPG11_IRQ_PWR_WARN_CH3_MASK BIT(3)
+#define S2MPG11_IRQ_PWR_WARN_CH4_MASK BIT(4)
+#define S2MPG11_IRQ_PWR_WARN_CH5_MASK BIT(5)
+#define S2MPG11_IRQ_PWR_WARN_CH6_MASK BIT(6)
+#define S2MPG11_IRQ_PWR_WARN_CH7_MASK BIT(7)
+
+ S2MPG11_IRQ_NTC_WARN_CH0,
+ S2MPG11_IRQ_NTC_WARN_CH1,
+ S2MPG11_IRQ_NTC_WARN_CH2,
+ S2MPG11_IRQ_NTC_WARN_CH3,
+ S2MPG11_IRQ_NTC_WARN_CH4,
+ S2MPG11_IRQ_NTC_WARN_CH5,
+ S2MPG11_IRQ_NTC_WARN_CH6,
+ S2MPG11_IRQ_NTC_WARN_CH7,
+#define S2MPG11_IRQ_NTC_WARN_CH0_MASK BIT(0)
+#define S2MPG11_IRQ_NTC_WARN_CH1_MASK BIT(1)
+#define S2MPG11_IRQ_NTC_WARN_CH2_MASK BIT(2)
+#define S2MPG11_IRQ_NTC_WARN_CH3_MASK BIT(3)
+#define S2MPG11_IRQ_NTC_WARN_CH4_MASK BIT(4)
+#define S2MPG11_IRQ_NTC_WARN_CH5_MASK BIT(5)
+#define S2MPG11_IRQ_NTC_WARN_CH6_MASK BIT(6)
+#define S2MPG11_IRQ_NTC_WARN_CH7_MASK BIT(7)
+
+ S2MPG11_IRQ_NR,
+};
+
enum s2mps11_irq {
S2MPS11_IRQ_PWRONF,
S2MPS11_IRQ_PWRONR,
diff --git a/include/linux/mfd/samsung/s2mpg10.h b/include/linux/mfd/samsung/s2mpg10.h
index 9f5919b89a3c..8e5cf21cbd5a 100644
--- a/include/linux/mfd/samsung/s2mpg10.h
+++ b/include/linux/mfd/samsung/s2mpg10.h
@@ -290,6 +290,30 @@ enum s2mpg10_pmic_reg {
S2MPG10_PMIC_LDO_SENSE4,
};
+/* Rail controlled externally, based on PCTRLSELx */
+#define S2MPG10_PMIC_CTRL_ENABLE_EXT BIT(0)
+
+/* For S2MPG10_PMIC_PCTRLSELx */
+#define S2MPG10_PCTRLSEL_PWREN 0x1 /* PWREN pin */
+#define S2MPG10_PCTRLSEL_PWREN_TRG 0x2 /* PWREN_TRG bit in MIMICKING_CTRL */
+#define S2MPG10_PCTRLSEL_PWREN_MIF 0x3 /* PWREN_MIF pin */
+#define S2MPG10_PCTRLSEL_PWREN_MIF_TRG 0x4 /* PWREN_MIF_TRG bit in MIMICKING_CTRL */
+#define S2MPG10_PCTRLSEL_AP_ACTIVE_N 0x5 /* ~AP_ACTIVE_N pin */
+#define S2MPG10_PCTRLSEL_AP_ACTIVE_N_TRG 0x6 /* ~AP_ACTIVE_N_TRG bit in MIMICKING_CTRL */
+#define S2MPG10_PCTRLSEL_CPUCL1_EN 0x7 /* CPUCL1_EN pin */
+#define S2MPG10_PCTRLSEL_CPUCL1_EN2 0x8 /* CPUCL1_EN & PWREN pins */
+#define S2MPG10_PCTRLSEL_CPUCL2_EN 0x9 /* CPUCL2_EN pin */
+#define S2MPG10_PCTRLSEL_CPUCL2_EN2 0xa /* CPUCL2_EN & PWREN pins */
+#define S2MPG10_PCTRLSEL_TPU_EN 0xb /* TPU_EN pin */
+#define S2MPG10_PCTRLSEL_TPU_EN2 0xc /* TPU_EN & ~AP_ACTIVE_N pins */
+#define S2MPG10_PCTRLSEL_TCXO_ON 0xd /* TCXO_ON pin */
+#define S2MPG10_PCTRLSEL_TCXO_ON2 0xe /* TCXO_ON & ~AP_ACTIVE_N pins */
+
+/* For S2MPG10_PMIC_PCTRLSELx of LDO20M */
+#define S2MPG10_PCTRLSEL_LDO20M_EN2 0x1 /* VLDO20M_EN & LDO20M_SFR */
+#define S2MPG10_PCTRLSEL_LDO20M_EN 0x2 /* VLDO20M_EN pin */
+#define S2MPG10_PCTRLSEL_LDO20M_SFR 0x3 /* LDO20M_SFR bit in LDO_CTRL1 register */
+
/* Meter registers (type 0xa00) */
enum s2mpg10_meter_reg {
S2MPG10_METER_CTRL1,
@@ -407,6 +431,16 @@ enum s2mpg10_meter_reg {
/* S2MPG10 regulator IDs */
enum s2mpg10_regulators {
+ S2MPG10_BUCK1,
+ S2MPG10_BUCK2,
+ S2MPG10_BUCK3,
+ S2MPG10_BUCK4,
+ S2MPG10_BUCK5,
+ S2MPG10_BUCK6,
+ S2MPG10_BUCK7,
+ S2MPG10_BUCK8,
+ S2MPG10_BUCK9,
+ S2MPG10_BUCK10,
S2MPG10_LDO1,
S2MPG10_LDO2,
S2MPG10_LDO3,
@@ -438,16 +472,6 @@ enum s2mpg10_regulators {
S2MPG10_LDO29,
S2MPG10_LDO30,
S2MPG10_LDO31,
- S2MPG10_BUCK1,
- S2MPG10_BUCK2,
- S2MPG10_BUCK3,
- S2MPG10_BUCK4,
- S2MPG10_BUCK5,
- S2MPG10_BUCK6,
- S2MPG10_BUCK7,
- S2MPG10_BUCK8,
- S2MPG10_BUCK9,
- S2MPG10_BUCK10,
S2MPG10_REGULATOR_MAX,
};
diff --git a/include/linux/mfd/samsung/s2mpg11.h b/include/linux/mfd/samsung/s2mpg11.h
new file mode 100644
index 000000000000..66daa3bafa6e
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpg11.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015 Samsung Electronics
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_S2MPG11_H
+#define __LINUX_MFD_S2MPG11_H
+
+/* Common registers (type 0x000) */
+enum s2mpg11_common_reg {
+ S2MPG11_COMMON_CHIPID,
+ S2MPG11_COMMON_INT,
+ S2MPG11_COMMON_INT_MASK,
+ S2MPG11_COMMON_SPD_CTRL1 = 0x0a,
+ S2MPG11_COMMON_SPD_CTRL2,
+ S2MPG11_COMMON_SPD_CTRL3,
+ S2MPG11_COMMON_MON1SEL = 0x1a,
+ S2MPG11_COMMON_MON2SEL,
+ S2MPG11_COMMON_MONR,
+ S2MPG11_COMMON_DEBUG_CTRL1,
+ S2MPG11_COMMON_DEBUG_CTRL2,
+ S2MPG11_COMMON_DEBUG_CTRL3,
+ S2MPG11_COMMON_DEBUG_CTRL4,
+ S2MPG11_COMMON_DEBUG_CTRL5,
+ S2MPG11_COMMON_DEBUG_CTRL6,
+ S2MPG11_COMMON_TEST_MODE1,
+ S2MPG11_COMMON_SPD_DEBUG1,
+ S2MPG11_COMMON_SPD_DEBUG2,
+ S2MPG11_COMMON_SPD_DEBUG3,
+ S2MPG11_COMMON_SPD_DEBUG4,
+};
+
+/* For S2MPG11_COMMON_INT and S2MPG11_COMMON_INT_MASK */
+#define S2MPG11_COMMON_INT_SRC GENMASK(2, 0)
+#define S2MPG11_COMMON_INT_SRC_PMIC BIT(0)
+
+/* PMIC registers (type 0x100) */
+enum s2mpg11_pmic_reg {
+ S2MPG11_PMIC_INT1,
+ S2MPG11_PMIC_INT2,
+ S2MPG11_PMIC_INT3,
+ S2MPG11_PMIC_INT4,
+ S2MPG11_PMIC_INT5,
+ S2MPG11_PMIC_INT6,
+ S2MPG11_PMIC_INT1M,
+ S2MPG11_PMIC_INT2M,
+ S2MPG11_PMIC_INT3M,
+ S2MPG11_PMIC_INT4M,
+ S2MPG11_PMIC_INT5M,
+ S2MPG11_PMIC_INT6M,
+ S2MPG11_PMIC_STATUS1,
+ S2MPG11_PMIC_OFFSRC,
+ S2MPG11_PMIC_COMMON_CTRL1,
+ S2MPG11_PMIC_COMMON_CTRL2,
+ S2MPG11_PMIC_COMMON_CTRL3,
+ S2MPG11_PMIC_MIMICKING_CTRL,
+ S2MPG11_PMIC_B1S_CTRL,
+ S2MPG11_PMIC_B1S_OUT1,
+ S2MPG11_PMIC_B1S_OUT2,
+ S2MPG11_PMIC_B2S_CTRL,
+ S2MPG11_PMIC_B2S_OUT1,
+ S2MPG11_PMIC_B2S_OUT2,
+ S2MPG11_PMIC_B3S_CTRL,
+ S2MPG11_PMIC_B3S_OUT1,
+ S2MPG11_PMIC_B3S_OUT2,
+ S2MPG11_PMIC_B4S_CTRL,
+ S2MPG11_PMIC_B4S_OUT,
+ S2MPG11_PMIC_B5S_CTRL,
+ S2MPG11_PMIC_B5S_OUT,
+ S2MPG11_PMIC_B6S_CTRL,
+ S2MPG11_PMIC_B6S_OUT1,
+ S2MPG11_PMIC_B6S_OUT2,
+ S2MPG11_PMIC_B7S_CTRL,
+ S2MPG11_PMIC_B7S_OUT1,
+ S2MPG11_PMIC_B7S_OUT2,
+ S2MPG11_PMIC_B8S_CTRL,
+ S2MPG11_PMIC_B8S_OUT1,
+ S2MPG11_PMIC_B8S_OUT2,
+ S2MPG11_PMIC_B9S_CTRL,
+ S2MPG11_PMIC_B9S_OUT1,
+ S2MPG11_PMIC_B9S_OUT2,
+ S2MPG11_PMIC_B10S_CTRL,
+ S2MPG11_PMIC_B10S_OUT,
+ S2MPG11_PMIC_BUCKD_CTRL,
+ S2MPG11_PMIC_BUCKD_OUT,
+ S2MPG11_PMIC_BUCKA_CTRL,
+ S2MPG11_PMIC_BUCKA_OUT,
+ S2MPG11_PMIC_BB_CTRL,
+ S2MPG11_PMIC_BB_OUT1,
+ S2MPG11_PMIC_BB_OUT2,
+ S2MPG11_PMIC_BUCK1S_USONIC,
+ S2MPG11_PMIC_BUCK2S_USONIC,
+ S2MPG11_PMIC_BUCK3S_USONIC,
+ S2MPG11_PMIC_BUCK4S_USONIC,
+ S2MPG11_PMIC_BUCK5S_USONIC,
+ S2MPG11_PMIC_BUCK6S_USONIC,
+ S2MPG11_PMIC_BUCK7S_USONIC,
+ S2MPG11_PMIC_BUCK8S_USONIC,
+ S2MPG11_PMIC_BUCK9S_USONIC,
+ S2MPG11_PMIC_BUCK10S_USONIC,
+ S2MPG11_PMIC_BUCKD_USONIC,
+ S2MPG11_PMIC_BUCKA_USONIC,
+ S2MPG11_PMIC_BB_USONIC,
+ S2MPG11_PMIC_L1S_CTRL1,
+ S2MPG11_PMIC_L1S_CTRL2,
+ S2MPG11_PMIC_L2S_CTRL1,
+ S2MPG11_PMIC_L2S_CTRL2,
+ S2MPG11_PMIC_L3S_CTRL,
+ S2MPG11_PMIC_L4S_CTRL,
+ S2MPG11_PMIC_L5S_CTRL,
+ S2MPG11_PMIC_L6S_CTRL,
+ S2MPG11_PMIC_L7S_CTRL,
+ S2MPG11_PMIC_L8S_CTRL,
+ S2MPG11_PMIC_L9S_CTRL,
+ S2MPG11_PMIC_L10S_CTRL,
+ S2MPG11_PMIC_L11S_CTRL,
+ S2MPG11_PMIC_L12S_CTRL,
+ S2MPG11_PMIC_L13S_CTRL,
+ S2MPG11_PMIC_L14S_CTRL,
+ S2MPG11_PMIC_L15S_CTRL,
+ S2MPG11_PMIC_LDO_CTRL1,
+ S2MPG11_PMIC_LDO_DSCH1,
+ S2MPG11_PMIC_LDO_DSCH2,
+ S2MPG11_PMIC_DVS_RAMP1,
+ S2MPG11_PMIC_DVS_RAMP2,
+ S2MPG11_PMIC_DVS_RAMP3,
+ S2MPG11_PMIC_DVS_RAMP4,
+ S2MPG11_PMIC_DVS_RAMP5,
+ S2MPG11_PMIC_DVS_RAMP6,
+ /* Nothing @ 0x5a */
+ S2MPG11_PMIC_DVS_SYNC_CTRL1 = 0x5c,
+ S2MPG11_PMIC_DVS_SYNC_CTRL2,
+ S2MPG11_PMIC_OFF_CTRL1,
+ S2MPG11_PMIC_OFF_CTRL2,
+ S2MPG11_PMIC_OFF_CTRL3,
+ S2MPG11_PMIC_SEQ_CTRL1,
+ S2MPG11_PMIC_SEQ_CTRL2,
+ S2MPG11_PMIC_SEQ_CTRL3,
+ S2MPG11_PMIC_SEQ_CTRL4,
+ S2MPG11_PMIC_SEQ_CTRL5,
+ S2MPG11_PMIC_SEQ_CTRL6,
+ S2MPG11_PMIC_SEQ_CTRL7,
+ S2MPG11_PMIC_SEQ_CTRL8,
+ S2MPG11_PMIC_SEQ_CTRL9,
+ S2MPG11_PMIC_SEQ_CTRL10,
+ S2MPG11_PMIC_SEQ_CTRL11,
+ S2MPG11_PMIC_SEQ_CTRL12,
+ S2MPG11_PMIC_SEQ_CTRL13,
+ S2MPG11_PMIC_SEQ_CTRL14,
+ S2MPG11_PMIC_SEQ_CTRL15,
+ S2MPG11_PMIC_SEQ_CTRL16,
+ S2MPG11_PMIC_SEQ_CTRL17,
+ S2MPG11_PMIC_SEQ_CTRL18,
+ S2MPG11_PMIC_SEQ_CTRL19,
+ S2MPG11_PMIC_SEQ_CTRL20,
+ S2MPG11_PMIC_SEQ_CTRL21,
+ S2MPG11_PMIC_SEQ_CTRL22,
+ S2MPG11_PMIC_SEQ_CTRL23,
+ S2MPG11_PMIC_SEQ_CTRL24,
+ S2MPG11_PMIC_SEQ_CTRL25,
+ S2MPG11_PMIC_SEQ_CTRL26,
+ S2MPG11_PMIC_SEQ_CTRL27,
+ S2MPG11_PMIC_OFF_SEQ_CTRL1,
+ S2MPG11_PMIC_OFF_SEQ_CTRL2,
+ S2MPG11_PMIC_OFF_SEQ_CTRL3,
+ S2MPG11_PMIC_OFF_SEQ_CTRL4,
+ S2MPG11_PMIC_OFF_SEQ_CTRL5,
+ S2MPG11_PMIC_OFF_SEQ_CTRL6,
+ S2MPG11_PMIC_OFF_SEQ_CTRL7,
+ S2MPG11_PMIC_OFF_SEQ_CTRL8,
+ S2MPG11_PMIC_OFF_SEQ_CTRL9,
+ S2MPG11_PMIC_OFF_SEQ_CTRL10,
+ S2MPG11_PMIC_OFF_SEQ_CTRL11,
+ S2MPG11_PMIC_OFF_SEQ_CTRL12,
+ S2MPG11_PMIC_OFF_SEQ_CTRL13,
+ S2MPG11_PMIC_OFF_SEQ_CTRL14,
+ S2MPG11_PMIC_OFF_SEQ_CTRL15,
+ S2MPG11_PMIC_OFF_SEQ_CTRL16,
+ S2MPG11_PMIC_OFF_SEQ_CTRL17,
+ S2MPG11_PMIC_PCTRLSEL1,
+ S2MPG11_PMIC_PCTRLSEL2,
+ S2MPG11_PMIC_PCTRLSEL3,
+ S2MPG11_PMIC_PCTRLSEL4,
+ S2MPG11_PMIC_PCTRLSEL5,
+ S2MPG11_PMIC_PCTRLSEL6,
+ S2MPG11_PMIC_DCTRLSEL1,
+ S2MPG11_PMIC_DCTRLSEL2,
+ S2MPG11_PMIC_DCTRLSEL3,
+ S2MPG11_PMIC_DCTRLSEL4,
+ S2MPG11_PMIC_DCTRLSEL5,
+ S2MPG11_PMIC_GPIO_CTRL1,
+ S2MPG11_PMIC_GPIO_CTRL2,
+ S2MPG11_PMIC_GPIO_CTRL3,
+ S2MPG11_PMIC_GPIO_CTRL4,
+ S2MPG11_PMIC_GPIO_CTRL5,
+ S2MPG11_PMIC_GPIO_CTRL6,
+ S2MPG11_PMIC_GPIO_CTRL7,
+ S2MPG11_PMIC_B2S_OCP_WARN,
+ S2MPG11_PMIC_B2S_OCP_WARN_X,
+ S2MPG11_PMIC_B2S_OCP_WARN_Y,
+ S2MPG11_PMIC_B2S_OCP_WARN_Z,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN_X,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN_Y,
+ S2MPG11_PMIC_B2S_SOFT_OCP_WARN_Z,
+ S2MPG11_PMIC_BUCK_OCP_EN1,
+ S2MPG11_PMIC_BUCK_OCP_EN2,
+ S2MPG11_PMIC_BUCK_OCP_PD_EN1,
+ S2MPG11_PMIC_BUCK_OCP_PD_EN2,
+ S2MPG11_PMIC_BUCK_OCP_CTRL1,
+ S2MPG11_PMIC_BUCK_OCP_CTRL2,
+ S2MPG11_PMIC_BUCK_OCP_CTRL3,
+ S2MPG11_PMIC_BUCK_OCP_CTRL4,
+ S2MPG11_PMIC_BUCK_OCP_CTRL5,
+ S2MPG11_PMIC_BUCK_OCP_CTRL6,
+ S2MPG11_PMIC_BUCK_OCP_CTRL7,
+ S2MPG11_PMIC_PIF_CTRL,
+ S2MPG11_PMIC_BUCK_HR_MODE1,
+ S2MPG11_PMIC_BUCK_HR_MODE2,
+ S2MPG11_PMIC_FAULTOUT_CTRL,
+ S2MPG11_PMIC_LDO_SENSE1,
+ S2MPG11_PMIC_LDO_SENSE2,
+};
+
+/* For S2MPG11_PMIC_PCTRLSELx */
+#define S2MPG11_PCTRLSEL_PWREN 0x1 /* PWREN pin */
+#define S2MPG11_PCTRLSEL_PWREN_TRG 0x2 /* PWREN_TRG bit in MIMICKING_CTRL */
+#define S2MPG11_PCTRLSEL_PWREN_MIF 0x3 /* PWREN_MIF pin */
+#define S2MPG11_PCTRLSEL_PWREN_MIF_TRG 0x4 /* PWREN_MIF_TRG bit in MIMICKING_CTRL */
+#define S2MPG11_PCTRLSEL_AP_ACTIVE_N 0x5 /* ~AP_ACTIVE_N pin */
+#define S2MPG11_PCTRLSEL_AP_ACTIVE_N_TRG 0x6 /* ~AP_ACTIVE_N_TRG bit in MIMICKING_CTRL */
+#define S2MPG11_PCTRLSEL_G3D_EN 0x7 /* G3D_EN pin */
+#define S2MPG11_PCTRLSEL_G3D_EN2 0x8 /* G3D_EN & ~AP_ACTIVE_N pins */
+#define S2MPG11_PCTRLSEL_AOC_VDD 0x9 /* AOC_VDD pin */
+#define S2MPG11_PCTRLSEL_AOC_RET 0xa /* AOC_RET pin */
+#define S2MPG11_PCTRLSEL_UFS_EN 0xb /* UFS_EN pin */
+#define S2MPG11_PCTRLSEL_LDO13S_EN 0xc /* VLDO13S_EN pin */
+
+/* Meter registers (type 0xa00) */
+enum s2mpg11_meter_reg {
+ S2MPG11_METER_CTRL1,
+ S2MPG11_METER_CTRL2,
+ S2MPG11_METER_CTRL3,
+ S2MPG11_METER_CTRL4,
+ S2MPG11_METER_CTRL5,
+ S2MPG11_METER_BUCKEN1,
+ S2MPG11_METER_BUCKEN2,
+ S2MPG11_METER_MUXSEL0,
+ S2MPG11_METER_MUXSEL1,
+ S2MPG11_METER_MUXSEL2,
+ S2MPG11_METER_MUXSEL3,
+ S2MPG11_METER_MUXSEL4,
+ S2MPG11_METER_MUXSEL5,
+ S2MPG11_METER_MUXSEL6,
+ S2MPG11_METER_MUXSEL7,
+ S2MPG11_METER_LPF_C0_0,
+ S2MPG11_METER_LPF_C0_1,
+ S2MPG11_METER_LPF_C0_2,
+ S2MPG11_METER_LPF_C0_3,
+ S2MPG11_METER_LPF_C0_4,
+ S2MPG11_METER_LPF_C0_5,
+ S2MPG11_METER_LPF_C0_6,
+ S2MPG11_METER_LPF_C0_7,
+ S2MPG11_METER_NTC_LPF_C0_0,
+ S2MPG11_METER_NTC_LPF_C0_1,
+ S2MPG11_METER_NTC_LPF_C0_2,
+ S2MPG11_METER_NTC_LPF_C0_3,
+ S2MPG11_METER_NTC_LPF_C0_4,
+ S2MPG11_METER_NTC_LPF_C0_5,
+ S2MPG11_METER_NTC_LPF_C0_6,
+ S2MPG11_METER_NTC_LPF_C0_7,
+ S2MPG11_METER_PWR_WARN0,
+ S2MPG11_METER_PWR_WARN1,
+ S2MPG11_METER_PWR_WARN2,
+ S2MPG11_METER_PWR_WARN3,
+ S2MPG11_METER_PWR_WARN4,
+ S2MPG11_METER_PWR_WARN5,
+ S2MPG11_METER_PWR_WARN6,
+ S2MPG11_METER_PWR_WARN7,
+ S2MPG11_METER_NTC_L_WARN0,
+ S2MPG11_METER_NTC_L_WARN1,
+ S2MPG11_METER_NTC_L_WARN2,
+ S2MPG11_METER_NTC_L_WARN3,
+ S2MPG11_METER_NTC_L_WARN4,
+ S2MPG11_METER_NTC_L_WARN5,
+ S2MPG11_METER_NTC_L_WARN6,
+ S2MPG11_METER_NTC_L_WARN7,
+ S2MPG11_METER_NTC_H_WARN0,
+ S2MPG11_METER_NTC_H_WARN1,
+ S2MPG11_METER_NTC_H_WARN2,
+ S2MPG11_METER_NTC_H_WARN3,
+ S2MPG11_METER_NTC_H_WARN4,
+ S2MPG11_METER_NTC_H_WARN5,
+ S2MPG11_METER_NTC_H_WARN6,
+ S2MPG11_METER_NTC_H_WARN7,
+ S2MPG11_METER_PWR_HYS1,
+ S2MPG11_METER_PWR_HYS2,
+ S2MPG11_METER_PWR_HYS3,
+ S2MPG11_METER_PWR_HYS4,
+ S2MPG11_METER_NTC_HYS1,
+ S2MPG11_METER_NTC_HYS2,
+ S2MPG11_METER_NTC_HYS3,
+ S2MPG11_METER_NTC_HYS4,
+ /* Nothing @ 0x3f */
+ S2MPG11_METER_ACC_DATA_CH0_1 = 0x40,
+ S2MPG11_METER_ACC_DATA_CH0_2,
+ S2MPG11_METER_ACC_DATA_CH0_3,
+ S2MPG11_METER_ACC_DATA_CH0_4,
+ S2MPG11_METER_ACC_DATA_CH0_5,
+ S2MPG11_METER_ACC_DATA_CH0_6,
+ S2MPG11_METER_ACC_DATA_CH1_1,
+ S2MPG11_METER_ACC_DATA_CH1_2,
+ S2MPG11_METER_ACC_DATA_CH1_3,
+ S2MPG11_METER_ACC_DATA_CH1_4,
+ S2MPG11_METER_ACC_DATA_CH1_5,
+ S2MPG11_METER_ACC_DATA_CH1_6,
+ S2MPG11_METER_ACC_DATA_CH2_1,
+ S2MPG11_METER_ACC_DATA_CH2_2,
+ S2MPG11_METER_ACC_DATA_CH2_3,
+ S2MPG11_METER_ACC_DATA_CH2_4,
+ S2MPG11_METER_ACC_DATA_CH2_5,
+ S2MPG11_METER_ACC_DATA_CH2_6,
+ S2MPG11_METER_ACC_DATA_CH3_1,
+ S2MPG11_METER_ACC_DATA_CH3_2,
+ S2MPG11_METER_ACC_DATA_CH3_3,
+ S2MPG11_METER_ACC_DATA_CH3_4,
+ S2MPG11_METER_ACC_DATA_CH3_5,
+ S2MPG11_METER_ACC_DATA_CH3_6,
+ S2MPG11_METER_ACC_DATA_CH4_1,
+ S2MPG11_METER_ACC_DATA_CH4_2,
+ S2MPG11_METER_ACC_DATA_CH4_3,
+ S2MPG11_METER_ACC_DATA_CH4_4,
+ S2MPG11_METER_ACC_DATA_CH4_5,
+ S2MPG11_METER_ACC_DATA_CH4_6,
+ S2MPG11_METER_ACC_DATA_CH5_1,
+ S2MPG11_METER_ACC_DATA_CH5_2,
+ S2MPG11_METER_ACC_DATA_CH5_3,
+ S2MPG11_METER_ACC_DATA_CH5_4,
+ S2MPG11_METER_ACC_DATA_CH5_5,
+ S2MPG11_METER_ACC_DATA_CH5_6,
+ S2MPG11_METER_ACC_DATA_CH6_1,
+ S2MPG11_METER_ACC_DATA_CH6_2,
+ S2MPG11_METER_ACC_DATA_CH6_3,
+ S2MPG11_METER_ACC_DATA_CH6_4,
+ S2MPG11_METER_ACC_DATA_CH6_5,
+ S2MPG11_METER_ACC_DATA_CH6_6,
+ S2MPG11_METER_ACC_DATA_CH7_1,
+ S2MPG11_METER_ACC_DATA_CH7_2,
+ S2MPG11_METER_ACC_DATA_CH7_3,
+ S2MPG11_METER_ACC_DATA_CH7_4,
+ S2MPG11_METER_ACC_DATA_CH7_5,
+ S2MPG11_METER_ACC_DATA_CH7_6,
+ S2MPG11_METER_ACC_COUNT_1,
+ S2MPG11_METER_ACC_COUNT_2,
+ S2MPG11_METER_ACC_COUNT_3,
+ S2MPG11_METER_LPF_DATA_CH0_1,
+ S2MPG11_METER_LPF_DATA_CH0_2,
+ S2MPG11_METER_LPF_DATA_CH0_3,
+ S2MPG11_METER_LPF_DATA_CH1_1,
+ S2MPG11_METER_LPF_DATA_CH1_2,
+ S2MPG11_METER_LPF_DATA_CH1_3,
+ S2MPG11_METER_LPF_DATA_CH2_1,
+ S2MPG11_METER_LPF_DATA_CH2_2,
+ S2MPG11_METER_LPF_DATA_CH2_3,
+ S2MPG11_METER_LPF_DATA_CH3_1,
+ S2MPG11_METER_LPF_DATA_CH3_2,
+ S2MPG11_METER_LPF_DATA_CH3_3,
+ S2MPG11_METER_LPF_DATA_CH4_1,
+ S2MPG11_METER_LPF_DATA_CH4_2,
+ S2MPG11_METER_LPF_DATA_CH4_3,
+ S2MPG11_METER_LPF_DATA_CH5_1,
+ S2MPG11_METER_LPF_DATA_CH5_2,
+ S2MPG11_METER_LPF_DATA_CH5_3,
+ S2MPG11_METER_LPF_DATA_CH6_1,
+ S2MPG11_METER_LPF_DATA_CH6_2,
+ S2MPG11_METER_LPF_DATA_CH6_3,
+ S2MPG11_METER_LPF_DATA_CH7_1,
+ S2MPG11_METER_LPF_DATA_CH7_2,
+ S2MPG11_METER_LPF_DATA_CH7_3,
+ /* Nothing @ 0x8b 0x8c */
+ S2MPG11_METER_LPF_DATA_NTC0_1 = 0x8d,
+ S2MPG11_METER_LPF_DATA_NTC0_2,
+ S2MPG11_METER_LPF_DATA_NTC1_1,
+ S2MPG11_METER_LPF_DATA_NTC1_2,
+ S2MPG11_METER_LPF_DATA_NTC2_1,
+ S2MPG11_METER_LPF_DATA_NTC2_2,
+ S2MPG11_METER_LPF_DATA_NTC3_1,
+ S2MPG11_METER_LPF_DATA_NTC3_2,
+ S2MPG11_METER_LPF_DATA_NTC4_1,
+ S2MPG11_METER_LPF_DATA_NTC4_2,
+ S2MPG11_METER_LPF_DATA_NTC5_1,
+ S2MPG11_METER_LPF_DATA_NTC5_2,
+ S2MPG11_METER_LPF_DATA_NTC6_1,
+ S2MPG11_METER_LPF_DATA_NTC6_2,
+ S2MPG11_METER_LPF_DATA_NTC7_1,
+ S2MPG11_METER_LPF_DATA_NTC7_2,
+};
+
+/* S2MPG11 regulator IDs */
+enum s2mpg11_regulators {
+ S2MPG11_BUCKBOOST,
+ S2MPG11_BUCK1,
+ S2MPG11_BUCK2,
+ S2MPG11_BUCK3,
+ S2MPG11_BUCK4,
+ S2MPG11_BUCK5,
+ S2MPG11_BUCK6,
+ S2MPG11_BUCK7,
+ S2MPG11_BUCK8,
+ S2MPG11_BUCK9,
+ S2MPG11_BUCK10,
+ S2MPG11_BUCKD,
+ S2MPG11_BUCKA,
+ S2MPG11_LDO1,
+ S2MPG11_LDO2,
+ S2MPG11_LDO3,
+ S2MPG11_LDO4,
+ S2MPG11_LDO5,
+ S2MPG11_LDO6,
+ S2MPG11_LDO7,
+ S2MPG11_LDO8,
+ S2MPG11_LDO9,
+ S2MPG11_LDO10,
+ S2MPG11_LDO11,
+ S2MPG11_LDO12,
+ S2MPG11_LDO13,
+ S2MPG11_LDO14,
+ S2MPG11_LDO15,
+ S2MPG11_REGULATOR_MAX,
+};
+
+#endif /* __LINUX_MFD_S2MPG11_H */
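The three enums above index separate register blocks (common @ type 0x000, PMIC @ 0x100, meter @ 0xa00), and the explicit initialisers make each enum value the in-block offset. A sketch of flattening an enum into an absolute address under that assumption; a real driver may equally keep one regmap per block:

#include <linux/mfd/samsung/s2mpg11.h>

/* Assumed block base, taken from the "(type 0x...)" comments above. */
#define S2MPG11_TYPE_PMIC	0x100

static inline unsigned int s2mpg11_pmic_addr(enum s2mpg11_pmic_reg reg)
{
	return S2MPG11_TYPE_PMIC + reg;	/* enum value is the in-block offset */
}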
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 5f70d3b5d1b1..097ef4dfcdac 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -667,7 +667,7 @@ static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq,
return -ENODEV;
return request_threaded_irq(irq + wm8350->irq_base, NULL,
- handler, flags, name, data);
+ handler, flags | IRQF_ONESHOT, name, data);
}
static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data)
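The IRQF_ONESHOT addition matters because request_threaded_irq() with a NULL primary handler is refused by the IRQ core (-EINVAL) unless oneshot behaviour is guaranteed or the irqchip is marked oneshot-safe: nothing else would keep a level-triggered line masked until the thread runs. A minimal sketch of the pattern (the trigger type and device name are illustrative):

#include <linux/interrupt.h>

/* Sketch: NULL primary handler requires IRQF_ONESHOT. */
static int example_request(int irq, irq_handler_t thread_fn, void *dev)
{
	return request_threaded_irq(irq, NULL, thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "wm8350", dev);
}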
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index d7f46a8fbfa1..b37fe39cef27 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -962,6 +962,11 @@ static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}
+static inline u8 get_cqe_lro_num_seg(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->srqn) >> 24;
+}
+
#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
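get_cqe_lro_num_seg() reads the top byte of the CQE's srqn dword, which for LRO completions reports how many frames the hardware coalesced. A sketch of RX-side accounting; the stats structure is hypothetical:

#include <linux/mlx5/device.h>

struct example_rx_stats {
	u64 lro_packets;
	u64 lro_bytes;
};

/* Sketch: nseg > 1 means several frames were aggregated into this CQE. */
static void example_account_lro(struct mlx5_cqe64 *cqe,
				struct example_rx_stats *stats)
{
	u8 nseg = get_cqe_lro_num_seg(cqe);

	if (nseg > 1) {
		stats->lro_packets++;
		stats->lro_bytes += be32_to_cpu(cqe->byte_cnt);
	}
}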
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1c54aa6f74fb..e2d067b1e67b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1149,6 +1149,7 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+int mlx5_lag_query_bond_speed(struct mlx5_core_dev *dev, u32 *speed);
bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index e9dcd4bf355d..775cb0c56865 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1071,7 +1071,9 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 esw_shared_ingress_acl[0x1];
u8 esw_uplink_ingress_acl[0x1];
u8 root_ft_on_other_esw[0x1];
- u8 reserved_at_a[0xf];
+ u8 reserved_at_a[0x1];
+ u8 esw_vport_state_max_tx_speed[0x1];
+ u8 reserved_at_c[0xd];
u8 esw_functions_changed[0x1];
u8 reserved_at_1a[0x1];
u8 ecpf_vport_exists[0x1];
@@ -5445,7 +5447,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 reserved_at_40[0x20];
- u8 reserved_at_60[0x18];
+ u8 max_tx_speed[0x10];
+ u8 reserved_at_70[0x8];
u8 admin_state[0x4];
u8 state[0x4];
};
@@ -7778,7 +7781,7 @@ struct mlx5_ifc_modify_vport_state_in_bits {
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_at_60[0x10];
+ u8 max_tx_speed[0x10];
u8 ingress_connect[0x1];
u8 egress_connect[0x1];
u8 ingress_connect_valid[0x1];
@@ -11006,7 +11009,9 @@ struct mlx5_ifc_qcam_access_reg_cap_mask {
};
struct mlx5_ifc_qcam_qos_feature_cap_mask {
- u8 qcam_qos_feature_cap_mask_127_to_1[0x7F];
+ u8 qcam_qos_feature_cap_mask_127_to_5[0x7B];
+ u8 qetcr_qshr_max_bw_val_msb[0x1];
+ u8 qcam_qos_feature_cap_mask_3_to_1[0x3];
u8 qpts_trust_both[0x1];
};
@@ -11962,8 +11967,7 @@ struct mlx5_ifc_ets_tcn_config_reg_bits {
u8 reserved_at_20[0xc];
u8 max_bw_units[0x4];
- u8 reserved_at_30[0x8];
- u8 max_bw_value[0x8];
+ u8 max_bw_value[0x10];
};
struct mlx5_ifc_ets_global_config_reg_bits {
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 1df9d9a57bbc..12d366b12e2e 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -112,7 +112,7 @@ enum mlx5e_ext_link_mode {
MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
- MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
+ MLX5E_1600GAUI_8_1600GBASE_CR8_KR8 = 23,
MLX5E_EXT_LINK_MODES_NUMBER,
};
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index f876bfc0669c..dfa2fe32217a 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -41,6 +41,8 @@
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
mlx5_core_is_pf(mdev))
+#define MLX5_MAX_TX_SPEED_UNIT 100
+
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
@@ -58,6 +60,10 @@ enum {
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state);
+int mlx5_query_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 op_mod,
+ u16 vport, u8 other_vport, u32 *max_tx_speed);
+int mlx5_modify_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 other_vport, u16 max_tx_speed);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, bool other, u8 *addr);
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
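The new accessor pair mirrors the admin-state helpers above; MLX5_MAX_TX_SPEED_UNIT (100) suggests the 16-bit firmware field counts units of 100 Mbps, but that granularity is an assumption to verify against the device documentation. A hedged sketch:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

/* Sketch, assuming max_tx_speed counts MLX5_MAX_TX_SPEED_UNIT Mbps units. */
static int example_set_vport_rate(struct mlx5_core_dev *mdev, u16 vport,
				  u32 mbps)
{
	u16 units = mbps / MLX5_MAX_TX_SPEED_UNIT;

	return mlx5_modify_vport_max_tx_speed(mdev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport, 1, units);
}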
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6f959d8ca4b4..dc1ad71a2a70 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,6 +36,7 @@
#include <linux/rcuwait.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/iommu-debug-pagealloc.h>
struct mempolicy;
struct anon_vma;
@@ -45,6 +46,7 @@ struct pt_regs;
struct folio_batch;
void arch_mm_preinit(void);
+void mm_core_init_early(void);
void mm_core_init(void);
void init_mm_internals(void);
@@ -359,7 +361,7 @@ enum {
DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
-#if defined(CONFIG_X86_USER_SHADOW_STACK)
+#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_RISCV_USER_CFI)
/*
* VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
* support core mm.
@@ -460,7 +462,8 @@ enum {
#define VM_PKEY_BIT4 VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
-#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
+#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
+ defined(CONFIG_RISCV_USER_CFI)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
#else
#define VM_SHADOW_STACK VM_NONE
@@ -608,7 +611,11 @@ enum {
/*
* Flags which should result in page tables being copied on fork. These are
* flags which indicate that the VMA maps page tables which cannot be
- * reconsistuted upon page fault, so necessitate page table copying upon
+ * reconstituted upon page fault, so necessitate page table copying upon fork.
+ *
+ * Note that these flags should be compared with the DESTINATION VMA not the
+ * source, as VM_UFFD_WP may not be propagated to destination, while all other
+ * flags will be.
*
* VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
* reasonably reconstructed on page fault.
@@ -1003,10 +1010,7 @@ static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
{
unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
- /* mmap read lock/VMA read lock must be held. */
- if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
- vma_assert_locked(vma);
-
+ vma_assert_stabilised(vma);
if (__vma_flag_atomic_valid(vma, bit))
set_bit((__force int)bit, bitmap);
}
@@ -2901,6 +2905,13 @@ static inline unsigned long get_mm_rss(struct mm_struct *mm)
get_mm_counter(mm, MM_SHMEMPAGES);
}
+static inline unsigned long get_mm_rss_sum(struct mm_struct *mm)
+{
+ return get_mm_counter_sum(mm, MM_FILEPAGES) +
+ get_mm_counter_sum(mm, MM_ANONPAGES) +
+ get_mm_counter_sum(mm, MM_SHMEMPAGES);
+}
+
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
return max(mm->hiwater_rss, get_mm_rss(mm));
@@ -2975,15 +2986,8 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl);
-static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl)
-{
- pte_t *ptep;
- __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
- return ptep;
-}
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl);
#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
@@ -3337,31 +3341,15 @@ static inline bool pagetable_pte_ctor(struct mm_struct *mm,
return true;
}
-pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
-static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
- pmd_t *pmdvalp)
-{
- pte_t *pte;
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
- __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
- return pte;
-}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
}
-pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp);
-static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp)
-{
- pte_t *pte;
-
- __cond_lock(RCU, __cond_lock(*ptlp,
- pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
- return pte;
-}
+pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
@@ -3536,7 +3524,7 @@ static inline unsigned long get_num_physpages(void)
}
/*
- * Using memblock node mappings, an architecture may initialise its
+ * FIXME: Using memblock node mappings, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in an
* architecture independent manner.
*
@@ -3551,7 +3539,7 @@ static inline unsigned long get_num_physpages(void)
* memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
-void free_area_init(unsigned long *max_zone_pfn);
+void arch_zone_limits_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
@@ -4133,12 +4121,16 @@ extern void __kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
+ iommu_debug_check_unmapped(page, numpages);
+
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 1);
}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
+ iommu_debug_check_unmapped(page, numpages);
+
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
@@ -4194,6 +4186,61 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
+#ifndef clear_pages
+/**
+ * clear_pages() - clear a page range for kernel-internal use.
+ * @addr: start address
+ * @npages: number of pages
+ *
+ * Use clear_user_pages() instead when clearing a page range to be
+ * mapped to user space.
+ *
+ * Does absolutely no exception handling.
+ *
+ * Note that even though the clearing operation is preemptible, clear_pages()
+ * does not (and on architectures where it reduces to a few long-running
+ * instructions, might not be able to) call cond_resched() to check if
+ * rescheduling is required.
+ *
+ * When running under preemptible models this is not a problem. Under
+ * cooperatively scheduled models, however, the caller is expected to
+ * limit @npages to no more than PROCESS_PAGES_NON_PREEMPT_BATCH.
+ */
+static inline void clear_pages(void *addr, unsigned int npages)
+{
+ do {
+ clear_page(addr);
+ addr += PAGE_SIZE;
+ } while (--npages);
+}
+#endif
+
+#ifndef PROCESS_PAGES_NON_PREEMPT_BATCH
+#ifdef clear_pages
+/*
+ * The architecture defines clear_pages(), and we assume that it is
+ * generally "fast". So choose a batch size large enough to allow the processor
+ * headroom for optimizing the operation and yet small enough that we see
+ * reasonable preemption latency for when this optimization is not possible
+ * (e.g. slow microarchitectures, memory bandwidth saturation).
+ *
+ * With a value of 32MB and assuming a memory bandwidth of ~10GBps, this should
+ * result in worst case preemption latency of around 3ms when clearing pages.
+ *
+ * (See comment above clear_pages() for why preemption latency is a concern
+ * here.)
+ */
+#define PROCESS_PAGES_NON_PREEMPT_BATCH (SZ_32M >> PAGE_SHIFT)
+#else /* !clear_pages */
+/*
+ * The architecture does not provide a clear_pages() implementation. Assume
+ * that clear_page() -- which clear_pages() will fall back to -- is relatively
+ * slow and choose a small value for PROCESS_PAGES_NON_PREEMPT_BATCH.
+ */
+#define PROCESS_PAGES_NON_PREEMPT_BATCH 1
+#endif
+#endif
+
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
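Per the clear_pages() kernel-doc above, cooperative-preemption callers must cap each call at PROCESS_PAGES_NON_PREEMPT_BATCH and reschedule in between. A sketch of that loop:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: clear a large kernel range in resched-friendly batches. */
static void example_clear_pages_resched(void *addr, unsigned long npages)
{
	while (npages) {
		unsigned int batch = min_t(unsigned long, npages,
					   PROCESS_PAGES_NON_PREEMPT_BATCH);

		clear_pages(addr, batch);
		addr += (unsigned long)batch * PAGE_SIZE;
		npages -= batch;
		cond_resched();
	}
}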
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 42af2292951d..8731606d8d36 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -752,8 +752,18 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
}
#endif
-#define VMA_LOCK_OFFSET 0x40000000
-#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
+/*
+ * While __vma_enter_locked() is working to ensure there are no read-locks held
+ * on a VMA (either while acquiring a VMA write lock or marking a VMA detached)
+ * we set the VM_REFCNT_EXCLUDE_READERS_FLAG in vma->vm_refcnt to indicate to
+ * vma_start_read() that the reference count should be left alone.
+ *
+ * See the comment describing vm_refcnt in vm_area_struct for details as to
+ * which values the VMA reference count can be.
+ */
+#define VM_REFCNT_EXCLUDE_READERS_BIT (30)
+#define VM_REFCNT_EXCLUDE_READERS_FLAG (1U << VM_REFCNT_EXCLUDE_READERS_BIT)
+#define VM_REFCNT_LIMIT (VM_REFCNT_EXCLUDE_READERS_FLAG - 1)
struct vma_numab_state {
/*
@@ -935,10 +945,10 @@ struct vm_area_struct {
/*
* Can only be written (using WRITE_ONCE()) while holding both:
* - mmap_lock (in write mode)
- * - vm_refcnt bit at VMA_LOCK_OFFSET is set
+ * - vm_refcnt bit at VM_REFCNT_EXCLUDE_READERS_FLAG is set
* Can be read reliably while holding one of:
* - mmap_lock (in read or write mode)
- * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
+ * - vm_refcnt bit at VM_REFCNT_EXCLUDE_READERS_BIT is set or vm_refcnt > 1
* Can be read unreliably (using READ_ONCE()) for pessimistic bailout
* while holding nothing (except RCU to keep the VMA struct allocated).
*
@@ -980,7 +990,44 @@ struct vm_area_struct {
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
- /* Unstable RCU readers are allowed to read this. */
+ /*
+ * Used to keep track of firstly, whether the VMA is attached, secondly,
+ * if attached, how many read locks are taken, and thirdly, if the
+ * VM_REFCNT_EXCLUDE_READERS_FLAG is set, whether any read locks held
+ * are currently in the process of being excluded.
+ *
+ * This value can be equal to:
+ *
+ * 0 - Detached. IMPORTANT: when the refcnt is zero, readers cannot
+ * increment it.
+ *
+ * 1 - Attached and either unlocked or write-locked. Write locks are
+ * identified via __is_vma_write_locked() which checks for equality of
+ * vma->vm_lock_seq and mm->mm_lock_seq.
+ *
+ * >1, < VM_REFCNT_EXCLUDE_READERS_FLAG - Read-locked or (unlikely)
+ * write-locked with other threads having temporarily incremented the
+ * reference count prior to determining it is write-locked and
+ * decrementing it again.
+ *
+ * VM_REFCNT_EXCLUDE_READERS_FLAG - Detached, pending
+ * __vma_end_exclude_readers() completion which will decrement the
+ * reference count to zero. IMPORTANT - at this stage no further readers
+ * can increment the reference count. It can only be reduced.
+ *
+ * VM_REFCNT_EXCLUDE_READERS_FLAG + 1 - A thread is either write-locking
+ * an attached VMA and has yet to invoke __vma_end_exclude_readers(),
+ * OR a thread is detaching a VMA and is waiting on a single spurious
+ * reader in order to decrement the reference count. IMPORTANT - as
+ * above, no further readers can increment the reference count.
+ *
+ * > VM_REFCNT_EXCLUDE_READERS_FLAG + 1 - A thread is either
+ * write-locking or detaching a VMA and is waiting on readers to
+ * exit. IMPORTANT - as above, no further readers can increment the
+ * reference count.
+ *
+ * NOTE: Unstable RCU readers are allowed to read this.
+ */
refcount_t vm_refcnt ____cacheline_aligned_in_smp;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map vmlock_dep_map;
@@ -1329,7 +1376,7 @@ struct mm_struct {
* The mm_cpumask needs to be at the end of mm_struct, because it
* is dynamically sized based on nr_cpu_ids.
*/
- unsigned long cpu_bitmap[];
+ char flexible_array[] __aligned(__alignof__(unsigned long));
};
/* Copy value to the first system word of mm flags, non-atomically. */
@@ -1366,19 +1413,24 @@ static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
+#define MM_STRUCT_FLEXIBLE_ARRAY_INIT \
+{ \
+ [0 ... sizeof(cpumask_t) + MM_CID_STATIC_SIZE - 1] = 0 \
+}
+
/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
unsigned long cpu_bitmap = (unsigned long)mm;
- cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpu_bitmap += offsetof(struct mm_struct, flexible_array);
cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return (struct cpumask *)&mm->cpu_bitmap;
+ return (struct cpumask *)&mm->flexible_array;
}
#ifdef CONFIG_LRU_GEN
@@ -1469,7 +1521,7 @@ static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
{
unsigned long bitmap = (unsigned long)mm;
- bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ bitmap += offsetof(struct mm_struct, flexible_array);
/* Skip cpu_bitmap */
bitmap += cpumask_size();
return (struct cpumask *)bitmap;
@@ -1495,7 +1547,7 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
mm_init_cid(mm, p);
return 0;
}
-#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
+# define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
static inline void mm_destroy_cid(struct mm_struct *mm)
{
@@ -1509,6 +1561,8 @@ static inline unsigned int mm_cid_size(void)
return cpumask_size() + bitmap_size(num_possible_cpus());
}
+/* Use 2 * NR_CPUS as worst case for static allocation. */
+# define MM_CID_STATIC_SIZE (2 * sizeof(cpumask_t))
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
@@ -1517,11 +1571,13 @@ static inline unsigned int mm_cid_size(void)
{
return 0;
}
+# define MM_CID_STATIC_SIZE 0
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
struct vm_fault;
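The cpu_bitmap[] to flexible_array[] rename reflects that the trailing storage now carries more than the cpumask; the accessors above imply the layout sketched here (the CID area exists only with CONFIG_SCHED_MM_CID):

/*
 * Trailing storage layout implied by mm_cpumask(), mm_cpus_allowed()
 * and mm_cid_size() (sketch):
 *
 *   flexible_array + 0                 mm_cpumask()
 *   flexible_array + cpumask_size()    mm_cpus_allowed()  (MM_CID only)
 *   ...                                per-CPU CID bitmap (MM_CID only)
 */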
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index a82aa80c0ba4..11bf319d78ec 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -88,4 +88,9 @@ struct tlbflush_unmap_batch {
#endif
};
+struct lazy_mmu_state {
+ u8 enable_count;
+ u8 pause_count;
+};
+
#endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index d53f72dba7fe..93eca48bc443 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -78,6 +78,43 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
#ifdef CONFIG_PER_VMA_LOCK
+#ifdef CONFIG_LOCKDEP
+#define __vma_lockdep_map(vma) (&vma->vmlock_dep_map)
+#else
+#define __vma_lockdep_map(vma) NULL
+#endif
+
+/*
+ * VMA locks do not behave like most ordinary locks found in the kernel, so we
+ * cannot quite have full lockdep tracking in the way we would ideally prefer.
+ *
+ * Read locks act as shared locks which exclude an exclusive lock being
+ * taken. We therefore mark these accordingly on read lock acquire/release.
+ *
+ * Write locks are acquired exclusively per-VMA, but released in a shared
+ * fashion; that is, upon vma_end_write_all() we update the mmap's seqcount
+ * such that the write lock is released.
+ *
+ * We therefore cannot track write locks per-VMA, nor do we try. Mitigating this
+ * is the fact that, of course, we do lockdep-track the mmap lock rwsem which
+ * must be held when taking a VMA write lock.
+ *
+ * We do, however, want to indicate that, during either acquisition of a VMA
+ * write lock or detachment of a VMA, we require the lock held to be
+ * exclusive, so we utilise lockdep to do so.
+ */
+#define __vma_lockdep_acquire_read(vma) \
+ lock_acquire_shared(__vma_lockdep_map(vma), 0, 1, NULL, _RET_IP_)
+#define __vma_lockdep_release_read(vma) \
+ lock_release(__vma_lockdep_map(vma), _RET_IP_)
+#define __vma_lockdep_acquire_exclusive(vma) \
+ lock_acquire_exclusive(__vma_lockdep_map(vma), 0, 0, NULL, _RET_IP_)
+#define __vma_lockdep_release_exclusive(vma) \
+ lock_release(__vma_lockdep_map(vma), _RET_IP_)
+/* Only meaningful if CONFIG_LOCK_STAT is defined. */
+#define __vma_lockdep_stat_mark_acquired(vma) \
+ lock_acquired(__vma_lockdep_map(vma), _RET_IP_)
+
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
seqcount_init(&mm->mm_lock_seq);
@@ -115,36 +152,81 @@ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_key;
- lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
+ lockdep_init_map(__vma_lockdep_map(vma), "vm_lock", &lockdep_key, 0);
#endif
if (reset_refcnt)
refcount_set(&vma->vm_refcnt, 0);
vma->vm_lock_seq = UINT_MAX;
}
-static inline bool is_vma_writer_only(int refcnt)
+/*
+ * This function determines whether the input VMA reference count describes a
+ * VMA which has excluded all VMA read locks.
+ *
+ * In the case of a detached VMA, we may incorrectly indicate that readers are
+ * excluded when one remains, because in that scenario we target a refcount of
+ * VM_REFCNT_EXCLUDE_READERS_FLAG, rather than the attached target of
+ * VM_REFCNT_EXCLUDE_READERS_FLAG + 1.
+ *
+ * However, the race window for that is very small so it is unlikely.
+ *
+ * Returns: true if readers are excluded, false otherwise.
+ */
+static inline bool __vma_are_readers_excluded(int refcnt)
{
/*
- * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
- * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
- * a detached vma happens only in vma_mark_detached() and is a rare
- * case, therefore most of the time there will be no unnecessary wakeup.
+ * See the comment describing the vm_area_struct->vm_refcnt field for
+ * details of possible refcnt values.
*/
- return (refcnt & VMA_LOCK_OFFSET) && refcnt <= VMA_LOCK_OFFSET + 1;
+ return (refcnt & VM_REFCNT_EXCLUDE_READERS_FLAG) &&
+ refcnt <= VM_REFCNT_EXCLUDE_READERS_FLAG + 1;
+}
+
+/*
+ * Actually decrement the VMA reference count.
+ *
+ * The function returns the reference count as it was immediately after the
+ * decrement took place. If it returns zero, the VMA is now detached.
+ */
+static inline __must_check unsigned int
+__vma_refcount_put_return(struct vm_area_struct *vma)
+{
+ int oldcnt;
+
+ if (__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt))
+ return 0;
+
+ return oldcnt - 1;
}
+/**
+ * vma_refcount_put() - Drop reference count in VMA vm_refcnt field due to a
+ * read-lock being dropped.
+ * @vma: The VMA whose reference count we wish to decrement.
+ *
+ * If we were the last reader, wake up threads waiting to obtain an exclusive
+ * lock.
+ */
static inline void vma_refcount_put(struct vm_area_struct *vma)
{
- /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
+ /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt. */
struct mm_struct *mm = vma->vm_mm;
- int oldcnt;
+ int newcnt;
- rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
- if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
+ __vma_lockdep_release_read(vma);
+ newcnt = __vma_refcount_put_return(vma);
- if (is_vma_writer_only(oldcnt - 1))
- rcuwait_wake_up(&mm->vma_writer_wait);
- }
+ /*
+ * __vma_start_exclude_readers() may be sleeping waiting for readers to
+ * drop their reference count, so wake it up if we were the last reader
+ * blocking it from being acquired.
+ *
+ * We may be raced by other readers temporarily incrementing the
+ * reference count; though the race window is very small, this might
+ * cause spurious wakeups.
+ */
+ if (newcnt && __vma_are_readers_excluded(newcnt))
+ rcuwait_wake_up(&mm->vma_writer_wait);
}
/*
@@ -159,10 +241,10 @@ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int
mmap_assert_locked(vma->vm_mm);
if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
- VMA_REF_LIMIT)))
+ VM_REFCNT_LIMIT)))
return false;
- rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+ __vma_lockdep_acquire_read(vma);
return true;
}
@@ -182,21 +264,31 @@ static inline void vma_end_read(struct vm_area_struct *vma)
vma_refcount_put(vma);
}
-/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
{
+ const struct mm_struct *mm = vma->vm_mm;
+
+ /* We must hold an exclusive write lock for this access to be valid. */
mmap_assert_write_locked(vma->vm_mm);
+ return mm->mm_lock_seq.sequence;
+}
+/*
+ * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
+ * write lock is held.
+ *
+ * Returns true if write-locked, otherwise false.
+ */
+static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
+{
/*
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
- return (vma->vm_lock_seq == *mm_lock_seq);
+ return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
}
-int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
- int state);
+int __vma_start_write(struct vm_area_struct *vma, int state);
/*
* Begin writing to a VMA.
@@ -205,12 +297,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
*/
static inline void vma_start_write(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
+ if (__is_vma_write_locked(vma))
return;
- __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
+ __vma_start_write(vma, TASK_UNINTERRUPTIBLE);
}
/**
@@ -229,26 +319,110 @@ static inline void vma_start_write(struct vm_area_struct *vma)
static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
+ if (__is_vma_write_locked(vma))
return 0;
- return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
+
+ return __vma_start_write(vma, TASK_KILLABLE);
}
+/**
+ * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
}
+/**
+ * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
+ * lock and is not detached.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
+ unsigned int refcnt;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ if (!lock_is_held(__vma_lockdep_map(vma)))
+ vma_assert_write_locked(vma);
+ return;
+ }
+
+ /*
+ * See the comment describing the vm_area_struct->vm_refcnt field for
+ * details of possible refcnt values.
+ */
+ refcnt = refcount_read(&vma->vm_refcnt);
+
+ /*
+ * In this case we're either read-locked, write-locked with temporary
+ * readers, or in the midst of excluding readers, all of which means
+ * we're locked.
+ */
+ if (refcnt > 1)
+ return;
+
+ /* It is a bug for the VMA to be detached here. */
+ VM_WARN_ON_ONCE_VMA(!refcnt, vma);
+
+ /*
+ * OK, the VMA has a reference count of 1 which means it is either
+ * unlocked and attached or write-locked, so assert that it is
+ * write-locked.
+ */
+ vma_assert_write_locked(vma);
+}
+
+/**
+ * vma_assert_stabilised() - assert that this VMA cannot be changed from
+ * underneath us either by having a VMA or mmap lock held.
+ * @vma: The VMA whose stability we wish to assess.
+ *
+ * If lockdep is enabled we can precisely ensure stability via either an mmap
+ * lock owned by us or a specific VMA lock.
+ *
+ * With lockdep disabled we may sometimes race with other threads acquiring the
+ * mmap read lock simultaneously with our VMA read lock.
+ */
+static inline void vma_assert_stabilised(struct vm_area_struct *vma)
+{
+ /*
+ * If another thread owns an mmap lock, it may go away at any time, and
+ * thus provides no guarantee of stability.
+ *
+ * If lockdep is enabled we can accurately determine if an mmap lock is
+ * held and owned by us. Otherwise we must approximate.
+ *
+ * It doesn't necessarily mean we are not stabilised, however, as we may
+ * hold a VMA read lock (not a write lock as this would require an owned
+ * mmap lock).
+ *
+ * If (assuming lockdep is not enabled) we were to assert a VMA read
+ * lock first we may also run into issues, as other threads can hold VMA
+ * read locks simultaneously with us.
+ *
+ * Therefore if lockdep is not enabled we risk a false negative (i.e. no
+ * assert fired). If accurate checking is required, enable lockdep.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ if (lockdep_is_held(&vma->vm_mm->mmap_lock))
+ return;
+ } else {
+ if (rwsem_is_locked(&vma->vm_mm->mmap_lock))
+ return;
+ }
- VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
- !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ /*
+ * We're not stabilised by the mmap lock, so assert that we're
+ * stabilised by a VMA lock.
+ */
+ vma_assert_locked(vma);
+}
+
+static inline bool vma_is_attached(struct vm_area_struct *vma)
+{
+ return refcount_read(&vma->vm_refcnt);
}
/*
@@ -258,12 +432,12 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
*/
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
- WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
+ WARN_ON_ONCE(!vma_is_attached(vma));
}
static inline void vma_assert_detached(struct vm_area_struct *vma)
{
- WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
+ WARN_ON_ONCE(vma_is_attached(vma));
}
static inline void vma_mark_attached(struct vm_area_struct *vma)
@@ -273,7 +447,28 @@ static inline void vma_mark_attached(struct vm_area_struct *vma)
refcount_set_release(&vma->vm_refcnt, 1);
}
-void vma_mark_detached(struct vm_area_struct *vma);
+void __vma_exclude_readers_for_detach(struct vm_area_struct *vma);
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_attached(vma);
+
+ /*
+ * The VMA still being attached (refcnt > 0) is unlikely, because the
+ * vma has already been write-locked and readers can increment vm_refcnt
+ * only temporarily before they check vm_lock_seq, realize the vma is
+ * locked and drop the vm_refcnt back. That is a narrow window for
+ * observing a raised vm_refcnt.
+ *
+ * See the comment describing the vm_area_struct->vm_refcnt field for
+ * details of possible refcnt values.
+ */
+ if (likely(!__vma_refcount_put_return(vma)))
+ return;
+
+ __vma_exclude_readers_for_detach(vma);
+}
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
@@ -327,6 +522,12 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
mmap_assert_locked(vma->vm_mm);
}
+static inline void vma_assert_stabilised(struct vm_area_struct *vma)
+{
+ /* With no VMA locks, either mmap lock (read or write) suffices to stabilise. */
+ mmap_assert_locked(vma->vm_mm);
+}
+
#endif /* CONFIG_PER_VMA_LOCK */
static inline void mmap_write_lock(struct mm_struct *mm)
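Pulling the refcount states together, a reader's fast path amounts to: take a bounded reference, which fails when the VMA is detached (0) or readers are being excluded (flag set), then recheck the write-lock seqnum and back off if a writer owns the VMA. A simplified sketch, not a verbatim copy of vma_start_read():

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Simplified reader fast path (sketch). */
static bool example_vma_try_read_lock(struct vm_area_struct *vma,
				      unsigned int mm_seq)
{
	int oldcnt;

	if (!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
						     VM_REFCNT_LIMIT))
		return false;	/* detached, or readers being excluded */

	if (unlikely(READ_ONCE(vma->vm_lock_seq) == mm_seq)) {
		vma_refcount_put(vma);	/* writer holds it: back off */
		return false;
	}
	return true;
}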
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index fed1f5f4a8d3..4534bf462aac 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -78,6 +78,7 @@ struct sdio_driver {
int (*probe)(struct sdio_func *, const struct sdio_device_id *);
void (*remove)(struct sdio_func *);
+ void (*shutdown)(struct sdio_func *);
struct device_driver drv;
};
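The new shutdown hook lets an SDIO function driver quiesce its card on reboot or power-off, symmetric with remove. A sketch of wiring it up (driver and callback names are illustrative):

#include <linux/mmc/sdio_func.h>

static int example_sdio_probe(struct sdio_func *func,
			      const struct sdio_device_id *id)
{
	return 0;
}

static void example_sdio_remove(struct sdio_func *func)
{
}

static void example_sdio_shutdown(struct sdio_func *func)
{
	/* Quiesce the function before the system powers off or reboots. */
}

static struct sdio_driver example_sdio_driver = {
	.name		= "example",
	.probe		= example_sdio_probe,
	.remove		= example_sdio_remove,
	.shutdown	= example_sdio_shutdown,
};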
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 14a45979cccc..ab60ffba08f5 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -47,6 +47,15 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
BUG(); \
} \
} while (0)
+#define VM_WARN_ON_PAGE(cond, page) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(page, "VM_WARN_ON_PAGE(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
@@ -122,6 +131,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
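Unlike VM_WARN_ON_ONCE_PAGE, the new VM_WARN_ON_PAGE fires (and dumps the page) on every violation; like its siblings it evaluates to the condition, so it composes with control flow. Usage sketch:

#include <linux/mm.h>

/* Sketch: warn with a page dump and bail out; fires every time. */
static int example_check_head(struct page *page)
{
	if (VM_WARN_ON_PAGE(PageTail(page), page))
		return -EINVAL;

	return 0;
}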
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index ac01dc4eb2ce..ed3dd0f3fe19 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -24,7 +24,7 @@ static inline void leave_mm(void) { }
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
-# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_DOMAIN)
#else
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75ef7c9f9307..3e51190a55e4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1534,14 +1534,27 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
-void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
- enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
+
+enum kswapd_clear_hopeless_reason {
+ KSWAPD_CLEAR_HOPELESS_OTHER = 0,
+ KSWAPD_CLEAR_HOPELESS_KSWAPD,
+ KSWAPD_CLEAR_HOPELESS_DIRECT,
+ KSWAPD_CLEAR_HOPELESS_PCP,
+};
+
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
+ enum zone_type highest_zoneidx);
+void kswapd_try_clear_hopeless(struct pglist_data *pgdat,
+ unsigned int order, int highest_zoneidx);
+void kswapd_clear_hopeless(pg_data_t *pgdat, enum kswapd_clear_hopeless_reason reason);
+bool kswapd_test_hopeless(pg_data_t *pgdat);
+
/*
* Memory initialization context, use to differentiate memory added by
* the platform statically or via memory hotplug interface.
@@ -1648,14 +1661,15 @@ static inline int is_highmem(const struct zone *zone)
return is_highmem_idx(zone_idx(zone));
}
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void);
-#else
+bool has_managed_zone(enum zone_type zone);
static inline bool has_managed_dma(void)
{
+#ifdef CONFIG_ZONE_DMA
+ return has_managed_zone(ZONE_DMA);
+#else
return false;
-}
#endif
+}
#ifndef CONFIG_NUMA
@@ -2285,9 +2299,7 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
#define pfn_to_nid(pfn) (0)
#endif
-void sparse_init(void);
#else
-#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
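has_managed_dma() is now a thin wrapper over the generalised has_managed_zone(), so callers can probe other zones directly. A hedged sketch (assumes a CONFIG_ZONE_DMA32 kernel):

#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Sketch: constrain the allocation only when ZONE_DMA32 has managed pages. */
static struct page *example_alloc_dma32(void)
{
	gfp_t gfp = GFP_KERNEL;

	if (has_managed_zone(ZONE_DMA32))
		gfp |= __GFP_DMA32;

	return alloc_page(gfp);
}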
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 24eb5a88a5c5..5b1725fe9707 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -609,7 +609,6 @@ struct platform_device_id {
kernel_ulong_t driver_data;
};
-#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
diff --git a/include/linux/module.h b/include/linux/module.h
index d80c3ea57472..14f391b186c6 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -151,16 +151,10 @@ extern void cleanup_module(void);
#define __init_or_module
#define __initdata_or_module
#define __initconst_or_module
-#define __INIT_OR_MODULE .text
-#define __INITDATA_OR_MODULE .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
#else
#define __init_or_module __init
#define __initdata_or_module __initdata
#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
struct module_kobject *lookup_or_create_module_kobject(const char *name);
@@ -748,6 +742,15 @@ static inline void __module_get(struct module *module)
__mod ? __mod->name : "kernel"; \
})
+static inline const unsigned char *module_buildid(struct module *mod)
+{
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ return mod->build_id;
+#else
+ return NULL;
+#endif
+}
+
/* Dereference module function descriptor */
void *dereference_module_function_descriptor(struct module *mod, void *ptr);
@@ -770,8 +773,6 @@ static inline bool is_livepatch_module(struct module *mod)
#endif
}
-void set_module_sig_enforced(void);
-
void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data);
#else /* !CONFIG_MODULES... */
@@ -866,10 +867,6 @@ static inline bool module_requested_async_probing(struct module *module)
}
-static inline void set_module_sig_enforced(void)
-{
-}
-
/* Dereference module function descriptor */
static inline
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
@@ -925,6 +922,8 @@ static inline bool retpoline_module_ok(bool has_retpoline)
#ifdef CONFIG_MODULE_SIG
bool is_module_sig_enforced(void);
+void set_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
@@ -935,6 +934,10 @@ static inline bool is_module_sig_enforced(void)
return false;
}
+static inline void set_module_sig_enforced(void)
+{
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
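module_buildid() gives callers a NULL-safe accessor instead of open-coded CONFIG_STACKTRACE_BUILD_ID ifdefs. Usage sketch; the 20-byte %phN width assumes BUILD_ID_SIZE_MAX:

#include <linux/module.h>
#include <linux/printk.h>

/* Sketch: report a module's build ID when the kernel records one. */
static void example_report_module(struct module *mod)
{
	const unsigned char *bid = module_buildid(mod);

	if (bid)
		pr_info("%s buildid: %20phN\n", mod->name, bid);
}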
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 915f32f7d888..7d22d4c4ea2e 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -2,9 +2,14 @@
#ifndef _LINUX_MODULE_PARAMS_H
#define _LINUX_MODULE_PARAMS_H
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/stringify.h>
-#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
/*
* The maximum module name length, including the NUL byte.
@@ -355,8 +360,8 @@ static inline void kernel_param_unlock(struct module *mod)
/**
* __core_param_cb - similar like core_param, with a set/get ops instead of type.
* @name: the name of the cmdline and sysfs parameter (often the same as var)
- * @var: the variable
* @ops: the set & get operations for this parameter.
+ * @arg: the variable
* @perm: visibility in sysfs
*
* Ideally this should be called 'core_param_cb', but the name has been
@@ -390,7 +395,7 @@ static inline void kernel_param_unlock(struct module *mod)
* @name1: parameter name 1
* @name2: parameter name 2
*
- * Returns true if the two parameter names are equal.
+ * Returns: true if the two parameter names are equal.
* Dashes (-) are considered equal to underscores (_).
*/
extern bool parameq(const char *name1, const char *name2);
@@ -402,6 +407,10 @@ extern bool parameq(const char *name1, const char *name2);
* @n: the length to compare
*
* Similar to parameq(), except it compares @n characters.
+ *
+ * Returns: true if the first @n characters of the two parameter names
+ * are equal.
+ * Dashes (-) are considered equal to underscores (_).
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8003e3218c46..fa41eed62868 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -49,12 +49,12 @@ typedef struct arch_msi_msg_data {
#endif
/**
- * msi_msg - Representation of a MSI message
+ * struct msi_msg - Representation of a MSI message
* @address_lo: Low 32 bits of msi message address
- * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @arch_addr_lo: Architecture specific shadow of @address_lo
* @address_hi: High 32 bits of msi message address
* (only used when device supports it)
- * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @arch_addr_hi: Architecture specific shadow of @address_hi
* @data: MSI message data (usually 16 bits)
* @arch_data: Architecture specific shadow of @data
*/
@@ -91,7 +91,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
*
* @msi_mask: [PCI MSI] MSI cached mask bits
* @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
@@ -101,6 +101,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
* @can_mask: [PCI MSI/X] Masking supported?
* @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
* @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @msi_attrib: [PCI MSI/X] Compound struct of MSI/X attributes
* @mask_pos: [PCI MSI] Mask register position
* @mask_base: [PCI MSI-X] Mask register base address
*/
@@ -169,7 +170,7 @@ struct msi_desc_data {
* Only used if iommu_msi_shift != 0
* @iommu_msi_shift: Indicates how many bits of the original address should be
* preserved when using iommu_msi_iova.
- * @sysfs_attr: Pointer to sysfs device attribute
+ * @sysfs_attrs: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
@@ -220,7 +221,7 @@ enum msi_desc_filter {
/**
* struct msi_dev_domain - The internals of MSI domain info per device
* @store: Xarray for storing MSI descriptor pointers
- * @irqdomain: Pointer to a per device interrupt domain
+ * @domain: Pointer to a per device interrupt domain
*/
struct msi_dev_domain {
struct xarray store;
@@ -702,7 +703,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
-u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
+u32 pci_msi_map_rid_ctlr_node(struct irq_domain *domain, struct pci_dev *pdev,
+ struct fwnode_handle **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index ce76f5c632e1..6a024cf1c53a 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -26,8 +26,14 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_WR_EN_DIS_1S_0_0_OP(enable) \
- SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
+#define SPINAND_WR_EN_1S_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x06, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_DIS_1S_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x04, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -233,10 +239,75 @@
SPI_MEM_OP_DATA_OUT(len, buf, 8))
/**
- * Standard SPI NAND flash commands
+ * Octal DDR SPI NAND flash operations
*/
-#define SPINAND_CMD_PROG_LOAD_X4 0x32
-#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
+
+#define SPINAND_RESET_8D_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0xff, 8), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_8D_8D_8D_OP(naddr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x9f, 8), \
+ SPI_MEM_DTR_OP_ADDR(naddr, 0, 8), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8))
+
+#define SPINAND_WR_EN_8D_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x06, 8), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_DIS_8D_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x04, 8), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_SET_FEATURE_8D_8D_8D_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x1f, 8), \
+ SPI_MEM_DTR_OP_RPT_ADDR(reg, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_DTR_OP_DATA_OUT(2, valptr, 8))
+
+#define SPINAND_GET_FEATURE_8D_8D_8D_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x0f, 8), \
+ SPI_MEM_DTR_OP_RPT_ADDR(reg, 8), \
+ SPI_MEM_DTR_OP_DUMMY(14, 8), \
+ SPI_MEM_DTR_OP_DATA_IN(2, valptr, 8))
+
+#define SPINAND_BLK_ERASE_8D_8D_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0xd8, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_8D_8D_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x13, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_8D_8D_8D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x9d, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PROG_EXEC_8D_8D_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x10, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD_8D_8D_8D_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD((reset ? 0xc2 : 0xc4), 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_DTR_OP_DATA_OUT(len, buf, 8))
/* feature register */
#define REG_BLOCK_LOCK 0xa0
@@ -261,7 +332,7 @@
struct spinand_op;
struct spinand_device;
-#define SPINAND_MAX_ID_LEN 5
+#define SPINAND_MAX_ID_LEN 6
/*
* For erase, write and read operation, we got the following timings :
* tBERS (erase) 1ms to 4ms
@@ -287,7 +358,7 @@ struct spinand_device;
/**
* struct spinand_id - SPI NAND id structure
- * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ * @data: buffer containing the id bytes. Currently 6 bytes large, but can
* be extended if required
* @len: ID length
*/
@@ -354,6 +425,7 @@ struct spinand_manufacturer {
/* SPI NAND manufacturers */
extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer dosilicon_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_8c_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
extern const struct spinand_manufacturer fmsh_spinand_manufacturer;
@@ -482,6 +554,16 @@ struct spinand_user_otp {
};
/**
+ * enum spinand_bus_interface - SPI NAND bus interface types
+ * @SSDR: Bus configuration supporting all 1S-XX-XX operations, including dual and quad
+ * @ODTR: Bus configuration supporting only 8D-8D-8D operations
+ */
+enum spinand_bus_interface {
+ SSDR,
+ ODTR,
+};
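
As an illustration (not part of this patch), a vendor configure_chip() hook honouring the requested interface might look like the sketch below; the feature-register address (0x00) and the IO-mode values are hypothetical, and the spinand_fill_set_feature_op() helper declared later in this header is assumed:

static int example_configure_chip(struct spinand_device *spinand,
				  enum spinand_bus_interface iface)
{
	/* Hypothetical IO-mode codes: 0xe7 selects octal DTR, 0xff single SDR */
	u8 val[2] = { iface == ODTR ? 0xe7 : 0xff, 0x00 };
	struct spi_mem_op op = spinand_fill_set_feature_op(spinand, 0x00, val);

	return spi_mem_exec_op(spinand->spimem, &op);
}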
+
+/**
* struct spinand_info - Structure used to describe SPI NAND chips
* @model: model name
* @devid: device ID
@@ -493,6 +575,7 @@ struct spinand_user_otp {
* @op_variants.read_cache: variants of the read-cache operation
* @op_variants.write_cache: variants of the write-cache operation
* @op_variants.update_cache: variants of the update-cache operation
+ * @vendor_ops: vendor specific operations
* @select_target: function used to select a target/die. Required only for
* multi-die chips
* @configure_chip: Align the chip configuration with the core settings
@@ -517,9 +600,11 @@ struct spinand_info {
const struct spinand_op_variants *write_cache;
const struct spinand_op_variants *update_cache;
} op_variants;
+ const struct spinand_op_variants *vendor_ops;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
- int (*configure_chip)(struct spinand_device *spinand);
+ int (*configure_chip)(struct spinand_device *spinand,
+ enum spinand_bus_interface iface);
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
struct spinand_fact_otp fact_otp;
@@ -543,6 +628,9 @@ struct spinand_info {
.update_cache = __update, \
}
+#define SPINAND_INFO_VENDOR_OPS(__ops) \
+ .vendor_ops = __ops
+
#define SPINAND_ECCINFO(__ooblayout, __get_status) \
.eccinfo = { \
.ooblayout = __ooblayout, \
@@ -600,16 +688,46 @@ struct spinand_dirmap {
};
/**
+ * struct spinand_mem_ops - SPI NAND memory operations
+ * @reset: reset op template
+ * @readid: read ID op template
+ * @wr_en: write enable op template
+ * @wr_dis: write disable op template
+ * @set_feature: set feature op template
+ * @get_feature: get feature op template
+ * @blk_erase: block erase op template
+ * @page_read: page read op template
+ * @prog_exec: prog exec op template
+ * @read_cache: read cache op template
+ * @write_cache: write cache op template
+ * @update_cache: update cache op template
+ */
+struct spinand_mem_ops {
+ struct spi_mem_op reset;
+ struct spi_mem_op readid;
+ struct spi_mem_op wr_en;
+ struct spi_mem_op wr_dis;
+ struct spi_mem_op set_feature;
+ struct spi_mem_op get_feature;
+ struct spi_mem_op blk_erase;
+ struct spi_mem_op page_read;
+ struct spi_mem_op prog_exec;
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+};
+
+/**
* struct spinand_device - SPI NAND device instance
* @base: NAND device instance
* @spimem: pointer to the SPI mem object
* @lock: lock used to serialize accesses to the NAND
* @id: NAND ID as returned by READ_ID
* @flags: NAND flags
- * @op_templates: various SPI mem op templates
- * @op_templates.read_cache: read cache op template
- * @op_templates.write_cache: write cache op template
- * @op_templates.update_cache: update cache op template
+ * @ssdr_op_templates: Templates for all single SDR SPI mem operations
+ * @odtr_op_templates: Templates for all octal DTR SPI mem operations
+ * @op_templates: Templates for all SPI mem operations
+ * @bus_iface: Current bus interface
* @select_target: select a specific target/die. Usually called before sending
* a command addressing a page or an eraseblock embedded in
* this die. Only required if your chip exposes several dies
@@ -643,11 +761,10 @@ struct spinand_device {
struct spinand_id id;
u32 flags;
- struct {
- const struct spi_mem_op *read_cache;
- const struct spi_mem_op *write_cache;
- const struct spi_mem_op *update_cache;
- } op_templates;
+ struct spinand_mem_ops ssdr_op_templates;
+ struct spinand_mem_ops odtr_op_templates;
+ struct spinand_mem_ops *op_templates;
+ enum spinand_bus_interface bus_iface;
struct spinand_dirmap *dirmaps;
@@ -664,7 +781,8 @@ struct spinand_device {
const struct spinand_manufacturer *manufacturer;
void *priv;
- int (*configure_chip)(struct spinand_device *spinand);
+ int (*configure_chip)(struct spinand_device *spinand,
+ enum spinand_bus_interface iface);
bool cont_read_possible;
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
@@ -677,6 +795,14 @@ struct spinand_device {
unsigned int retry_mode);
};
+struct spi_mem_op spinand_fill_wr_en_op(struct spinand_device *spinand);
+struct spi_mem_op spinand_fill_set_feature_op(struct spinand_device *spinand, u64 reg, const void *valptr);
+struct spi_mem_op spinand_fill_get_feature_op(struct spinand_device *spinand, u64 reg, void *valptr);
+struct spi_mem_op spinand_fill_prog_exec_op(struct spinand_device *spinand, u64 addr);
+
+#define SPINAND_OP(spinand, op_name, ...) \
+ spinand_fill_ ## op_name ## _op(spinand, ##__VA_ARGS__)
+
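As a usage sketch (the caller is hypothetical), a write-enable followed by a status read through the interface-specific templates could look like this; @status must point to a 2-byte buffer, since the octal-DTR get_feature template transfers two bytes:

static int example_write_enable(struct spinand_device *spinand, u8 *status)
{
	struct spi_mem_op op = SPINAND_OP(spinand, wr_en);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	op = SPINAND_OP(spinand, get_feature, REG_STATUS, status);
	return spi_mem_exec_op(spinand->spimem, &op);
}
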
/**
* mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
* @mtd: MTD instance
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bf535f0118bb..ecaa0440f6ec 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -182,13 +182,13 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
- unsigned int subclass);
+ unsigned int subclass) __cond_acquires(0, lock);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
- unsigned int subclass, struct lockdep_map *nest_lock);
-extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
+ unsigned int subclass, struct lockdep_map *nest_lock) __cond_acquires(0, lock);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
@@ -211,10 +211,10 @@ do { \
_mutex_lock_killable(lock, subclass, NULL)
#else
-extern void mutex_lock(struct mutex *lock);
-extern int __must_check mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check mutex_lock_killable(struct mutex *lock);
-extern void mutex_lock_io(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock) __acquires(lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock) __cond_acquires(0, lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock) __cond_acquires(0, lock);
+extern void mutex_lock_io(struct mutex *lock) __acquires(lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -232,7 +232,7 @@ extern void mutex_lock_io(struct mutex *lock);
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) \
( \
@@ -242,17 +242,27 @@ extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest
#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
#else
-extern int mutex_trylock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif
-extern void mutex_unlock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock) __releases(lock);
-extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_acquires(true, lock);
-DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
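
A brief usage sketch of the scope-based guards defined above (callers hypothetical): mutex_unlock() runs automatically on every exit path, and the _intr variant lets the caller bail out when the lock attempt is interrupted:

static int example_update(struct mutex *m, int *val)
{
	guard(mutex)(m);	/* unlocked when the function returns */

	(*val)++;
	return 0;
}

static int example_update_intr(struct mutex *m, int *val)
{
	scoped_cond_guard(mutex_intr, return -EINTR, m) {
		(*val)++;
	}
	return 0;
}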
extern unsigned long mutex_get_owner(struct mutex *lock);
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index fdf7f515fde8..80975935ec48 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -38,7 +38,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct mutex {
+context_lock_struct(mutex) {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -59,7 +59,7 @@ struct mutex {
*/
#include <linux/rtmutex.h>
-struct mutex {
+context_lock_struct(mutex) {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d99b0fbc1942..d4e6e00bb90a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1831,6 +1831,8 @@ enum netdev_reg_state {
*
* @mpls_features: Mask of features inheritable by MPLS
* @gso_partial_features: value(s) from NETIF_F_GSO\*
+ *	@mangleid_features:	Mask of features requiring MANGLEID; these are
+ *				disabled together with it.
*
* @ifindex: interface index
* @group: The group the device belongs to
@@ -2219,6 +2221,7 @@ struct net_device {
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
+ netdev_features_t mangleid_features;
unsigned int min_mtu;
unsigned int max_mtu;
@@ -5163,8 +5166,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
-/* RSS keys are 40 or 52 bytes long */
-#define NETDEV_RSS_KEY_LEN 52
+#define NETDEV_RSS_KEY_LEN 256
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 34ce5d2f37a2..9ee7014400e8 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
-#include <asm/byteorder.h>
-#include <net/gre.h>
-#include <net/pptp.h>
struct nf_ct_gre {
unsigned int stream_timeout;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e947af6a3684..d87be1f25273 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -598,6 +598,10 @@ enum {
#define FATTR4_WORD2_TIME_DELEG_ACCESS BIT(FATTR4_TIME_DELEG_ACCESS - 64)
#define FATTR4_WORD2_TIME_DELEG_MODIFY BIT(FATTR4_TIME_DELEG_MODIFY - 64)
#define FATTR4_WORD2_OPEN_ARGUMENTS BIT(FATTR4_OPEN_ARGUMENTS - 64)
+#define FATTR4_WORD2_ACL_TRUEFORM BIT(FATTR4_ACL_TRUEFORM - 64)
+#define FATTR4_WORD2_ACL_TRUEFORM_SCOPE BIT(FATTR4_ACL_TRUEFORM_SCOPE - 64)
+#define FATTR4_WORD2_POSIX_DEFAULT_ACL BIT(FATTR4_POSIX_DEFAULT_ACL - 64)
+#define FATTR4_WORD2_POSIX_ACCESS_ACL BIT(FATTR4_POSIX_ACCESS_ACL - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c58b870f31ee..4daee27fa5eb 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -115,9 +115,7 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */
-#if IS_ENABLED(CONFIG_NFS_V4_1)
wait_queue_head_t cl_lock_waitq;
-#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
/* Our own IP address, as a null-terminated string.
@@ -259,6 +257,10 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ spinlock_t delegations_lock;
+ struct list_head delegations_return;
+ struct list_head delegations_lru;
+ struct list_head delegations_delayed;
atomic_long_t nr_active_delegations;
unsigned int delegation_hash_mask;
struct hlist_head *delegation_hash_table;
@@ -266,9 +268,7 @@ struct nfs_server {
struct list_head ss_src_copies;
unsigned long delegation_flags;
-#define NFS4SERV_DELEGRETURN (1)
-#define NFS4SERV_DELEGATION_EXPIRED (2)
-#define NFS4SERV_DELEGRETURN_DELAYED (3)
+#define NFS4SERV_DELEGATION_EXPIRED (1)
unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 79fe2dfb470f..ff1f12aa73d2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -209,6 +209,7 @@ struct nfs4_sequence_args {
};
struct nfs4_sequence_res {
+ const struct nfs4_sequence_slot_ops *sr_slot_ops;
struct nfs4_slot *sr_slot; /* slot used to send request */
unsigned long sr_timestamp;
int sr_status; /* sequence operation status */
@@ -1323,10 +1324,6 @@ struct nfs4_fsid_present_res {
unsigned char renew:1;
};
-#endif /* CONFIG_NFS_V4 */
-
-#ifdef CONFIG_NFS_V4_1
-
struct pnfs_commit_bucket {
struct list_head written;
struct list_head committing;
@@ -1466,7 +1463,7 @@ struct nfs41_free_stateid_res {
struct pnfs_ds_commit_info {
};
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_2
struct nfs42_falloc_args {
@@ -1849,7 +1846,7 @@ struct nfs_rpc_ops {
struct iattr *iattr,
int *);
int (*have_delegation)(struct inode *, fmode_t, int);
- int (*return_delegation)(struct inode *);
+ void (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index bd38648c998d..204c92462f3c 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -157,10 +157,10 @@ static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline bool __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
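
The new bool return mirrors bitmap_and(), which reports whether the destination is non-empty; a usage sketch (caller hypothetical):

static bool example_has_common_nodes(const nodemask_t *a, const nodemask_t *b)
{
	nodemask_t tmp;

	return nodes_and(tmp, *a, *b);	/* true if any node is in both masks */
}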
#define nodes_or(dst, src1, src2) \
@@ -181,10 +181,10 @@ static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline bool __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
diff --git a/include/linux/ns/ns_common_types.h b/include/linux/ns/ns_common_types.h
index b332b019b29c..0014fbc1c626 100644
--- a/include/linux/ns/ns_common_types.h
+++ b/include/linux/ns/ns_common_types.h
@@ -108,11 +108,13 @@ extern const struct proc_ns_operations utsns_operations;
* @ns_tree: namespace tree nodes and active reference count
*/
struct ns_common {
+ struct {
+ refcount_t __ns_ref; /* do not use directly */
+ } ____cacheline_aligned_in_smp;
u32 ns_type;
struct dentry *stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
- refcount_t __ns_ref; /* do not use directly */
union {
struct ns_tree;
struct rcu_head ns_rcu;
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 4d103ac8f5c7..b8710c825d64 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -162,8 +162,7 @@ void nubus_seq_write_rsrc_mem(struct seq_file *m,
unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
/* Declarations relating to driver model objects */
-int nubus_parent_device_register(void);
-int nubus_device_register(struct nubus_board *board);
+int nubus_device_register(struct device *parent, struct nubus_board *board);
int nubus_driver_register(struct nubus_driver *ndrv);
void nubus_driver_unregister(struct nubus_driver *ndrv);
int nubus_proc_show(struct seq_file *m, void *data);
diff --git a/include/linux/of.h b/include/linux/of.h
index 9bbdcf25a2b4..be6ec4916adf 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1485,6 +1485,13 @@ static inline int of_property_read_s32(const struct device_node *np,
#define for_each_compatible_node(dn, type, compatible) \
for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
dn = of_find_compatible_node(dn, type, compatible))
+
+#define for_each_compatible_node_scoped(dn, type, compatible) \
+ for (struct device_node *dn __free(device_node) = \
+ of_find_compatible_node(NULL, type, compatible); \
+ dn; \
+ dn = of_find_compatible_node(dn, type, compatible))
+
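Usage sketch ("acme,example" and the body are hypothetical): the scoped variant declares the iterator itself and releases the node reference via __free(device_node), so breaking out early needs no of_node_put():

static void example_scan(void)
{
	for_each_compatible_node_scoped(np, NULL, "acme,example") {
		if (of_device_is_available(np))
			pr_info("%pOF is available\n", np);
	}
}
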
#define for_each_matching_node(dn, matches) \
for (dn = of_find_matching_node(NULL, matches); dn; \
dn = of_find_matching_node(dn, matches))
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1c2bc0281807..2a64d8cecaae 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -11,6 +11,30 @@
typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+struct of_imap_parser {
+ struct device_node *node;
+ const __be32 *imap;
+ const __be32 *imap_end;
+ u32 parent_offset;
+};
+
+struct of_imap_item {
+ struct of_phandle_args parent_args;
+ u32 child_imap_count;
+	u32 child_imap[16];	/* Arbitrary size. Should be
+				 * #address-cells + #interrupt-cells, but we
+				 * avoid allocation and expect 16 entries to
+				 * be enough.
+				 */
+};
+
+/*
+ * If the iterator is exited prematurely (break, goto, return), of_node_put()
+ * must be called on item.parent_args.np.
+ */
+#define for_each_of_imap_item(parser, item) \
+ for (; of_imap_parser_one(parser, item);)
+
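Usage sketch (the stopping condition is arbitrary): iterate the interrupt-map of a node, dropping the parent node reference on early exit as the comment above requires:

static void example_scan_imap(struct device_node *node)
{
	struct of_imap_parser parser;
	struct of_imap_item item;

	if (of_imap_parser_init(&parser, node, &item) < 0)
		return;

	for_each_of_imap_item(&parser, &item) {
		if (item.parent_args.args_count == 0) {
			of_node_put(item.parent_args.np);	/* early exit */
			break;
		}
	}
}
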
/*
* Workarounds only applied to 32bit powermac machines
*/
@@ -49,6 +73,11 @@ extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern int of_imap_parser_init(struct of_imap_parser *parser,
+ struct device_node *node,
+ struct of_imap_item *item);
+extern struct of_imap_item *of_imap_parser_one(struct of_imap_parser *parser,
+ struct of_imap_item *item);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
const struct device_node *np,
enum irq_domain_bus_token token);
@@ -92,7 +121,17 @@ static inline void *of_irq_find_parent(struct device_node *child)
{
return NULL;
}
-
+static inline int of_imap_parser_init(struct of_imap_parser *parser,
+ struct device_node *node,
+ struct of_imap_item *item)
+{
+ return -ENOSYS;
+}
+static inline struct of_imap_item *of_imap_parser_one(struct of_imap_parser *parser,
+ struct of_imap_item *item)
+{
+ return NULL;
+}
static inline struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 6de479ebbe5d..ebce402854de 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -145,6 +145,11 @@ enum OID {
OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+ /* NIST FIPS-204 ML-DSA */
+ OID_id_ml_dsa_44, /* 2.16.840.1.101.3.4.3.17 */
+ OID_id_ml_dsa_65, /* 2.16.840.1.101.3.4.3.18 */
+ OID_id_ml_dsa_87, /* 2.16.840.1.101.3.4.3.19 */
+
OID__NR
};
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 736f633b2d5f..a5e95dbce220 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -36,12 +36,6 @@
#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
#define type_min(t) __type_min(typeof(t))
-/*
- * Avoids triggering -Wtype-limits compilation warning,
- * while using unsigned data types to check a < 0.
- */
-#define is_non_negative(a) ((a) > 0 || (a) == 0)
-#define is_negative(a) (!(is_non_negative(a)))
/*
* Allows for effectively applying __must_check to a macro so we can have
@@ -201,9 +195,9 @@ static inline bool __must_check __must_check_overflow(bool overflow)
typeof(d) _d = d; \
unsigned long long _a_full = _a; \
unsigned int _to_shift = \
- is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
+ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
- (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
+ (_to_shift != _s || *_d < 0 || _a < 0 || \
(*_d >> _to_shift) != _a); \
}))
@@ -552,4 +546,46 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
(__member_size((name)->array) / sizeof(*(name)->array) + \
__must_be_array((name)->array))
+/**
+ * typeof_flex_counter() - Return the type of the counter variable of a given
+ * flexible array member annotated by __counted_by().
+ * @FAM: Instance of flexible array member within a given struct.
+ *
+ * Returns: the type of the @FAM's counter variable, or "size_t" if no
+ * annotation exists.
+ */
+#define typeof_flex_counter(FAM) \
+ typeof(_Generic(__flex_counter(FAM), \
+ void *: (size_t)0, \
+ default: *__flex_counter(FAM)))
+
+/**
+ * overflows_flex_counter_type() - Check if the counter associated with the
+ * given flexible array member can represent
+ * a value.
+ * @TYPE: Type of the struct that contains the @FAM.
+ * @FAM: Member name of the FAM within @TYPE.
+ * @COUNT: Value to check against the __counted_by annotated @FAM's counter.
+ *
+ * Returns: true if @COUNT can be represented in the @FAM's counter. When
+ * @FAM is not annotated with __counted_by(), always returns true.
+ */
+#define overflows_flex_counter_type(TYPE, FAM, COUNT) \
+ (!overflows_type(COUNT, typeof_flex_counter(((TYPE *)NULL)->FAM)))
+
+/**
+ * __set_flex_counter() - Set the counter associated with the given flexible
+ *			   array member that has been annotated by __counted_by().
+ * @FAM: Instance of flexible array member within a given struct.
+ * @COUNT: Value to store to the __counted_by annotated @FAM's counter.
+ *
+ * This is a no-op if no annotation exists. @COUNT needs to be checked with
+ * overflows_flex_counter_type() before using this function.
+ */
+#define __set_flex_counter(FAM, COUNT) \
+({ \
+ *_Generic(__flex_counter(FAM), \
+ void *: &(size_t){ 0 }, \
+ default: __flex_counter(FAM)) = (COUNT); \
+})
+
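A minimal allocation sketch (struct example is hypothetical; assumes <linux/slab.h>): reject counts that the u8 counter cannot represent, then record the count after allocating:

struct example {
	u8 count;
	u32 data[] __counted_by(count);
};

static struct example *example_alloc(size_t n)
{
	struct example *p;

	if (overflows_flex_counter_type(struct example, data, n))
		return NULL;	/* n does not fit in the u8 counter */

	p = kzalloc(struct_size(p, data, n), GFP_KERNEL);
	if (p)
		__set_flex_counter(p->data, n);

	return p;
}
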
#endif /* __LINUX_OVERFLOW_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3e2f960e166c..6f8638c9904f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -67,4 +67,6 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
enum pb_isolate_mode mode);
+bool page_is_unmovable(struct zone *zone, struct page *page,
+ enum pb_isolate_mode mode, unsigned long *step);
#endif
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 76c817162d2f..61e876e255e8 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -93,6 +93,7 @@ static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
#endif
extern struct page_ext *page_ext_get(const struct page *page);
+extern struct page_ext *page_ext_from_phys(phys_addr_t phys);
extern void page_ext_put(struct page_ext *page_ext);
extern struct page_ext *page_ext_lookup(unsigned long pfn);
@@ -215,6 +216,11 @@ static inline struct page_ext *page_ext_get(const struct page *page)
return NULL;
}
+static inline struct page_ext *page_ext_from_phys(phys_addr_t phys)
+{
+ return NULL;
+}
+
static inline void page_ext_put(struct page_ext *page_ext)
{
}
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 289620d4aad3..12268a32e8be 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -14,15 +14,18 @@ extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
-void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
-void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
-void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
-void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
- unsigned int nr);
-void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
- unsigned int nr);
-void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
- unsigned int nr);
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud);
+void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr);
+void __page_table_check_pmds_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr);
+void __page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud, unsigned int nr);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -43,55 +46,59 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
__page_table_check_zero(page, order);
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_clear(mm, pte);
+ __page_table_check_pte_clear(mm, addr, pte);
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_clear(mm, pmd);
+ __page_table_check_pmd_clear(mm, addr, pmd);
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_clear(mm, pud);
+ __page_table_check_pud_clear(mm, addr, pud);
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
- pte_t *ptep, pte_t pte, unsigned int nr)
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_ptes_set(mm, ptep, pte, nr);
+ __page_table_check_ptes_set(mm, addr, ptep, pte, nr);
}
static inline void page_table_check_pmds_set(struct mm_struct *mm,
- pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+ unsigned long addr, pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmds_set(mm, pmdp, pmd, nr);
+ __page_table_check_pmds_set(mm, addr, pmdp, pmd, nr);
}
static inline void page_table_check_puds_set(struct mm_struct *mm,
- pud_t *pudp, pud_t pud, unsigned int nr)
+ unsigned long addr, pud_t *pudp, pud_t pud, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_puds_set(mm, pudp, pud, nr);
+ __page_table_check_puds_set(mm, addr, pudp, pud, nr);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -114,30 +121,34 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
{
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
{
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
{
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
{
}
static inline void page_table_check_ptes_set(struct mm_struct *mm,
- pte_t *ptep, pte_t pte, unsigned int nr)
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned int nr)
{
}
static inline void page_table_check_pmds_set(struct mm_struct *mm,
- pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+ unsigned long addr, pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
}
static inline void page_table_check_puds_set(struct mm_struct *mm,
- pud_t *pudp, pud_t pud, unsigned int nr)
+ unsigned long addr, pud_t *pudp, pud_t pud, unsigned int nr)
{
}
@@ -149,7 +160,7 @@ static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
#endif /* CONFIG_PAGE_TABLE_CHECK */
-#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
-#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
+#define page_table_check_pmd_set(mm, addr, pmdp, pmd) page_table_check_pmds_set(mm, addr, pmdp, pmd, 1)
+#define page_table_check_pud_set(mm, addr, pudp, pud) page_table_check_puds_set(mm, addr, pudp, pud, 1)
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 31a848485ad9..ec442af3f886 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -210,6 +210,7 @@ enum mapping_flags {
AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
account usage to user cgroups */
+ AS_NO_DATA_INTEGRITY = 11, /* no data integrity guarantees */
/* Bits 16-25 are used for FOLIO_ORDER */
AS_FOLIO_ORDER_BITS = 5,
AS_FOLIO_ORDER_MIN = 16,
@@ -345,6 +346,16 @@ static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct addres
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
+static inline void mapping_set_no_data_integrity(struct address_space *mapping)
+{
+ set_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
+}
+
+static inline bool mapping_no_data_integrity(const struct address_space *mapping)
+{
+ return test_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
diff --git a/include/linux/panic.h b/include/linux/panic.h
index a00bc0937698..f1dd417e54b2 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -41,6 +41,14 @@ void abort(void);
* PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
*/
extern atomic_t panic_cpu;
+
+/*
+ * panic_redirect_cpu is used when panic is redirected to a specific CPU via
+ * the panic_force_cpu= boot parameter. It holds the CPU number that originally
+ * triggered the panic before redirection. A value of PANIC_CPU_INVALID means
+ * no redirection has occurred.
+ */
+extern atomic_t panic_redirect_cpu;
#define PANIC_CPU_INVALID -1
bool panic_try_start(void);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 078225b514d4..c0c54baadf04 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -12,7 +12,8 @@
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
-extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
+extern acpi_status pci_acpi_add_root_pm_notifier(struct acpi_device *dev,
+ struct acpi_pci_root *pci_root);
static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
return acpi_remove_pm_notifier(dev);
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 4286bfdbfdfa..c021c7af175f 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -223,6 +223,13 @@ struct pci_epc_bar_desc {
/**
* struct pci_epc_features - features supported by a EPC device per function
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @dynamic_inbound_mapping: indicate if the EPC device supports updating
+ * inbound mappings for an already configured BAR
+ * (i.e. allow calling pci_epc_set_bar() again
+ * without first calling pci_epc_clear_bar())
+ * @subrange_mapping: indicate if the EPC device can map inbound subranges for a
+ * BAR. This feature depends on @dynamic_inbound_mapping
+ * feature.
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
* @intx_capable: indicate if the endpoint can raise INTx interrupts
@@ -231,6 +238,8 @@ struct pci_epc_bar_desc {
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
+ unsigned int dynamic_inbound_mapping : 1;
+ unsigned int subrange_mapping : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
unsigned int intx_capable : 1;
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 48f68c4dcfa5..7737a7c03260 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -111,6 +111,22 @@ struct pci_epf_driver {
#define to_pci_epf_driver(drv) container_of_const((drv), struct pci_epf_driver, driver)
/**
+ * struct pci_epf_bar_submap - BAR subrange for inbound mapping
+ * @phys_addr: target physical/DMA address for this subrange
+ * @size: the size of the subrange to be mapped
+ *
+ * When pci_epf_bar.num_submap is >0, pci_epf_bar.submap describes the
+ * complete BAR layout. This allows an EPC driver to program multiple
+ * inbound translation windows for a single BAR when supported by the
+ * controller. The array order defines the BAR layout (submap[0] at offset
+ * 0, and each immediately follows the previous one).
+ */
+struct pci_epf_bar_submap {
+ dma_addr_t phys_addr;
+ size_t size;
+};
+
+/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
* @addr: virtual address corresponding to the @phys_addr
@@ -119,6 +135,9 @@ struct pci_epf_driver {
* requirement
* @barno: BAR number
* @flags: flags that are set for the BAR
+ * @num_submap: number of entries in @submap
+ * @submap: array of subrange descriptors allocated by the caller. See
+ *	     struct pci_epf_bar_submap for detailed semantics.
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
@@ -127,6 +146,10 @@ struct pci_epf_bar {
size_t mem_size;
enum pci_barno barno;
int flags;
+
+ /* Optional sub-range mapping */
+ unsigned int num_submap;
+ struct pci_epf_bar_submap *submap;
};
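
A minimal sketch (addresses and sizes hypothetical) of an EPF driver describing a BAR backed by two inbound windows; per the layout rule above, submap[0] sits at BAR offset 0 and submap[1] immediately follows it:

static void example_fill_submaps(struct pci_epf_bar *bar, dma_addr_t ctrl,
				 dma_addr_t ring,
				 struct pci_epf_bar_submap submap[2])
{
	submap[0].phys_addr = ctrl;
	submap[0].size = SZ_4K;
	submap[1].phys_addr = ring;
	submap[1].size = SZ_64K;

	bar->submap = submap;
	bar->num_submap = 2;
}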
/**
diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h
index 37a1ad9501b0..ae07d9f699c0 100644
--- a/include/linux/pci-ide.h
+++ b/include/linux/pci-ide.h
@@ -26,7 +26,7 @@ enum pci_ide_partner_select {
/**
* struct pci_ide_partner - Per port pair Selective IDE Stream settings
* @rid_start: Partner Port Requester ID range start
- * @rid_end: Partner Port Requester ID range end
+ * @rid_end: Partner Port Requester ID range end (inclusive)
* @stream_index: Selective IDE Stream Register Block selection
* @mem_assoc: PCI bus memory address association for targeting peer partner
* @pref_assoc: PCI bus prefetchable memory address association for
@@ -82,7 +82,6 @@ struct pci_ide_regs {
* @host_bridge_stream: allocated from host bridge @ide_stream_ida pool
* @stream_id: unique Stream ID (within Partner Port pairing)
* @name: name of the established Selective IDE Stream in sysfs
- * @tsm_dev: For TSM established IDE, the TSM device context
*
* Negative @stream_id values indicate "uninitialized" on the
* expectation that with TSM established IDE the TSM owns the stream_id
@@ -94,7 +93,6 @@ struct pci_ide {
u8 host_bridge_stream;
int stream_id;
const char *name;
- struct tsm_dev *tsm_dev;
};
/*
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 517e121d2598..873de20a2247 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -20,6 +20,8 @@ struct scatterlist;
* struct p2pdma_provider
*
* A p2pdma provider is a range of MMIO address space available to the CPU.
+ * @owner: Device to which this provider belongs.
+ * @bus_offset: Bus offset for p2p communication.
*/
struct p2pdma_provider {
struct device *owner;
diff --git a/include/linux/pci-pwrctrl.h b/include/linux/pci-pwrctrl.h
index 4aefc7901cd1..1192a2527521 100644
--- a/include/linux/pci-pwrctrl.h
+++ b/include/linux/pci-pwrctrl.h
@@ -31,6 +31,8 @@ struct device_link;
/**
* struct pci_pwrctrl - PCI device power control context.
* @dev: Address of the power controlling device.
+ * @power_on: Callback to power on the power controlling device.
+ * @power_off: Callback to power off the power controlling device.
*
* An object of this type must be allocated by the PCI power control device and
* passed to the pwrctrl subsystem to trigger a bus rescan and setup a device
@@ -38,6 +40,8 @@ struct device_link;
*/
struct pci_pwrctrl {
struct device *dev;
+ int (*power_on)(struct pci_pwrctrl *pwrctrl);
+ int (*power_off)(struct pci_pwrctrl *pwrctrl);
/* private: internal use only */
struct notifier_block nb;
@@ -50,5 +54,15 @@ int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl);
void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl);
int devm_pci_pwrctrl_device_set_ready(struct device *dev,
struct pci_pwrctrl *pwrctrl);
-
+#if IS_ENABLED(CONFIG_PCI_PWRCTRL)
+int pci_pwrctrl_create_devices(struct device *parent);
+void pci_pwrctrl_destroy_devices(struct device *parent);
+int pci_pwrctrl_power_on_devices(struct device *parent);
+void pci_pwrctrl_power_off_devices(struct device *parent);
+#else
+static inline int pci_pwrctrl_create_devices(struct device *parent) { return 0; }
+static inline void pci_pwrctrl_destroy_devices(struct device *parent) { }
+static inline int pci_pwrctrl_power_on_devices(struct device *parent) { return 0; }
+static inline void pci_pwrctrl_power_off_devices(struct device *parent) { }
+#endif
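
A minimal sketch (names hypothetical) of a power controller wiring up the new callbacks before handing the context to the pwrctrl subsystem:

static int example_power_on(struct pci_pwrctrl *pwrctrl)
{
	/* enable regulators/clocks for the controlled PCI device */
	return 0;
}

static int example_power_off(struct pci_pwrctrl *pwrctrl)
{
	return 0;
}

static int example_probe(struct device *dev, struct pci_pwrctrl *ctx)
{
	ctx->dev = dev;
	ctx->power_on = example_power_on;
	ctx->power_off = example_power_off;

	return devm_pci_pwrctrl_device_set_ready(dev, ctx);
}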
#endif /* __PCI_PWRCTRL_H__ */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b5cc0c2b9906..1c270f1d5123 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -248,6 +248,11 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
+ /*
+ * PCIe to PCI bridge does not create RID aliases because the bridge is
+ * integrated with the downstream devices and doesn't use real PCI.
+ */
+ PCI_DEV_FLAGS_PCI_BRIDGE_NO_ALIAS = (__force pci_dev_flags_t) (1 << 14),
};
enum pci_irq_reroute_variant {
@@ -377,6 +382,13 @@ struct pci_dev {
0xffffffff. You only need to change
this if your device has broken DMA
or supports 64-bit transfers. */
+ u64 msi_addr_mask; /* Mask of the bits of bus address for
+ MSI that this device implements.
+ Normally set based on device
+ capabilities. You only need to
+ change this if your device claims
+ to support 64-bit MSI but implements
+ fewer than 64 address bits. */
struct device_dma_parameters dma_parms;
@@ -406,6 +418,7 @@ struct pci_dev {
user sysfs */
unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
bit manually */
+ unsigned int no_bw_notif:1; /* BW notifications may cause issues */
unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -441,7 +454,6 @@ struct pci_dev {
unsigned int is_busmaster:1; /* Is busmaster */
unsigned int no_msi:1; /* May not use MSI */
- unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* Config space access blocked */
unsigned int broken_parity_status:1; /* Generates false positive parity */
unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
@@ -463,6 +475,7 @@ struct pci_dev {
unsigned int is_pciehp:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
+ unsigned int is_cxl:1; /* Compute Express Link (CXL) */
/*
* Devices marked being untrusted are the ones that can potentially
* execute DMA attacks and similar. They are typically connected
@@ -558,6 +571,7 @@ struct pci_dev {
struct pci_tsm *tsm; /* TSM operation state */
#endif
u16 acs_cap; /* ACS Capability offset */
+ u16 acs_capabilities; /* ACS Capabilities */
u8 supported_speeds; /* Supported Link Speeds Vector */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
@@ -791,6 +805,11 @@ static inline bool pci_is_display(struct pci_dev *pdev)
return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
}
+static inline bool pcie_is_cxl(struct pci_dev *pci_dev)
+{
+ return pci_dev->is_cxl;
+}
+
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
@@ -854,7 +873,6 @@ struct pci_ops {
void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
- int (*assert_perst)(struct pci_bus *bus, bool assert);
};
/*
@@ -1206,6 +1224,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
+void pci_probe_flush_workqueue(void);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
@@ -1243,7 +1262,11 @@ void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
-void pci_setup_cardbus(struct pci_bus *bus);
+#ifdef CONFIG_CARDBUS
+void pci_setup_cardbus_bridge(struct pci_bus *bus);
+#else
+static inline void pci_setup_cardbus_bridge(struct pci_bus *bus) { }
+#endif
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
@@ -2079,6 +2102,8 @@ static inline int pci_has_flag(int flag) { return 0; }
_PCI_NOP_ALL(read, *)
_PCI_NOP_ALL(write,)
+static inline void pci_probe_flush_workqueue(void) { }
+
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a9a089566b7c..406abf629be2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1733,9 +1733,6 @@
#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320
#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321
-#define PCI_VENDOR_ID_ESSENTIAL 0x120f
-#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
-
#define PCI_VENDOR_ID_O2 0x1217
#define PCI_DEVICE_ID_O2_6729 0x6729
#define PCI_DEVICE_ID_O2_6730 0x673a
@@ -2583,6 +2580,8 @@
#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
+#define PCI_VENDOR_ID_ASPEED 0x1a03
+
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
@@ -2950,7 +2949,8 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
-#define PCI_DEVICE_ID_INTEL_HDA_GML 0x3198
+/* In a few Intel documents, the GML acronym is used for Gemini Lake */
+#define PCI_DEVICE_ID_INTEL_HDA_GLK 0x3198
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
@@ -3143,6 +3143,7 @@
#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_NVL 0xd328
#define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7
#define PCI_DEVICE_ID_INTEL_HDA_PTL_H 0xe328
#define PCI_DEVICE_ID_INTEL_HDA_PTL 0xe428
diff --git a/include/linux/pcs/pcs-mtk-lynxi.h b/include/linux/pcs/pcs-mtk-lynxi.h
index be3b4ab32f4a..1bd4a27a8898 100644
--- a/include/linux/pcs/pcs-mtk-lynxi.h
+++ b/include/linux/pcs/pcs-mtk-lynxi.h
@@ -5,9 +5,8 @@
#include <linux/phylink.h>
#include <linux/regmap.h>
-#define MTK_SGMII_FLAG_PN_SWAP BIT(0)
struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
- struct regmap *regmap,
- u32 ana_rgc3, u32 flags);
+ struct fwnode_handle *fwnode,
+ struct regmap *regmap, u32 ana_rgc3);
void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs);
#endif
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 288f5235649a..c8cb010d655e 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -161,6 +161,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
+#define percpu_rwsem_is_write_held(sem) lockdep_is_held_type(sem, 0)
#define percpu_rwsem_is_held(sem) lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9ded2e582c60..48d851fbd8ea 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -305,6 +305,7 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
#define PERF_PMU_CAP_AUX_PAUSE 0x0200
#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
+#define PERF_PMU_CAP_MEDIATED_VPMU 0x0800
/**
* pmu::scope
@@ -998,6 +999,11 @@ struct perf_event_groups {
u64 index;
};
+struct perf_time_ctx {
+ u64 time;
+ u64 stamp;
+ u64 offset;
+};
/**
* struct perf_event_context - event context structure
@@ -1036,9 +1042,12 @@ struct perf_event_context {
/*
* Context clock, runs when context enabled.
*/
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
+
+ /*
+ * Context clock, runs when in the guest mode.
+ */
+ struct perf_time_ctx timeguest;
/*
* These fields let us detect when two contexts have both
@@ -1171,9 +1180,8 @@ struct bpf_perf_event_data_kern {
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
+ struct perf_time_ctx timeguest;
int active;
};
@@ -1669,6 +1677,8 @@ struct perf_guest_info_callbacks {
unsigned int (*state)(void);
unsigned long (*get_ip)(void);
unsigned int (*handle_intel_pt_intr)(void);
+
+ void (*handle_mediated_pmi)(void);
};
#ifdef CONFIG_GUEST_PERF_EVENTS
@@ -1678,6 +1688,7 @@ extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+DECLARE_STATIC_CALL(__perf_guest_handle_mediated_pmi, *perf_guest_cbs->handle_mediated_pmi);
static inline unsigned int perf_guest_state(void)
{
@@ -1694,6 +1705,11 @@ static inline unsigned int perf_guest_handle_intel_pt_intr(void)
return static_call(__perf_guest_handle_intel_pt_intr)();
}
+static inline void perf_guest_handle_mediated_pmi(void)
+{
+ static_call(__perf_guest_handle_mediated_pmi)();
+}
+
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
@@ -1914,6 +1930,13 @@ extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+int perf_create_mediated_pmu(void);
+void perf_release_mediated_pmu(void);
+void perf_load_guest_context(void);
+void perf_put_guest_context(void);
+#endif
+
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 652f287c1ef6..827dca25c0bc 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -225,16 +225,156 @@ static inline int pmd_dirty(pmd_t pmd)
* up to date.
*
* In the general case, no lock is guaranteed to be held between entry and exit
- * of the lazy mode. So the implementation must assume preemption may be enabled
- * and cpu migration is possible; it must take steps to be robust against this.
- * (In practice, for user PTE updates, the appropriate page table lock(s) are
- * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
- * and the mode cannot be used in interrupt context.
+ * of the lazy mode. (In practice, for user PTE updates, the appropriate page
+ * table lock(s) are held, but for kernel PTE updates, no lock is held).
+ * The implementation must therefore assume preemption may be enabled upon
+ * entry to the mode and cpu migration is possible; it must take steps to be
+ * robust against this. An implementation may handle this by disabling
+ * preemption, as a consequence generic code may not sleep while the lazy MMU
+ * mode is active.
+ *
+ * The mode is disabled in interrupt context, where calls to the lazy_mmu API
+ * have no effect.
+ *
+ * The lazy MMU mode is enabled for a given block of code using:
+ *
+ * lazy_mmu_mode_enable();
+ * <code>
+ * lazy_mmu_mode_disable();
+ *
+ * Nesting is permitted: <code> may itself use an enable()/disable() pair.
+ * A nested call to enable() has no functional effect; however disable() causes
+ * any batched architectural state to be flushed regardless of nesting. After a
+ * call to disable(), the caller can therefore rely on all previous page table
+ * modifications to have taken effect, but the lazy MMU mode may still be
+ * enabled.
+ *
+ * In certain cases, it may be desirable to temporarily pause the lazy MMU mode.
+ * This can be done using:
+ *
+ * lazy_mmu_mode_pause();
+ * <code>
+ * lazy_mmu_mode_resume();
+ *
+ * pause() ensures that the mode is exited regardless of the nesting level;
+ * resume() re-enters the mode at the same nesting level. Any call to the
+ * lazy_mmu_mode_* API between those two calls has no effect. In particular,
+ * this means that pause()/resume() pairs may nest.
+ *
+ * is_lazy_mmu_mode_active() can be used to check whether the lazy MMU mode is
+ * currently enabled.
*/
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-static inline void arch_enter_lazy_mmu_mode(void) {}
-static inline void arch_leave_lazy_mmu_mode(void) {}
-static inline void arch_flush_lazy_mmu_mode(void) {}
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+/**
+ * lazy_mmu_mode_enable() - Enable the lazy MMU mode.
+ *
+ * Enters a new lazy MMU mode section; if the mode was not already enabled,
+ * enables it and calls arch_enter_lazy_mmu_mode().
+ *
+ * Must be paired with a call to lazy_mmu_mode_disable().
+ *
+ * Has no effect if called:
+ * - While paused - see lazy_mmu_mode_pause()
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_enable(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt() || state->pause_count > 0)
+ return;
+
+ VM_WARN_ON_ONCE(state->enable_count == U8_MAX);
+
+ if (state->enable_count++ == 0)
+ arch_enter_lazy_mmu_mode();
+}
+
+/**
+ * lazy_mmu_mode_disable() - Disable the lazy MMU mode.
+ *
+ * Exits the current lazy MMU mode section. If it is the outermost section,
+ * disables the mode and calls arch_leave_lazy_mmu_mode(). Otherwise (nested
+ * section), calls arch_flush_lazy_mmu_mode().
+ *
+ * Must match a call to lazy_mmu_mode_enable().
+ *
+ * Has no effect if called:
+ * - While paused - see lazy_mmu_mode_pause()
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_disable(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt() || state->pause_count > 0)
+ return;
+
+ VM_WARN_ON_ONCE(state->enable_count == 0);
+
+ if (--state->enable_count == 0)
+ arch_leave_lazy_mmu_mode();
+ else /* Exiting a nested section */
+ arch_flush_lazy_mmu_mode();
+}
+
+/**
+ * lazy_mmu_mode_pause() - Pause the lazy MMU mode.
+ *
+ * Pauses the lazy MMU mode; if it is currently active, disables it and calls
+ * arch_leave_lazy_mmu_mode().
+ *
+ * Must be paired with a call to lazy_mmu_mode_resume(). Calls to the
+ * lazy_mmu_mode_* API have no effect until the matching resume() call.
+ *
+ * Has no effect if called:
+ * - While paused (inside another pause()/resume() pair)
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_pause(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt())
+ return;
+
+ VM_WARN_ON_ONCE(state->pause_count == U8_MAX);
+
+ if (state->pause_count++ == 0 && state->enable_count > 0)
+ arch_leave_lazy_mmu_mode();
+}
+
+/**
+ * lazy_mmu_mode_resume() - Resume the lazy MMU mode.
+ *
+ * Resumes the lazy MMU mode; if it was active at the point where the matching
+ * call to lazy_mmu_mode_pause() was made, re-enables it and calls
+ * arch_enter_lazy_mmu_mode().
+ *
+ * Must match a call to lazy_mmu_mode_pause().
+ *
+ * Has no effect if called:
+ * - While paused (inside another pause()/resume() pair)
+ * - In interrupt context
+ */
+static inline void lazy_mmu_mode_resume(void)
+{
+ struct lazy_mmu_state *state = &current->lazy_mmu_state;
+
+ if (in_interrupt())
+ return;
+
+ VM_WARN_ON_ONCE(state->pause_count == 0);
+
+ if (--state->pause_count == 0 && state->enable_count > 0)
+ arch_enter_lazy_mmu_mode();
+}
+#else
+static inline void lazy_mmu_mode_enable(void) {}
+static inline void lazy_mmu_mode_disable(void) {}
+static inline void lazy_mmu_mode_pause(void) {}
+static inline void lazy_mmu_mode_resume(void) {}
#endif
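A minimal usage sketch of the nesting and pause semantics above; the caller is hypothetical and only the lazy_mmu_mode_*() calls come from this API:

	lazy_mmu_mode_enable();		/* outermost: calls arch_enter_lazy_mmu_mode() */
	lazy_mmu_mode_enable();		/* nested: bumps enable_count, no arch call */

	lazy_mmu_mode_pause();		/* mode active, so calls arch_leave_lazy_mmu_mode() */
	lazy_mmu_mode_enable();		/* no effect while paused */
	lazy_mmu_mode_disable();	/* no effect while paused */
	lazy_mmu_mode_resume();		/* mode was active, so calls arch_enter_lazy_mmu_mode() */

	lazy_mmu_mode_disable();	/* nested exit: calls arch_flush_lazy_mmu_mode() */
	lazy_mmu_mode_disable();	/* outermost exit: calls arch_leave_lazy_mmu_mode() */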
#ifndef pte_batch_hint
@@ -289,7 +429,7 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
- page_table_check_ptes_set(mm, ptep, pte, nr);
+ page_table_check_ptes_set(mm, addr, ptep, pte, nr);
for (;;) {
set_pte(ptep, pte);
@@ -494,7 +634,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, address, pte);
return pte;
}
#endif
@@ -553,7 +693,7 @@ static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
* No need for ptep_get_and_clear(): page table check doesn't care about
* any bits that could have been set by HW concurrently.
*/
- page_table_check_pte_clear(mm, pte);
+ page_table_check_pte_clear(mm, addr, pte);
}
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
@@ -648,7 +788,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
- page_table_check_pmd_clear(mm, pmd);
+ page_table_check_pmd_clear(mm, address, pmd);
return pmd;
}
@@ -661,7 +801,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
- page_table_check_pud_clear(mm, pud);
+ page_table_check_pud_clear(mm, address, pud);
return pud;
}
diff --git a/include/linux/phy.h b/include/linux/phy.h
index fbbe028cc4b7..6f9979a26892 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,6 +327,7 @@ static inline long rgmii_clock(int speed)
struct device;
struct kernel_hwtstamp_config;
struct phylink;
+struct phy_port;
struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
@@ -645,6 +646,9 @@ struct phy_oatc14_sqi_capability {
* @master_slave_state: Current master/slave configuration
* @mii_ts: Pointer to time stamper callbacks
* @psec: Pointer to Power Sourcing Equipment control struct
+ * @ports: List of PHY port structures
+ * @n_ports: Number of ports currently attached to the PHY
+ * @max_n_ports: Max number of ports this PHY can expose
* @lock: Mutex for serialization access to PHY
* @state_queue: Work queue for state machine
* @link_down_events: Number of times link was lost
@@ -783,6 +787,10 @@ struct phy_device {
struct mii_timestamper *mii_ts;
struct pse_control *psec;
+ struct list_head ports;
+ int n_ports;
+ int max_n_ports;
+
u8 mdix;
u8 mdix_ctrl;
@@ -802,11 +810,15 @@ struct phy_device {
};
/* Generic phy_device::dev_flags */
-#define PHY_F_NO_IRQ 0x80000000
-#define PHY_F_RXC_ALWAYS_ON 0x40000000
+#define PHY_F_NO_IRQ 0x80000000
+#define PHY_F_RXC_ALWAYS_ON 0x40000000
+#define PHY_F_KEEP_PREAMBLE_BEFORE_SFD 0x20000000
#define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio)
+#define phy_for_each_port(phydev, port) \
+ list_for_each_entry(port, &(phydev)->ports, head)
+
/**
* struct phy_tdr_config - Configuration of a TDR raw test
*
@@ -1507,6 +1519,49 @@ struct phy_driver {
* Returns the time in jiffies until the next update event.
*/
unsigned int (*get_next_update_time)(struct phy_device *dev);
+
+ /**
+ * @attach_mii_port: Attach the given MII port to the PHY device
+ * @dev: PHY device to notify
+ * @port: The port being added
+ *
+ * Called when an MII port that needs to be driven by the PHY is found.
+ *
+	 * The port being passed may or may not be initialized. If it is
+	 * already initialized, it was initialized from the generic port
+	 * representation in devicetree, which supersedes any strapping or
+	 * vendor-specific properties.
+ *
+ * If the port isn't initialized, the port->mediums and port->lanes
+ * fields must be set, possibly according to strapping information.
+ *
+ * The PHY driver must set the port->interfaces field to indicate the
+ * possible MII modes that this PHY can output on the port.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*attach_mii_port)(struct phy_device *dev, struct phy_port *port);
+
+ /**
+	 * @attach_mdi_port: Attach the given MDI port to the PHY device
+ * @dev: PHY device to notify
+ * @port: The port being added
+ *
+	 * Called when a port that needs to be driven by the PHY is found. The
+	 * number of times this will be called depends on phydev->max_n_ports,
+	 * which the driver can change in .probe().
+ *
+	 * The port being passed may or may not be initialized. If it is
+	 * already initialized, it was initialized from the generic port
+	 * representation in devicetree, which supersedes any strapping or
+	 * vendor-specific properties.
+ *
+ * If the port isn't initialized, the port->mediums and port->lanes
+ * fields must be set, possibly according to strapping information.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*attach_mdi_port)(struct phy_device *dev, struct phy_port *port);
};
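A sketch of how a driver might implement the MDI hook above; the driver name and the strapping-derived values are hypothetical, and using not_described as the "uninitialized" test is an assumption:

	static int foo_phy_attach_mdi_port(struct phy_device *dev,
					   struct phy_port *port)
	{
		/* Port not described in devicetree: derive its capabilities,
		 * e.g. from strapping (values illustrative).
		 */
		if (port->not_described) {
			port->pairs = 4;
			port->mediums = BIT(ETHTOOL_LINK_MEDIUM_BASET);
		}

		return 0;
	}

The driver would then set .attach_mdi_port = foo_phy_attach_mdi_port in its struct phy_driver.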
#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -2097,12 +2152,6 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable, int speed);
-int phy_sfp_connect_phy(void *upstream, struct phy_device *phy);
-void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy);
-void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
-void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
-int phy_sfp_probe(struct phy_device *phydev,
- const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos);
@@ -2310,6 +2359,7 @@ void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
+
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
@@ -2356,10 +2406,6 @@ int phy_register_fixup_for_id(const char *bus_id,
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
-int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
-int phy_unregister_fixup_for_id(const char *bus_id);
-int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
-
int phy_eee_tx_clock_stop_capable(struct phy_device *phydev);
int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
@@ -2400,6 +2446,8 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
+struct phy_port *phy_get_sfp_port(struct phy_device *phydev);
+
extern const struct bus_type mdio_bus_type;
extern const struct class mdio_bus_class;
diff --git a/include/linux/phy/phy-common-props.h b/include/linux/phy/phy-common-props.h
new file mode 100644
index 000000000000..680e13de4558
--- /dev/null
+++ b/include/linux/phy/phy-common-props.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phy-common-props.h -- Common properties for generic PHYs
+ *
+ * Copyright 2025 NXP
+ */
+
+#ifndef __PHY_COMMON_PROPS_H
+#define __PHY_COMMON_PROPS_H
+
+#include <dt-bindings/phy/phy.h>
+
+struct fwnode_handle;
+
+int __must_check phy_get_rx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int supported,
+ unsigned int default_val,
+ unsigned int *val);
+int __must_check phy_get_tx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int supported,
+ unsigned int default_val,
+ unsigned int *val);
+int __must_check phy_get_manual_rx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int *val);
+int __must_check phy_get_manual_tx_polarity(struct fwnode_handle *fwnode,
+ const char *mode_name,
+ unsigned int *val);
+
+#endif /* __PHY_COMMON_PROPS_H */
diff --git a/include/linux/phy_port.h b/include/linux/phy_port.h
new file mode 100644
index 000000000000..0ef0f5ce4709
--- /dev/null
+++ b/include/linux/phy_port.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __PHY_PORT_H
+#define __PHY_PORT_H
+
+#include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/phy.h>
+
+struct phy_port;
+
+/**
+ * enum phy_port_parent - The device this port is attached to
+ *
+ * @PHY_PORT_PHY: Indicates that the port is driven by a PHY device
+ */
+enum phy_port_parent {
+ PHY_PORT_PHY,
+};
+
+struct phy_port_ops {
+	/* Sometimes, the link state can be retrieved from physical,
+	 * out-of-band channels such as the LOS signal on SFP. These
+	 * callbacks allow the port to be notified of such state changes.
+	 */
+ void (*link_up)(struct phy_port *port);
+ void (*link_down)(struct phy_port *port);
+
+	/* If the port acts as a Media Independent Interface (serdes port),
+	 * configures the port with the relevant state and mode. When @enable
+	 * is false, @interface should be ignored.
+	 */
+ int (*configure_mii)(struct phy_port *port, bool enable, phy_interface_t interface);
+};
+
+/**
+ * struct phy_port - A representation of a network device physical interface
+ *
+ * @head: Used by the port's parent to list ports
+ * @parent_type: The type of device this port is directly connected to
+ * @phy: If the parent is PHY_PORT_PHY, the PHY device controlling that port
+ * @ops: Callback ops implemented by the port controller
+ * @pairs: The number of pairs this port has, 0 if not applicable
+ * @mediums: Bitmask of the physical mediums this port provides access to
+ * @supported: The link modes this port can expose, if this port is MDI (not MII)
+ * @interfaces: The MII interfaces this port supports, if this port is MII
+ * @not_described: Indicates to the parent driver if this port isn't described,
+ * so it's up to the parent to filter its capabilities.
+ * @active: Indicates if the port is currently part of the active link.
+ * @is_mii: Indicates if this port is MII (Media Independent Interface),
+ * or MDI (Media Dependent Interface).
+ * @is_sfp: Indicates if this port drives an SFP cage.
+ */
+struct phy_port {
+ struct list_head head;
+ enum phy_port_parent parent_type;
+ union {
+ struct phy_device *phy;
+ };
+
+ const struct phy_port_ops *ops;
+
+ int pairs;
+ unsigned long mediums;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+
+ unsigned int not_described:1;
+ unsigned int active:1;
+ unsigned int is_mii:1;
+ unsigned int is_sfp:1;
+};
+
+struct phy_port *phy_port_alloc(void);
+void phy_port_destroy(struct phy_port *port);
+
+static inline struct phy_device *port_phydev(struct phy_port *port)
+{
+ return port->phy;
+}
+
+struct phy_port *phy_of_parse_port(struct device_node *dn);
+
+static inline bool phy_port_is_copper(struct phy_port *port)
+{
+ return port->mediums == BIT(ETHTOOL_LINK_MEDIUM_BASET);
+}
+
+static inline bool phy_port_is_fiber(struct phy_port *port)
+{
+ return !!(port->mediums & ETHTOOL_MEDIUM_FIBER_BITS);
+}
+
+void phy_port_update_supported(struct phy_port *port);
+int phy_port_restrict_mediums(struct phy_port *port, unsigned long mediums);
+
+int phy_port_get_type(struct phy_port *port);
+
+#endif
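A consumer-side sketch of the helpers above, iterating a PHY's ports with phy_for_each_port() from phy.h:

	struct phy_port *port;

	phy_for_each_port(phydev, port) {
		/* BaseT-only ports report a meaningful pair count */
		if (phy_port_is_copper(port))
			pr_info("copper port, %d pairs\n", port->pairs);
		else if (port->is_sfp)
			pr_info("port drives an SFP cage\n");
	}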
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 38363e566ac3..2bc0db3d52ac 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -90,9 +90,10 @@ enum {
MAC_40000FD = BIT(13),
MAC_50000FD = BIT(14),
MAC_56000FD = BIT(15),
- MAC_100000FD = BIT(16),
- MAC_200000FD = BIT(17),
- MAC_400000FD = BIT(18),
+ MAC_80000FD = BIT(16),
+ MAC_100000FD = BIT(17),
+ MAC_200000FD = BIT(18),
+ MAC_400000FD = BIT(19),
};
static inline bool phylink_autoneg_inband(unsigned int mode)
@@ -836,4 +837,9 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
void phylink_decode_usxgmii_word(struct phylink_link_state *state,
uint16_t lpa);
+
+void phylink_replay_link_begin(struct phylink *pl);
+
+void phylink_replay_link_end(struct phylink *pl);
+
#endif
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 69294f79cc88..8080a6fc6c8c 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -2005,6 +2005,14 @@ struct lightbar_params_v2_colors {
struct rgb_s color[8]; /* 0-3 are Google colors */
} __ec_todo_packed;
+struct lightbar_params_v3 {
+ /*
+ * Number of LEDs reported by the EC.
+ * May be less than the actual number of LEDs in the lightbar.
+ */
+ uint8_t reported_led_num;
+} __ec_todo_packed;
+
/* Lightbar program. */
#define EC_LB_PROG_LEN 192
struct lightbar_program {
@@ -2012,6 +2020,17 @@ struct lightbar_program {
uint8_t data[EC_LB_PROG_LEN];
} __ec_todo_unpacked;
+/*
+ * Lightbar program for large sequences. Sequences are sent in pieces, with
+ * increasing offset. The sequences are still limited by the amount of space
+ * reserved in EC RAM.
+ */
+struct lightbar_program_ex {
+ uint8_t size;
+ uint16_t offset;
+ uint8_t data[];
+} __ec_todo_packed;
+
struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
@@ -2058,6 +2077,7 @@ struct ec_params_lightbar {
struct lightbar_params_v2_colors set_v2par_colors;
struct lightbar_program set_program;
+ struct lightbar_program_ex set_program_ex;
};
} __ec_todo_packed;
@@ -2086,6 +2106,8 @@ struct ec_response_lightbar {
struct lightbar_params_v2_thresholds get_params_v2_thlds;
struct lightbar_params_v2_colors get_params_v2_colors;
+ struct lightbar_params_v3 get_params_v3;
+
struct __ec_todo_unpacked {
uint32_t num;
uint32_t flags;
@@ -2143,6 +2165,8 @@ enum lightbar_command {
LIGHTBAR_CMD_SET_PARAMS_V2_THRESHOLDS = 31,
LIGHTBAR_CMD_GET_PARAMS_V2_COLORS = 32,
LIGHTBAR_CMD_SET_PARAMS_V2_COLORS = 33,
+ LIGHTBAR_CMD_GET_PARAMS_V3 = 34,
+ LIGHTBAR_CMD_SET_PROGRAM_EX = 35,
LIGHTBAR_NUM_CMDS
};
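A sketch of the chunked lightbar_program_ex transfer described above; LB_CHUNK and the request buffer handling are illustrative, and the actual EC transport call is elided:

	#define LB_CHUNK 32

	static void lb_fill_and_send(struct ec_params_lightbar *p,
				     const uint8_t *prog, uint16_t len)
	{
		uint16_t off;

		for (off = 0; off < len; off += LB_CHUNK) {
			uint8_t n = min_t(uint16_t, len - off, LB_CHUNK);

			p->cmd = LIGHTBAR_CMD_SET_PROGRAM_EX;
			p->set_program_ex.size = n;
			p->set_program_ex.offset = off;
			memcpy(p->set_program_ex.data, prog + off, n);
			/* ...send this request to the EC before the next chunk... */
		}
	}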
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index b9c8520b4bd3..509c5592aab0 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -59,7 +59,8 @@ struct davinci_mcasp_pdata {
bool i2s_accurate_sck;
/* McASP specific fields */
- int tdm_slots;
+ int tdm_slots_tx;
+ int tdm_slots_rx;
u8 op_mode;
u8 dismod;
u8 num_serializer;
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
deleted file mode 100644
index 7d21e0c41037..000000000000
--- a/include/linux/platform_data/hwmon-s3c.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C - HWMon interface for ADC
-*/
-
-#ifndef __HWMON_S3C_H__
-#define __HWMON_S3C_H__
-
-/**
- * s3c_hwmon_chcfg - channel configuration
- * @name: The name to give this channel.
- * @mult: Multiply the ADC value read by this.
- * @div: Divide the value from the ADC by this.
- *
- * The value read from the ADC is converted to a value that
- * hwmon expects (mV) by result = (value_read * @mult) / @div.
- */
-struct s3c_hwmon_chcfg {
- const char *name;
- unsigned int mult;
- unsigned int div;
-};
-
-/**
- * s3c_hwmon_pdata - HWMON platform data
- * @in: One configuration for each possible channel used.
- */
-struct s3c_hwmon_pdata {
- struct s3c_hwmon_chcfg *in[8];
-};
-
-#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/mipi-i3c-hci.h b/include/linux/platform_data/mipi-i3c-hci.h
new file mode 100644
index 000000000000..ab7395f455f9
--- /dev/null
+++ b/include/linux/platform_data/mipi-i3c-hci.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef INCLUDE_PLATFORM_DATA_MIPI_I3C_HCI_H
+#define INCLUDE_PLATFORM_DATA_MIPI_I3C_HCI_H
+
+#include <linux/compiler_types.h>
+
+/**
+ * struct mipi_i3c_hci_platform_data - Platform-dependent data for mipi_i3c_hci
+ * @base_regs: Register set base address (to support multi-bus instances)
+ */
+struct mipi_i3c_hci_platform_data {
+ void __iomem *base_regs;
+};
+
+#endif
diff --git a/include/linux/platform_data/x86/asus-wmi-leds-ids.h b/include/linux/platform_data/x86/asus-wmi-leds-ids.h
deleted file mode 100644
index 034a039c4e37..000000000000
--- a/include/linux/platform_data/x86/asus-wmi-leds-ids.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H
-#define __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H
-
-#include <linux/dmi.h>
-#include <linux/types.h>
-
-/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */
-#if IS_REACHABLE(CONFIG_ASUS_WMI) || IS_REACHABLE(CONFIG_HID_ASUS)
-static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
- {
- .matches = {
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "RC71L"),
- },
- },
- { },
-};
-#endif
-
-#endif /* __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H */
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 419491d4abca..554f41b827e1 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -29,6 +29,7 @@
#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
+#define ASUS_WMI_METHODID_NOTIF 0x00100021 /* Notify method */
#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
@@ -172,12 +173,29 @@ enum asus_ally_mcu_hack {
ASUS_WMI_ALLY_MCU_HACK_DISABLED,
};
+/* Used to notify hid-asus when asus-wmi changes keyboard backlight */
+struct asus_hid_listener {
+ struct list_head list;
+ void (*brightness_set)(struct asus_hid_listener *listener, int brightness);
+};
+
+enum asus_hid_event {
+ ASUS_EV_BRTUP,
+ ASUS_EV_BRTDOWN,
+ ASUS_EV_BRTTOGGLE,
+};
+
+#define ASUS_EV_MAX_BRIGHTNESS 3
+
#if IS_REACHABLE(CONFIG_ASUS_WMI)
void set_ally_mcu_hack(enum asus_ally_mcu_hack status);
void set_ally_mcu_powersave(bool enabled);
int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval);
int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval);
int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
+int asus_hid_register_listener(struct asus_hid_listener *cdev);
+void asus_hid_unregister_listener(struct asus_hid_listener *cdev);
+int asus_hid_event(enum asus_hid_event event);
#else
static inline void set_ally_mcu_hack(enum asus_ally_mcu_hack status)
{
@@ -198,6 +216,17 @@ static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
{
return -ENODEV;
}
+static inline int asus_hid_register_listener(struct asus_hid_listener *bdev)
+{
+ return -ENODEV;
+}
+static inline void asus_hid_unregister_listener(struct asus_hid_listener *bdev)
+{
+}
+static inline int asus_hid_event(enum asus_hid_event event)
+{
+ return -ENODEV;
+}
#endif
#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */
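A sketch of the listener flow above as the hid-asus side might use it; the names are hypothetical:

	static void foo_brightness_set(struct asus_hid_listener *listener,
				       int brightness)
	{
		/* brightness is in the range 0..ASUS_EV_MAX_BRIGHTNESS */
	}

	static struct asus_hid_listener foo_listener = {
		.brightness_set = foo_brightness_set,
	};

	/* On probe: */
	ret = asus_hid_register_listener(&foo_listener);
	/* On remove: */
	asus_hid_unregister_listener(&foo_listener);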
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 98a899858ece..afcaaa37a812 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -681,10 +681,10 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+ bool work_in_progress; /* Owned by the PM core */
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
- bool work_in_progress:1; /* Owned by the PM core */
bool smart_suspend:1; /* Owned by the PM core */
bool must_resume:1; /* Owned by the PM core */
bool may_skip_resume:1; /* Set by subsystems */
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index e86f3b731da2..9e1892525eac 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -44,8 +44,9 @@ posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
}
#endif
-int posix_acl_to_xattr(struct user_namespace *user_ns,
- const struct posix_acl *acl, void *buffer, size_t size);
+extern void *posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
+ size_t *sizep, gfp_t gfp);
+
static inline const char *posix_acl_xattr_name(int type)
{
switch (type) {
diff --git a/include/linux/psp.h b/include/linux/psp.h
index 92e60aeef21e..b337dcce1e99 100644
--- a/include/linux/psp.h
+++ b/include/linux/psp.h
@@ -18,6 +18,7 @@
* and should include an appropriate local definition in their source file.
*/
#define PSP_CMDRESP_STS GENMASK(15, 0)
+#define PSP_TEE_STS_RING_BUSY 0x0000000d /* Ring already initialized */
#define PSP_CMDRESP_CMD GENMASK(23, 16)
#define PSP_CMDRESP_RESERVED GENMASK(29, 24)
#define PSP_CMDRESP_RECOVERY BIT(30)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c5b30054cd01..7729fef249e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -31,6 +31,16 @@
#include <asm/processor.h>
#include <linux/context_tracking_irq.h>
+token_context_lock(RCU, __reentrant_ctx_lock);
+token_context_lock_instance(RCU, RCU_SCHED);
+token_context_lock_instance(RCU, RCU_BH);
+
+/*
+ * A convenience macro that can be used for RCU-protected globals or struct
+ * members; adds type qualifier __rcu, and also enforces __guarded_by(RCU).
+ */
+#define __rcu_guarded __rcu __guarded_by(RCU)
+
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
@@ -175,36 +185,7 @@ void rcu_tasks_torture_stats_print(char *tt, char *tf);
# define synchronize_rcu_tasks synchronize_rcu
# endif
-# ifdef CONFIG_TASKS_TRACE_RCU
-// Bits for ->trc_reader_special.b.need_qs field.
-#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
-#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
-
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
-void rcu_tasks_trace_qs_blkd(struct task_struct *t);
-
-# define rcu_tasks_trace_qs(t) \
- do { \
- int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
- \
- if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
- likely(!___rttq_nesting)) { \
- rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
- } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
- !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
- rcu_tasks_trace_qs_blkd(t); \
- } \
- } while (0)
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
-# else
-# define rcu_tasks_trace_qs(t) do { } while (0)
-# endif
-
-#define rcu_tasks_qs(t, preempt) \
-do { \
- rcu_tasks_classic_qs((t), (preempt)); \
- rcu_tasks_trace_qs(t); \
-} while (0)
+#define rcu_tasks_qs(t, preempt) rcu_tasks_classic_qs((t), (preempt))
# ifdef CONFIG_TASKS_RUDE_RCU
void synchronize_rcu_tasks_rude(void);
@@ -425,7 +406,8 @@ static inline void rcu_preempt_sleep_check(void) { }
// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
-static inline bool lockdep_assert_rcu_helper(bool c)
+static __always_inline bool lockdep_assert_rcu_helper(bool c, const struct __ctx_lock_RCU *ctx)
+ __assumes_shared_ctx_lock(RCU) __assumes_shared_ctx_lock(ctx)
{
return debug_lockdep_rcu_enabled() &&
(c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
@@ -438,7 +420,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
*/
#define lockdep_assert_in_rcu_read_lock() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map), RCU))
/**
* lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
@@ -448,7 +430,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* actual rcu_read_lock_bh() is required.
*/
#define lockdep_assert_in_rcu_read_lock_bh() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map), RCU_BH))
/**
* lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
@@ -458,7 +440,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* instead an actual rcu_read_lock_sched() is required.
*/
#define lockdep_assert_in_rcu_read_lock_sched() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map), RCU_SCHED))
/**
* lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
@@ -476,17 +458,17 @@ static inline bool lockdep_assert_rcu_helper(bool c)
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
!lock_is_held(&rcu_bh_lock_map) && \
!lock_is_held(&rcu_sched_lock_map) && \
- preemptible()))
+ preemptible(), RCU))
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
-#define lockdep_assert_in_rcu_reader() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() __assume_shared_ctx_lock(RCU)
+#define lockdep_assert_in_rcu_read_lock_bh() __assume_shared_ctx_lock(RCU_BH)
+#define lockdep_assert_in_rcu_read_lock_sched() __assume_shared_ctx_lock(RCU_SCHED)
+#define lockdep_assert_in_rcu_reader() __assume_shared_ctx_lock(RCU)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -506,11 +488,11 @@ static inline bool lockdep_assert_rcu_helper(bool c)
#endif /* #else #ifdef __CHECKER__ */
#define __unrcu_pointer(p, local) \
-({ \
+context_unsafe( \
typeof(*p) *local = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
- ((typeof(*p) __force __kernel *)(local)); \
-})
+ ((typeof(*p) __force __kernel *)(local)) \
+)
/**
* unrcu_pointer - mark a pointer as not being RCU protected
* @p: pointer needing to lose its __rcu property
@@ -586,7 +568,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
-do { \
+context_unsafe( \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
rcu_check_sparse(p, __rcu); \
\
@@ -594,7 +576,7 @@ do { \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-} while (0)
+)
/**
* rcu_replace_pointer() - replace an RCU pointer, returning its old value
@@ -861,9 +843,10 @@ do { \
* only when acquiring spinlocks that are subject to priority inheritance.
*/
static __always_inline void rcu_read_lock(void)
+ __acquires_shared(RCU)
{
__rcu_read_lock();
- __acquire(RCU);
+ __acquire_shared(RCU);
rcu_lock_acquire(&rcu_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
@@ -891,11 +874,12 @@ static __always_inline void rcu_read_lock(void)
* See rcu_read_lock() for more information.
*/
static inline void rcu_read_unlock(void)
+ __releases_shared(RCU)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
- __release(RCU);
+ __release_shared(RCU);
__rcu_read_unlock();
}
@@ -914,9 +898,11 @@ static inline void rcu_read_unlock(void)
* was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_BH)
{
local_bh_disable();
- __acquire(RCU_BH);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
@@ -928,11 +914,13 @@ static inline void rcu_read_lock_bh(void)
* See rcu_read_lock_bh() for more information.
*/
static inline void rcu_read_unlock_bh(void)
+ __releases_shared(RCU) __releases_shared(RCU_BH)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
- __release(RCU_BH);
+ __release_shared(RCU_BH);
+ __release_shared(RCU);
local_bh_enable();
}
@@ -952,9 +940,11 @@ static inline void rcu_read_unlock_bh(void)
* rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
@@ -962,9 +952,11 @@ static inline void rcu_read_lock_sched(void)
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable_notrace();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
}
/**
@@ -973,22 +965,27 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
* See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable();
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable_notrace();
}
static __always_inline void rcu_read_lock_dont_migrate(void)
+ __acquires_shared(RCU)
{
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
migrate_disable();
@@ -996,6 +993,7 @@ static __always_inline void rcu_read_lock_dont_migrate(void)
}
static inline void rcu_read_unlock_migrate(void)
+ __releases_shared(RCU)
{
rcu_read_unlock();
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
@@ -1041,10 +1039,10 @@ static inline void rcu_read_unlock_migrate(void)
* ordering guarantees for either the CPU or the compiler.
*/
#define RCU_INIT_POINTER(p, v) \
- do { \
+ context_unsafe( \
rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
- } while (0)
+ )
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
@@ -1192,18 +1190,7 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
-DEFINE_LOCK_GUARD_0(rcu,
- do {
- rcu_read_lock();
- /*
- * sparse doesn't call the cleanup function,
- * so just release immediately and don't track
- * the context. We don't need to anyway, since
- * the whole point of the guard is to not need
- * the explicit unlock.
- */
- __release(RCU);
- } while (0),
- rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+DECLARE_LOCK_GUARD_0_ATTRS(rcu, __acquires_shared(RCU), __releases_shared(RCU))
#endif /* __LINUX_RCUPDATE_H */
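A short sketch tying together the __rcu_guarded annotation and the scoped rcu guard defined above; struct foo and struct bar are hypothetical:

	struct bar { int val; };

	struct foo {
		struct bar __rcu_guarded *bar;	/* __rcu + __guarded_by(RCU) */
	};

	static int foo_read_val(struct foo *f)
	{
		guard(rcu)();	/* rcu_read_lock(); unlocks at end of scope */
		return rcu_dereference(f->bar)->val;
	}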
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index e6c44eb428ab..cee89e51e45c 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -12,27 +12,74 @@
#include <linux/rcupdate.h>
#include <linux/cleanup.h>
-extern struct lockdep_map rcu_trace_lock_map;
+#ifdef CONFIG_TASKS_TRACE_RCU
+extern struct srcu_struct rcu_tasks_trace_srcu_struct;
+#endif // #ifdef CONFIG_TASKS_TRACE_RCU
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
- return lock_is_held(&rcu_trace_lock_map);
+ return srcu_read_lock_held(&rcu_tasks_trace_srcu_struct);
}
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
return 1;
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif // #else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t);
+/**
+ * rcu_read_lock_tasks_trace - mark beginning of RCU-trace read-side critical section
+ *
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
+ * critical sections. Similarly, if call_rcu_tasks_trace() is invoked on one
+ * task while other tasks are within RCU read-side critical sections,
+ * invocation of the corresponding RCU callback is deferred until after
+ * all the other tasks exit their critical sections.
+ *
+ * For more details, please see the documentation for
+ * srcu_read_lock_fast(). For a description of how implicit RCU
+ * readers provide the needed ordering for architectures defining the
+ * ARCH_WANTS_NO_INSTR Kconfig option (and thus promising never to trace
+ * code where RCU is not watching), please see the __srcu_read_lock_fast()
+ * (non-kerneldoc) header comment. Otherwise, the smp_mb() below provided
+ * the needed ordering.
+ */
+static inline struct srcu_ctr __percpu *rcu_read_lock_tasks_trace(void)
+{
+ struct srcu_ctr __percpu *ret = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+
+ rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+ return ret;
+}
+
+/**
+ * rcu_read_unlock_tasks_trace - mark end of RCU-trace read-side critical section
+ * @scp: return value from corresponding rcu_read_lock_tasks_trace().
+ *
+ * Pairs with the preceding call to rcu_read_lock_tasks_trace() that
+ * returned the value passed in via scp.
+ *
+ * For more details, please see the documentation for rcu_read_unlock().
+ * For memory-ordering information, please see the header comment for the
+ * rcu_read_lock_tasks_trace() function.
+ */
+static inline void rcu_read_unlock_tasks_trace(struct srcu_ctr __percpu *scp)
+{
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Provide ordering on noinstr-incomplete architectures.
+ __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
+ srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
+}
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,12 +97,15 @@ static inline void rcu_read_lock_trace(void)
{
struct task_struct *t = current;
- WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
- barrier();
- if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
- t->trc_reader_special.b.need_mb)
- smp_mb(); // Pairs with update-side barriers
- rcu_lock_acquire(&rcu_trace_lock_map);
+ rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
+ if (t->trc_reader_nesting++) {
+ // In case we interrupted a Tasks Trace RCU reader.
+ return;
+ }
+ barrier(); // nesting before scp to protect against interrupt handler.
+ t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Placeholder for more selective ordering
}
/**
@@ -69,26 +119,88 @@ static inline void rcu_read_lock_trace(void)
*/
static inline void rcu_read_unlock_trace(void)
{
- int nesting;
+ struct srcu_ctr __percpu *scp;
struct task_struct *t = current;
- rcu_lock_release(&rcu_trace_lock_map);
- nesting = READ_ONCE(t->trc_reader_nesting) - 1;
- barrier(); // Critical section before disabling.
- // Disable IPI-based setting of .need_qs.
- WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
- if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
- WRITE_ONCE(t->trc_reader_nesting, nesting);
- return; // We assume shallow reader nesting.
+ scp = t->trc_reader_scp;
+ barrier(); // scp before nesting to protect against interrupt handler.
+ if (!--t->trc_reader_nesting) {
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
+ smp_mb(); // Placeholder for more selective ordering
+ __srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
}
- WARN_ON_ONCE(nesting != 0);
- rcu_read_unlock_trace_special(t);
+ srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}
-void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
-void synchronize_rcu_tasks_trace(void);
-void rcu_barrier_tasks_trace(void);
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
+/**
+ * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a trace rcu-tasks
+ * grace period elapses, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These
+ * read-side critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
+{
+ call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
+}
+
+/**
+ * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
+ *
+ * Control will return to the caller some time after a trace rcu-tasks
+ * grace period has elapsed, in other words after all currently executing
+ * trace rcu-tasks read-side critical sections have completed. These read-side
+ * critical sections are delimited by calls to rcu_read_lock_trace()
+ * and rcu_read_unlock_trace().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function preambles
+ * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
+ * (yet) intended for heavy use from multiple CPUs.
+ *
+ * See the description of synchronize_rcu() for more detailed information
+ * on memory ordering guarantees.
+ */
+static inline void synchronize_rcu_tasks_trace(void)
+{
+ synchronize_srcu(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
+ *
+ * Note that rcu_barrier_tasks_trace() is not obligated to actually wait,
+ * for example, if there are no pending callbacks.
+ */
+static inline void rcu_barrier_tasks_trace(void)
+{
+ srcu_barrier(&rcu_tasks_trace_srcu_struct);
+}
+
+/**
+ * rcu_tasks_trace_expedite_current - Expedite the current Tasks Trace RCU grace period
+ *
+ * Cause the current Tasks Trace RCU grace period to become expedited.
+ * The grace period following the current one might also be expedited.
+ * If there is no current grace period, one might be created. If the
+ * current grace period is currently sleeping, that sleep will complete
+ * before expediting will take effect.
+ */
+static inline void rcu_tasks_trace_expedite_current(void)
+{
+ srcu_expedite_current(&rcu_tasks_trace_srcu_struct);
+}
+
+// Placeholders to enable stepwise transition.
+void __init rcu_tasks_trace_suppress_unused(void);
+
#else
/*
* The BPF JIT forms these addresses even when it doesn't call these
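A minimal reader-side sketch of the cookie-returning API above:

	struct srcu_ctr __percpu *scp;

	scp = rcu_read_lock_tasks_trace();
	/* ...Tasks Trace RCU read-side critical section... */
	rcu_read_unlock_tasks_trace(scp);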
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 80dc023ac2bf..3da377ffb0c2 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -478,9 +478,9 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(true, lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(true, lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags) __cond_acquires(lock);
+ unsigned long *flags) __cond_acquires(true, lock);
#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index b0b9be750d93..caff2240bdab 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -359,6 +359,10 @@ typedef void (*regmap_unlock)(void *);
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
+ * @reg_default_cb: Optional callback to return default values for registers
+ * not listed in reg_defaults. This is only used for
+ * REGCACHE_FLAT population; drivers must ensure the readable_reg/
+ * writeable_reg callbacks are defined to handle holes.
*
* @read_flag_mask: Mask to be set in the top bytes of the register when doing
* a read.
@@ -449,6 +453,8 @@ struct regmap_config {
const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
+ int (*reg_default_cb)(struct device *dev, unsigned int reg,
+ unsigned int *def);
enum regcache_type cache_type;
const void *reg_defaults_raw;
unsigned int num_reg_defaults_raw;
@@ -1349,6 +1355,14 @@ static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
}
+static inline int regmap_default_zero_cb(struct device *dev,
+ unsigned int reg,
+ unsigned int *def)
+{
+ *def = 0;
+ return 0;
+}
+
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
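A sketch of a config using the new callback together with the regmap_default_zero_cb() helper above; the register sizes and the foo_defaults table are illustrative:

	static const struct regmap_config foo_regmap_config = {
		.reg_bits		= 8,
		.val_bits		= 8,
		.max_register		= 0xff,
		.cache_type		= REGCACHE_FLAT,
		.reg_defaults		= foo_defaults,
		.num_reg_defaults	= ARRAY_SIZE(foo_defaults),
		/* Registers not listed in foo_defaults default to zero */
		.reg_default_cb		= regmap_default_zero_cb,
	};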
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 978cf593b662..cc6ce709ec86 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -53,6 +53,11 @@ enum regulator_detection_severity {
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
+/* Initialize struct linear_range using voltages, not selectors */
+#define REGULATOR_LINEAR_VRANGE(_offs_uV, _min_uV, _max_uV, _step_uV) \
+ LINEAR_RANGE(_min_uV, ((_min_uV) - (_offs_uV)) / (_step_uV), \
+ ((_max_uV) - (_offs_uV)) / (_step_uV), _step_uV)
+
/**
* struct regulator_ops - regulator operations.
*
@@ -635,6 +640,7 @@ struct regulator_dev {
int ref_cnt;
struct module *owner;
struct device dev;
+ struct device bdev;
struct regulation_constraints *constraints;
struct regulator *supply; /* for tree */
const char *supply_name;
@@ -649,6 +655,7 @@ struct regulator_dev {
struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
+ unsigned int constraints_pending:1;
unsigned int is_switch:1;
/* time when this regulator was disabled last time */
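As a worked example of the voltage-based macro above, an 800000..1500000 uV range in 50000 uV steps with no offset maps to selectors 16 through 30:

	REGULATOR_LINEAR_VRANGE(0, 800000, 1500000, 50000)
	/* expands to: (800000 - 0) / 50000 = 16, (1500000 - 0) / 50000 = 30 */
	LINEAR_RANGE(800000, 16, 30, 50000)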
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 54701668b3df..006e57fd7ca5 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -53,6 +53,7 @@ enum resctrl_res_level {
RDT_RESOURCE_L2,
RDT_RESOURCE_MBA,
RDT_RESOURCE_SMBA,
+ RDT_RESOURCE_PERF_PKG,
/* Must be the last */
RDT_NUM_RESOURCES,
@@ -131,15 +132,24 @@ enum resctrl_domain_type {
* @list: all instances of this resource
* @id: unique id for this instance
* @type: type of this instance
+ * @rid: resource id for this instance
* @cpu_mask: which CPUs share this resource
*/
struct rdt_domain_hdr {
struct list_head list;
int id;
enum resctrl_domain_type type;
+ enum resctrl_res_level rid;
struct cpumask cpu_mask;
};
+static inline bool domain_header_is_valid(struct rdt_domain_hdr *hdr,
+ enum resctrl_domain_type type,
+ enum resctrl_res_level rid)
+{
+ return !WARN_ON_ONCE(hdr->type != type || hdr->rid != rid);
+}
+
/**
* struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
* @hdr: common header for different domain types
@@ -169,7 +179,7 @@ struct mbm_cntr_cfg {
};
/**
- * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
+ * struct rdt_l3_mon_domain - group of CPUs sharing RDT_RESOURCE_L3 monitoring
* @hdr: common header for different domain types
* @ci_id: cache info id for this domain
* @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
@@ -183,7 +193,7 @@ struct mbm_cntr_cfg {
* @cntr_cfg: array of assignable counters' configuration (indexed
* by counter ID)
*/
-struct rdt_mon_domain {
+struct rdt_l3_mon_domain {
struct rdt_domain_hdr hdr;
unsigned int ci_id;
unsigned long *rmid_busy_llc;
@@ -261,6 +271,7 @@ enum resctrl_scope {
RESCTRL_L2_CACHE = 2,
RESCTRL_L3_CACHE = 3,
RESCTRL_L3_NODE,
+ RESCTRL_PACKAGE,
};
/**
@@ -284,7 +295,7 @@ enum resctrl_schema_fmt {
* events of monitor groups created via mkdir.
*/
struct resctrl_mon {
- int num_rmid;
+ u32 num_rmid;
unsigned int mbm_cfg_mask;
int num_mbm_cntrs;
bool mbm_cntr_assignable;
@@ -358,10 +369,10 @@ struct resctrl_cpu_defaults {
};
struct resctrl_mon_config_info {
- struct rdt_resource *r;
- struct rdt_mon_domain *d;
- u32 evtid;
- u32 mon_config;
+ struct rdt_resource *r;
+ struct rdt_l3_mon_domain *d;
+ u32 evtid;
+ u32 mon_config;
};
/**
@@ -403,7 +414,8 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
-void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+bool resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
+ unsigned int binary_bits, void *arch_priv);
bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
@@ -498,22 +510,31 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type);
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
-int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
-void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);
+/*
+ * Architecture hook called at beginning of first file system mount attempt.
+ * No locks are held.
+ */
+void resctrl_arch_pre_mount(void);
+
/**
* resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
* for this resource and domain.
* @r: resource that the counter should be read from.
- * @d: domain that the counter should be read from.
+ * @hdr: Header of domain that the counter should be read from.
* @closid: closid that matches the rmid. Depending on the architecture, the
* counter may match traffic of both @closid and @rmid, or @rmid
* only.
* @rmid: rmid of the counter to read.
* @eventid: eventid to read, e.g. L3 occupancy.
+ * @arch_priv:	Architecture private data for this event, as provided
+ *		by the architecture via resctrl_enable_mon_event().
* @val: result of the counter read in bytes.
* @arch_mon_ctx: An architecture specific value from
* resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
@@ -529,9 +550,9 @@ void resctrl_offline_cpu(unsigned int cpu);
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
- u64 *val, void *arch_mon_ctx);
+ void *arch_priv, u64 *val, void *arch_mon_ctx);
/**
* resctrl_arch_rmid_read_context_check() - warn about invalid contexts
@@ -576,7 +597,7 @@ struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid,
enum resctrl_event_id eventid);
@@ -589,7 +610,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d);
/**
* resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
@@ -615,7 +636,7 @@ void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
*
* This can be called from any CPU.
*/
-void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
enum resctrl_event_id evtid, u32 rmid, u32 closid,
u32 cntr_id, bool assign);
@@ -638,7 +659,7 @@ void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid, int cntr_id,
enum resctrl_event_id eventid, u64 *val);
@@ -653,7 +674,7 @@ int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
u32 closid, u32 rmid, int cntr_id,
enum resctrl_event_id eventid);
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index acfe07860b34..a5f56faa18d2 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -50,6 +50,17 @@ enum resctrl_event_id {
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+ /* Intel Telemetry Events */
+ PMT_EVENT_ENERGY,
+ PMT_EVENT_ACTIVITY,
+ PMT_EVENT_STALLS_LLC_HIT,
+ PMT_EVENT_C1_RES,
+ PMT_EVENT_UNHALTED_CORE_CYCLES,
+ PMT_EVENT_STALLS_LLC_MISS,
+ PMT_EVENT_AUTO_C6_RES,
+ PMT_EVENT_UNHALTED_REF_CYCLES,
+ PMT_EVENT_UOPS_RETIRED,
+
/* Must be the last */
QOS_NUM_EVENTS,
};
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 67d2bf579942..9b262109726d 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -6,6 +6,7 @@
#define __LINUX_RESTART_BLOCK_H
#include <linux/compiler.h>
+#include <linux/time64.h>
#include <linux/types.h>
struct __kernel_timespec;
@@ -50,8 +51,7 @@ struct restart_block {
struct pollfd __user *ufds;
int nfds;
int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
+ struct timespec64 end_time;
} poll;
};
};
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 08e664b21f5a..133ccb39137a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -245,16 +245,17 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires_shared(RCU)
{
(void)rhashtable_walk_start_check(iter);
}
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
@@ -325,6 +326,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
+ __acquires(__bitlock(0, bkt))
{
unsigned long flags;
@@ -337,6 +339,7 @@ static inline unsigned long rht_lock(struct bucket_table *tbl,
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
+ __acquires(__bitlock(0, bucket))
{
unsigned long flags;
@@ -349,6 +352,7 @@ static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
@@ -424,13 +428,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(0, bkt));
local_irq_restore(flags);
}
@@ -612,6 +617,7 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -666,6 +672,7 @@ restart:
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -676,6 +683,7 @@ static __always_inline void *rhashtable_lookup(
static __always_inline void *rhashtable_lookup_likely(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_LIKELY);
@@ -727,6 +735,7 @@ static __always_inline void *rhashtable_lookup_fast(
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -737,6 +746,7 @@ static __always_inline struct rhlist_head *rhltable_lookup(
static __always_inline struct rhlist_head *rhltable_lookup_likely(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_LIKELY);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..8dc0871e5f00 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -92,6 +92,7 @@ struct anon_vma_chain {
};
enum ttu_flags {
+ TTU_USE_SHARED_ZEROPAGE = 0x2, /* for unused pages of large folios */
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
@@ -104,75 +105,8 @@ enum ttu_flags {
};
#ifdef CONFIG_MMU
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
- atomic_inc(&anon_vma->refcount);
-}
-
-void __put_anon_vma(struct anon_vma *anon_vma);
-
-static inline void put_anon_vma(struct anon_vma *anon_vma)
-{
- if (atomic_dec_and_test(&anon_vma->refcount))
- __put_anon_vma(anon_vma);
-}
-
-static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
-{
- down_write(&anon_vma->root->rwsem);
-}
-
-static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
-{
- return down_write_trylock(&anon_vma->root->rwsem);
-}
-
-static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
-{
- up_write(&anon_vma->root->rwsem);
-}
-
-static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
-{
- down_read(&anon_vma->root->rwsem);
-}
-
-static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
-{
- return down_read_trylock(&anon_vma->root->rwsem);
-}
-
-static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
-{
- up_read(&anon_vma->root->rwsem);
-}
-
-/*
- * anon_vma helper functions.
- */
void anon_vma_init(void); /* create anon_vma_cachep */
-int __anon_vma_prepare(struct vm_area_struct *);
-void unlink_anon_vmas(struct vm_area_struct *);
-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
-
-static inline int anon_vma_prepare(struct vm_area_struct *vma)
-{
- if (likely(vma->anon_vma))
- return 0;
-
- return __anon_vma_prepare(vma);
-}
-
-static inline void anon_vma_merge(struct vm_area_struct *vma,
- struct vm_area_struct *next)
-{
- VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
- unlink_anon_vmas(next);
-}
-
-struct anon_vma *folio_get_anon_vma(const struct folio *folio);
#ifdef CONFIG_MM_ID
static __always_inline void folio_lock_large_mapcount(struct folio *folio)
@@ -1000,12 +934,8 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
-enum rmp_flags {
- RMP_LOCKED = 1 << 0,
- RMP_USE_SHARED_ZEROPAGE = 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+ enum ttu_flags flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
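A hedged sketch of what the API change means for callers: the separate rmp_flags namespace is gone, so call sites now pass ttu_flags directly (my_finish_migration is a hypothetical wrapper, not a function from this patch):

static void my_finish_migration(struct folio *src, struct folio *dst,
				bool use_shared_zeropage)
{
	enum ttu_flags flags = use_shared_zeropage ? TTU_USE_SHARED_ZEROPAGE
						   : (enum ttu_flags)0;

	/* Previously: remove_migration_ptes(src, dst, RMP_USE_SHARED_ZEROPAGE) */
	remove_migration_ptes(src, dst, flags);
}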
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index 2266f4dc77b6..7a01a0760405 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -163,4 +163,15 @@ void rseq_syscall(struct pt_regs *regs);
static inline void rseq_syscall(struct pt_regs *regs) { }
#endif /* !CONFIG_DEBUG_RSEQ */
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+void rseq_syscall_enter_work(long syscall);
+int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3);
+#else /* CONFIG_RSEQ_SLICE_EXTENSION */
+static inline void rseq_syscall_enter_work(long syscall) { }
+static inline int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3)
+{
+ return -ENOTSUPP;
+}
+#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
+
#endif /* _LINUX_RSEQ_H */
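A hedged userspace sketch of how rseq_slice_extension_prctl() is expected to be reached; the PR_RSEQ_SLICE_EXTENSION* and RSEQ_SLICE_EXT_ENABLE names are assumptions from the surrounding series and are not defined in this header:

#include <sys/prctl.h>
#include <errno.h>

/* All PR_ and RSEQ_ constant names below are assumptions. */
static int enable_slice_extension(void)
{
	if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
		  RSEQ_SLICE_EXT_ENABLE, 0, 0))
		return -errno;	/* e.g. kernel built without the feature */
	return 0;
}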
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index a36b472627de..cbc4a791618b 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -15,6 +15,11 @@ struct rseq_stats {
unsigned long cs;
unsigned long clear;
unsigned long fixup;
+ unsigned long s_granted;
+ unsigned long s_expired;
+ unsigned long s_revoked;
+ unsigned long s_yielded;
+ unsigned long s_aborted;
};
DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
@@ -37,6 +42,7 @@ DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
#ifdef CONFIG_RSEQ
#include <linux/jump_label.h>
#include <linux/rseq.h>
+#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/tracepoint-defs.h>
@@ -75,6 +81,147 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
#define rseq_inline __always_inline
#endif
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+DECLARE_STATIC_KEY_TRUE(rseq_slice_extension_key);
+
+static __always_inline bool rseq_slice_extension_enabled(void)
+{
+ return static_branch_likely(&rseq_slice_extension_key);
+}
+
+extern unsigned int rseq_slice_ext_nsecs;
+bool __rseq_arm_slice_extension_timer(void);
+
+static __always_inline bool rseq_arm_slice_extension_timer(void)
+{
+ if (!rseq_slice_extension_enabled())
+ return false;
+
+ if (likely(!current->rseq.slice.state.granted))
+ return false;
+
+ return __rseq_arm_slice_extension_timer();
+}
+
+static __always_inline void rseq_slice_clear_grant(struct task_struct *t)
+{
+ if (IS_ENABLED(CONFIG_RSEQ_STATS) && t->rseq.slice.state.granted)
+ rseq_stat_inc(rseq_stats.s_revoked);
+ t->rseq.slice.state.granted = false;
+}
+
+static __always_inline bool rseq_grant_slice_extension(bool work_pending)
+{
+ struct task_struct *curr = current;
+ struct rseq_slice_ctrl usr_ctrl;
+ union rseq_slice_state state;
+ struct rseq __user *rseq;
+
+ if (!rseq_slice_extension_enabled())
+ return false;
+
+ /* If not enabled or not a return from interrupt, nothing to do. */
+ state = curr->rseq.slice.state;
+ state.enabled &= curr->rseq.event.user_irq;
+ if (likely(!state.state))
+ return false;
+
+ rseq = curr->rseq.usrptr;
+ scoped_user_rw_access(rseq, efault) {
+
+ /*
+ * Quick check conditions where a grant is not possible or
+ * needs to be revoked.
+ *
+ * 1) Any TIF bit which needs to do extra work aside from
+ * rescheduling prevents a grant.
+ *
+ * 2) A previous rescheduling request resulted in a slice
+ * extension grant.
+ */
+ if (unlikely(work_pending || state.granted)) {
+ /* Clear user control unconditionally. No point in checking. */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ rseq_slice_clear_grant(curr);
+ return false;
+ }
+
+ unsafe_get_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
+ if (likely(!(usr_ctrl.request)))
+ return false;
+
+ /* Grant the slice extension */
+ usr_ctrl.request = 0;
+ usr_ctrl.granted = 1;
+ unsafe_put_user(usr_ctrl.all, &rseq->slice_ctrl.all, efault);
+ }
+
+ rseq_stat_inc(rseq_stats.s_granted);
+
+ curr->rseq.slice.state.granted = true;
+ /* Store expiry time for arming the timer on the way out */
+ curr->rseq.slice.expires = data_race(rseq_slice_ext_nsecs) + ktime_get_mono_fast_ns();
+ /*
+ * This is racy against a remote CPU setting TIF_NEED_RESCHED in
+ * several ways:
+ *
+ * 1)
+ * CPU0 CPU1
+ * clear_tsk()
+ * set_tsk()
+ * clear_preempt()
+ * Raise scheduler IPI on CPU0
+ * --> IPI
+ * fold_need_resched() -> Folds correctly
+ * 2)
+ * CPU0 CPU1
+ * set_tsk()
+ * clear_tsk()
+ * clear_preempt()
+ * Raise scheduler IPI on CPU0
+ * --> IPI
+ * fold_need_resched() <- NOOP as TIF_NEED_RESCHED is false
+ *
+ * #1 is no different from a regular remote reschedule: it sets the
+ * previously unset bit and then raises the IPI, which folds it into
+ * the preempt counter.
+ *
+ * #2 is obviously incorrect from a scheduler POV, but it is no more
+ * incorrect than the code below, which clears the reschedule request
+ * with the safety net of the timer.
+ *
+ * The important part is that the clearing is protected against the
+ * scheduler IPI and also against any other interrupt which might
+ * end up waking up a task and setting the bits in the middle of
+ * the operation:
+ *
+ * clear_tsk()
+ * ---> Interrupt
+ * wakeup_on_this_cpu()
+ * set_tsk()
+ * set_preempt()
+ * clear_preempt()
+ *
+ * which would leave inconsistent state behind.
+ */
+ scoped_guard(irq) {
+ clear_tsk_need_resched(curr);
+ clear_preempt_need_resched();
+ }
+ return true;
+
+efault:
+ force_sig(SIGSEGV);
+ return false;
+}
+
+#else /* CONFIG_RSEQ_SLICE_EXTENSION */
+static inline bool rseq_slice_extension_enabled(void) { return false; }
+static inline bool rseq_arm_slice_extension_timer(void) { return false; }
+static inline void rseq_slice_clear_grant(struct task_struct *t) { }
+static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
+#endif /* !CONFIG_RSEQ_SLICE_EXTENSION */
+
bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
bool rseq_debug_validate_ids(struct task_struct *t);
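Putting the pieces together, a condensed and heavily simplified sketch of how the exit-to-user work loop is expected to drive the grant; this is not the literal entry code, and _TIF_NEED_RESCHED handling is arch-specific:

/* Simplified sketch; not the literal exit_to_user_mode_loop(). */
static unsigned long my_exit_work_loop(unsigned long ti_work)
{
	bool other_work = ti_work & ~_TIF_NEED_RESCHED;

	/*
	 * On a pure reschedule request from an interrupt, the grant may
	 * clear TIF_NEED_RESCHED and let the task return to user space.
	 */
	if (rseq_grant_slice_extension(other_work))
		return ti_work & ~_TIF_NEED_RESCHED;

	/* ... otherwise schedule() and handle the remaining work ... */
	return ti_work;
}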
@@ -359,8 +506,15 @@ bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
if (csaddr)
unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
+
+ /* Open coded, so it's in the same user access region */
+ if (rseq_slice_extension_enabled()) {
+ /* Unconditionally clear it, no point in conditionals */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ }
}
+ rseq_slice_clear_grant(t);
/* Cache the new values */
t->rseq.ids.cpu_cid = ids->cpu_cid;
rseq_stat_inc(rseq_stats.ids);
@@ -456,8 +610,17 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
*/
u64 csaddr;
- if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
- return false;
+ scoped_user_rw_access(rseq, efault) {
+ unsafe_get_user(csaddr, &rseq->rseq_cs, efault);
+
+ /* Open coded, so it's in the same user access region */
+ if (rseq_slice_extension_enabled()) {
+ /* Unconditionally clear it, no point in conditionals */
+ unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
+ }
+ }
+
+ rseq_slice_clear_grant(t);
if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
@@ -473,6 +636,8 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t
u32 node_id = cpu_to_node(ids.cpu_id);
return rseq_update_usr(t, regs, &ids, node_id);
+efault:
+ return false;
}
static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)
@@ -527,17 +692,19 @@ static __always_inline void clear_tif_rseq(void) { }
static __always_inline bool
rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
{
- if (likely(!test_tif_rseq(ti_work)))
- return false;
-
- if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
- current->rseq.event.slowpath = true;
- set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
- return true;
+ if (unlikely(test_tif_rseq(ti_work))) {
+ if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
+ current->rseq.event.slowpath = true;
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ return true;
+ }
+ clear_tif_rseq();
}
-
- clear_tif_rseq();
- return false;
+ /*
+ * Arm the slice extension timer once no work is left and the
+ * task really goes out to user space.
+ */
+ return rseq_arm_slice_extension_timer();
}
#else /* CONFIG_GENERIC_ENTRY */
@@ -611,6 +778,7 @@ static inline void rseq_syscall_exit_to_user_mode(void) { }
static inline void rseq_irqentry_exit_to_user_mode(void) { }
static inline void rseq_exit_to_user_mode_legacy(void) { }
static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
+static inline bool rseq_grant_slice_extension(bool work_pending) { return false; }
#endif /* !CONFIG_RSEQ */
#endif /* _LINUX_RSEQ_ENTRY_H */
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index 332dc14b81c9..da5fa6f40294 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -73,12 +73,39 @@ struct rseq_ids {
};
/**
+ * union rseq_slice_state - Status information for rseq time slice extension
+ * @state: Compound to access the overall state
+ * @enabled: Time slice extension is enabled for the task
+ * @granted: Time slice extension was granted to the task
+ */
+union rseq_slice_state {
+ u16 state;
+ struct {
+ u8 enabled;
+ u8 granted;
+ };
+};
+
+/**
+ * struct rseq_slice - Status information for rseq time slice extension
+ * @state: Time slice extension state
+ * @expires: The time when a grant expires
+ * @yielded: Indicator for rseq_slice_yield()
+ */
+struct rseq_slice {
+ union rseq_slice_state state;
+ u64 expires;
+ u8 yielded;
+};
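The u16 @state member must exactly overlay the two u8 flags, since the entry code tests both with a single load; a short compile-time check of that layout contract (an illustrative assertion, not part of the patch):

static_assert(sizeof(union rseq_slice_state) == sizeof(u16));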
+
+/**
* struct rseq_data - Storage for all rseq related data
* @usrptr: Pointer to the registered user space RSEQ memory
* @len: Length of the RSEQ region
- * @sig: Signature of critial section abort IPs
+ * @sig: Signature of critical section abort IPs
* @event: Storage for event management
* @ids: Storage for cached CPU ID and MM CID
+ * @slice: Storage for time slice extension data
*/
struct rseq_data {
struct rseq __user *usrptr;
@@ -86,6 +113,9 @@ struct rseq_data {
u32 sig;
struct rseq_event event;
struct rseq_ids ids;
+#ifdef CONFIG_RSEQ_SLICE_EXTENSION
+ struct rseq_slice slice;
+#endif
};
#else /* CONFIG_RSEQ */
@@ -121,8 +151,7 @@ struct mm_cid_pcpu {
/**
* struct mm_mm_cid - Storage for per MM CID data
* @pcpu: Per CPU storage for CIDs associated to a CPU
- * @percpu: Set, when CIDs are in per CPU mode
- * @transit: Set to MM_CID_TRANSIT during a mode change transition phase
+ * @mode: Indicates per CPU and transition mode
* @max_cids: The exclusive maximum CID value for allocation and convergence
* @irq_work: irq_work to handle the affinity mode change case
* @work: Regular work to handle the affinity mode change case
@@ -139,8 +168,7 @@ struct mm_cid_pcpu {
struct mm_mm_cid {
/* Hotpath read mostly members */
struct mm_cid_pcpu __percpu *pcpu;
- unsigned int percpu;
- unsigned int transit;
+ unsigned int mode;
unsigned int max_cids;
/* Rarely used. Moves @lock and @mutex into the second cacheline */
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 92fd467547e7..58774eb3aecf 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -10,6 +10,10 @@
#define MAX_DA_NAME_LEN 32
#define MAX_DA_RETRY_RACING_EVENTS 3
+#define RV_MON_GLOBAL 0
+#define RV_MON_PER_CPU 1
+#define RV_MON_PER_TASK 2
+
#ifdef CONFIG_RV
#include <linux/array_size.h>
#include <linux/bitops.h>
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 5b87c6f4a243..3390d21c95dd 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,16 +29,16 @@ do { \
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_read_lock(rwlock) do {__acquire_shared(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release_shared(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
@@ -49,8 +49,8 @@ do { \
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+#define read_trylock(lock) _raw_read_trylock(lock)
+#define write_trylock(lock) _raw_write_trylock(lock)
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
@@ -112,12 +112,7 @@ do { \
} while (0)
#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define write_trylock_irqsave(lock, flags) _raw_write_trylock_irqsave(lock, &(flags))
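Caller-side, the macro keeps its old shape even though it is now backed by a real function; a short hypothetical use (my_try_update and the data it touches are illustrative):

static bool my_try_update(rwlock_t *lock, int *val, int v)
{
	unsigned long flags;

	if (!write_trylock_irqsave(lock, flags))
		return false;	/* contended; IRQ state already restored */
	*val = v;		/* write side, IRQs disabled */
	write_unlock_irqrestore(lock, flags);
	return true;
}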
#ifdef arch_rwlock_is_contended
#define rwlock_is_contended(lock) \
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 31d3d1116323..61a852609eab 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -15,24 +15,24 @@
* Released under the General Public License (GPL).
*/
-void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
-void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _raw_read_trylock(rwlock_t *lock);
-int __lockfunc _raw_write_trylock(rwlock_t *lock);
-void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock) __cond_acquires_shared(true, lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock) __cond_acquires(true, lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
@@ -137,6 +137,16 @@ static inline int __raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ local_irq_save(*flags);
+ if (_raw_write_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -145,6 +155,7 @@ static inline int __raw_write_trylock(rwlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline void __raw_read_lock(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -152,6 +163,7 @@ static inline void __raw_read_lock(rwlock_t *lock)
}
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags;
@@ -163,6 +175,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_read_lock_irq(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -171,6 +184,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
}
static inline void __raw_read_lock_bh(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -178,6 +192,7 @@ static inline void __raw_read_lock_bh(rwlock_t *lock)
}
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -189,6 +204,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_write_lock_irq(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -197,6 +213,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
}
static inline void __raw_write_lock_bh(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -204,6 +221,7 @@ static inline void __raw_write_lock_bh(rwlock_t *lock)
}
static inline void __raw_write_lock(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -211,6 +229,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
}
static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -220,6 +239,7 @@ static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -227,6 +247,7 @@ static inline void __raw_write_unlock(rwlock_t *lock)
}
static inline void __raw_read_unlock(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -235,6 +256,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -243,6 +265,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
}
static inline void __raw_read_unlock_irq(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -251,6 +274,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
}
static inline void __raw_read_unlock_bh(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -259,6 +283,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -267,6 +292,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
}
static inline void __raw_write_unlock_irq(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -275,6 +301,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
}
static inline void __raw_write_unlock_bh(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
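With the shared/exclusive split, helpers can now document which kind of hold they require; a hedged sketch (my_read_counter is hypothetical):

static int my_read_counter(rwlock_t *lock, const int *counter)
	__must_hold_shared(lock)	/* caller took read_lock(lock) */
{
	return *counter;		/* readers exclude writers */
}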
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 7d81fc6918ee..5353abbfdc0b 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -24,26 +24,29 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
-extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
-extern int rt_read_trylock(rwlock_t *rwlock);
-extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock) __cond_acquires_shared(true, rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases_shared(rwlock);
extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
-extern int rt_write_trylock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock) __cond_acquires(true, rwlock);
extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
static __always_inline void read_lock(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
static __always_inline void read_lock_bh(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
local_bh_disable();
rt_read_lock(rwlock);
}
static __always_inline void read_lock_irq(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
@@ -55,37 +58,43 @@ static __always_inline void read_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define read_trylock(lock) rt_read_trylock(lock)
static __always_inline void read_unlock(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
local_bh_enable();
}
static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void write_lock(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+ __acquires(rwlock)
{
rt_write_lock_nested(rwlock, subclass);
}
@@ -94,12 +103,14 @@ static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
#endif
static __always_inline void write_lock_bh(rwlock_t *rwlock)
+ __acquires(rwlock)
{
local_bh_disable();
rt_write_lock(rwlock);
}
static __always_inline void write_lock_irq(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
@@ -111,36 +122,38 @@ static __always_inline void write_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+#define write_trylock(lock) rt_write_trylock(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = write_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+ __cond_acquires(true, rwlock)
+{
+ *flags = 0;
+ return rt_write_trylock(rwlock);
+}
+#define write_trylock_irqsave(lock, flags) _write_trylock_irqsave(lock, &(flags))
static __always_inline void write_unlock(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
local_bh_enable();
}
static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 1948442e7750..d5e7316401e7 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -22,7 +22,7 @@
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
-typedef struct {
+context_lock_struct(rwlock) {
arch_rwlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -31,7 +31,8 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
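context_lock_struct() declares the type itself as a lock capability so that instances can be named in __acquires()/__releases() annotations; a hedged sketch of a new lock type opting in the same way (my_lock and its helpers are hypothetical):

context_lock_struct(my_lock) {
	arch_spinlock_t raw;
};
typedef struct my_lock my_lock_t;

extern void my_lock_acquire(my_lock_t *l) __acquires(l);
extern void my_lock_release(my_lock_t *l) __releases(l);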
@@ -54,13 +55,14 @@ typedef struct {
#include <linux/rwbase_rt.h>
-typedef struct {
+context_lock_struct(rwlock) {
struct rwbase_rt rwbase;
atomic_t readers;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874..9bf1d93d3d7b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,7 +45,7 @@
* reduce the chance that they will share the same cacheline causing
* cacheline bouncing problem.
*/
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
atomic_long_t count;
/*
* Write owner or one of the read owners as well flags regarding
@@ -76,11 +76,13 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}
static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
@@ -148,7 +150,7 @@ extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#include <linux/rwbase_rt.h>
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -180,11 +182,13 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
}
static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
@@ -202,6 +206,7 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
*/
static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held(sem);
@@ -210,6 +215,7 @@ static inline void rwsem_assert_held(const struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held_write(sem);
@@ -220,48 +226,66 @@ static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
/*
* lock for reading
*/
-extern void down_read(struct rw_semaphore *sem);
-extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
-extern int __must_check down_read_killable(struct rw_semaphore *sem);
+extern void down_read(struct rw_semaphore *sem) __acquires_shared(sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-extern int down_read_trylock(struct rw_semaphore *sem);
+extern int down_read_trylock(struct rw_semaphore *sem) __cond_acquires_shared(true, sem);
/*
* lock for writing
*/
-extern void down_write(struct rw_semaphore *sem);
-extern int __must_check down_write_killable(struct rw_semaphore *sem);
+extern void down_write(struct rw_semaphore *sem) __acquires(sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem) __cond_acquires(0, sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-extern int down_write_trylock(struct rw_semaphore *sem);
+extern int down_write_trylock(struct rw_semaphore *sem) __cond_acquires(true, sem);
/*
* release a read lock
*/
-extern void up_read(struct rw_semaphore *sem);
+extern void up_read(struct rw_semaphore *sem) __releases_shared(sem);
/*
* release a write lock
*/
-extern void up_write(struct rw_semaphore *sem);
-
-DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
-DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
-
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
-DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
+extern void up_write(struct rw_semaphore *sem) __releases(sem);
+
+DEFINE_LOCK_GUARD_1(rwsem_read, struct rw_semaphore, down_read(_T->lock), up_read(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _try, down_read_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _intr, down_read_interruptible(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_try, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_intr, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_intr, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_write, struct rw_semaphore, down_write(_T->lock), up_write(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _try, down_write_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _kill, down_write_killable(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
/*
* downgrade write lock to read lock
*/
-extern void downgrade_write(struct rw_semaphore *sem);
+extern void downgrade_write(struct rw_semaphore *sem) __releases(sem) __acquires_shared(sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -277,11 +301,11 @@ extern void downgrade_write(struct rw_semaphore *sem);
* lockdep_set_class() at lock initialization time.
* See Documentation/locking/lockdep-design.rst for more details.)
*/
-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
-extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
-extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+extern void down_read_nested(struct rw_semaphore *sem, int subclass) __acquires_shared(sem);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires_shared(0, sem);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass) __acquires(sem);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires(0, sem);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock) __acquires(sem);
# define down_write_nest_lock(sem, nest_lock) \
do { \
@@ -295,8 +319,8 @@ do { \
* [ This API should be avoided as much as possible - the
* proper abstraction for this case is completions. ]
*/
-extern void down_read_non_owner(struct rw_semaphore *sem);
-extern void up_read_non_owner(struct rw_semaphore *sem);
+extern void down_read_non_owner(struct rw_semaphore *sem) __acquires_shared(sem);
+extern void up_read_non_owner(struct rw_semaphore *sem) __releases_shared(sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
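The converted guards keep the usual cleanup.h ergonomics; a short hypothetical use of the read and write classes:

static void my_set(struct rw_semaphore *sem, int *val, int v)
{
	guard(rwsem_write)(sem);	/* up_write() runs at scope exit */
	*val = v;
}

static int my_get(struct rw_semaphore *sem, const int *val)
{
	guard(rwsem_read)(sem);		/* shared hold; up_read() at scope exit */
	return *val;
}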
diff --git a/include/linux/sched.h b/include/linux/sched.h
index da0133524d08..074ad4ef3d81 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -49,6 +49,7 @@
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
+#include <linux/time64.h>
#ifndef COMPILE_OFFSETS
#include <generated/rq-offsets.h>
#endif
@@ -86,6 +87,7 @@ struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
+struct timespec64;
struct user_event_mm;
#include <linux/sched/ext.h>
@@ -435,6 +437,9 @@ struct sched_info {
/* When were we last queued to run? */
unsigned long long last_queued;
+ /* Timestamp of max time spent waiting on a runqueue: */
+ struct timespec64 max_run_delay_ts;
+
#endif /* CONFIG_SCHED_INFO */
};
@@ -586,15 +591,10 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
- union {
- /*
- * When !@on_rq this field is vlag.
- * When cfs_rq->curr == se (which implies @on_rq)
- * this field is vprot. See protect_slice().
- */
- s64 vlag;
- u64 vprot;
- };
+ /* Approximated virtual lag: */
+ s64 vlag;
+ /* 'Protected' deadline, to give out minimum quantums: */
+ u64 vprot;
u64 slice;
u64 nr_migrations;
@@ -945,11 +945,7 @@ struct task_struct {
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
- int trc_ipi_to_cpu;
- union rcu_special trc_reader_special;
- struct list_head trc_holdout_list;
- struct list_head trc_blkd_node;
- int trc_blkd_cpu;
+ struct srcu_ctr __percpu *trc_reader_scp;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
struct sched_info sched_info;
@@ -960,7 +956,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
@@ -1190,6 +1185,7 @@ struct task_struct {
#ifdef CONFIG_IO_URING
struct io_uring_task *io_uring;
+ struct io_restriction *io_uring_restrict;
#endif
/* Namespaces: */
@@ -1419,6 +1415,10 @@ struct task_struct {
struct page_frag task_frag;
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+ struct lazy_mmu_state lazy_mmu_state;
+#endif
+
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
@@ -1702,6 +1702,47 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+/**
+ * __task_lazy_mmu_mode_active() - Test the lazy MMU mode state for a task.
+ * @tsk: The task to check.
+ *
+ * Test whether @tsk has its lazy MMU mode state set to active (i.e. enabled
+ * and not paused).
+ *
+ * This function only considers the state saved in task_struct; to test whether
+ * current actually is in lazy MMU mode, is_lazy_mmu_mode_active() should be
+ * used instead.
+ *
+ * This function is intended for architectures that implement the lazy MMU
+ * mode; it must not be called from generic code.
+ */
+static inline bool __task_lazy_mmu_mode_active(struct task_struct *tsk)
+{
+ struct lazy_mmu_state *state = &tsk->lazy_mmu_state;
+
+ return state->enable_count > 0 && state->pause_count == 0;
+}
+
+/**
+ * is_lazy_mmu_mode_active() - Test whether we are currently in lazy MMU mode.
+ *
+ * Test whether the current context is in lazy MMU mode. This is true if both:
+ * 1. We are not in interrupt context
+ * 2. Lazy MMU mode is active for the current task
+ *
+ * This function is intended for architectures that implement the lazy MMU
+ * mode; it must not be called from generic code.
+ */
+static inline bool is_lazy_mmu_mode_active(void)
+{
+ if (in_interrupt())
+ return false;
+
+ return __task_lazy_mmu_mode_active(current);
+}
+#endif
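A hedged sketch of the intended consumer: an architecture's PTE helper that batches updates only while the mode is truly active (the my_arch_* helpers are hypothetical):

static void my_arch_set_pte(pte_t *ptep, pte_t pte)
{
	if (is_lazy_mmu_mode_active()) {
		/* queue; flushed when the task leaves lazy MMU mode */
		my_arch_queue_pte_update(ptep, pte);
		return;
	}
	my_arch_write_pte(ptep, pte);	/* immediate update */
}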
+
extern struct pid *cad_pid;
/*
@@ -1776,6 +1817,11 @@ static __always_inline bool is_percpu_thread(void)
(current->nr_cpus_allowed == 1);
}
+static __always_inline bool is_user_task(struct task_struct *task)
+{
+ return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER));
+}
+
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
@@ -2093,9 +2139,9 @@ static inline int _cond_resched(void)
_cond_resched(); \
})
-extern int __cond_resched_lock(spinlock_t *lock);
-extern int __cond_resched_rwlock_read(rwlock_t *lock);
-extern int __cond_resched_rwlock_write(rwlock_t *lock);
+extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
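The annotations match the long-standing usage pattern; a short sketch of a lock-holding loop built on the cond_resched_lock() wrapper (my_drain is hypothetical):

static void my_drain(spinlock_t *lock, struct list_head *list)
	__must_hold(lock)
{
	while (!list_empty(list)) {
		/* ... detach and process one entry ... */
		cond_resched_lock(lock);	/* may drop and reacquire @lock */
	}
}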
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 5f8fd5b24a2e..e90efaf6d26e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H
+#include <linux/static_call_types.h>
#include <linux/sched/signal.h>
/*
@@ -180,4 +181,21 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
extern unsigned long long
task_sched_runtime(struct task_struct *task);
+#ifdef CONFIG_PARAVIRT
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return static_call(pv_steal_clock)(cpu);
+}
+#endif
+#endif
+
#endif /* _LINUX_SCHED_CPUTIME_H */
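A hedged sketch of the consumer side of the moved declarations, mirroring how the scheduler reads steal time when a hypervisor provides it (assumes CONFIG_PARAVIRT with the generic pv steal clock):

static u64 my_steal_time_ns(int cpu)
{
	if (static_key_false(&paravirt_steal_enabled))
		return paravirt_steal_clock(cpu);	/* static call */
	return 0;
}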
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index d8501f4709b5..dc3975ff1b2e 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,13 +2,21 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
-#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
enum hk_type {
+ /* Inverse of boot-time isolcpus= argument */
+ HK_TYPE_DOMAIN_BOOT,
+ /*
+ * Same as HK_TYPE_DOMAIN_BOOT, but additionally excludes
+ * CPUs placed in cpuset isolated partitions. As such it
+ * is always a subset of HK_TYPE_DOMAIN_BOOT.
+ */
HK_TYPE_DOMAIN,
+ /* Inverse of boot-time isolcpus=managed_irq argument */
HK_TYPE_MANAGED_IRQ,
+ /* Inverse of boot-time nohz_full= or isolcpus=nohz arguments */
HK_TYPE_KERNEL_NOISE,
HK_TYPE_MAX,
@@ -31,6 +39,7 @@ extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
extern bool housekeeping_enabled(enum hk_type type);
extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
+extern int housekeeping_update(struct cpumask *isol_mask);
extern void __init housekeeping_init(void);
#else
@@ -58,6 +67,7 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
return true;
}
+static inline int housekeeping_update(struct cpumask *isol_mask) { return 0; }
static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */
@@ -72,9 +82,7 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
- return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
- cpuset_cpu_is_isolated(cpu);
+ return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 7d6449982822..a22248aebcf9 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -737,21 +737,13 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
- unsigned long *flags);
-
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
- unsigned long *flags)
-{
- struct sighand_struct *ret;
-
- ret = __lock_task_sighand(task, flags);
- (void)__cond_lock(&task->sighand->siglock, ret);
- return ret;
-}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
+ unsigned long *flags)
+ __acquires(&task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
+ __releases(&task->sighand->siglock)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
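Callers are unchanged by the de-duplication; the canonical pattern, for reference (my_probe is hypothetical):

static bool my_probe(struct task_struct *task)
{
	unsigned long flags;

	if (!lock_task_sighand(task, &flags))
		return false;		/* task is exiting, no sighand */
	/* ... inspect task->signal under siglock ... */
	unlock_task_sighand(task, &flags);
	return true;
}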
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 525aa2a632b2..41ed884cffc9 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -214,15 +214,19 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
+ __acquires(&p->alloc_lock)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
+ __releases(&p->alloc_lock)
{
spin_unlock(&p->alloc_lock);
}
-DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+DEFINE_LOCK_GUARD_1(task_lock, struct task_struct, task_lock(_T->lock), task_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(task_lock, __acquires(&_T->alloc_lock), __releases(&(*(struct task_struct **)_T)->alloc_lock))
+#define class_task_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_lock, _T)
#endif /* _LINUX_SCHED_TASK_H */
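A short hypothetical use of the converted guard; task_unlock() now runs automatically at scope exit:

static void my_read_comm(struct task_struct *p, char *buf, size_t len)
{
	guard(task_lock)(p);		/* pairs task_lock()/task_unlock() */
	strscpy(buf, p->comm, len);	/* ->comm is stable under task_lock */
}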
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 0f28b4623ad4..765bbc3d54be 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -66,6 +66,7 @@ extern void wake_up_q(struct wake_q_head *head);
/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
static inline
void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock(lock);
@@ -77,6 +78,7 @@ void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irq(lock);
@@ -89,6 +91,7 @@ void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irqrestore(lock, flags);
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
index 27bd372cbfb1..2407d7693b6b 100644
--- a/include/linux/scmi_imx_protocol.h
+++ b/include/linux/scmi_imx_protocol.h
@@ -59,6 +59,8 @@ struct scmi_imx_misc_proto_ops {
u32 *num, u32 *val);
int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
u32 ctrl_id, u32 evt_id, u32 flags);
+ int (*misc_syslog)(const struct scmi_protocol_handle *ph, u16 *size,
+ void *array);
};
/* See LMM_ATTRIBUTES in imx95.rst */
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 1690706206e8..c022403c599a 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -151,6 +151,4 @@ static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
}
#endif
-extern struct screen_info screen_info;
-
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index d6ebf0596510..2fb266ea69fa 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -181,7 +181,6 @@ int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
-__printf(2, 0)
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 221123660e71..5a40252b8334 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -14,6 +14,7 @@
*/
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
* Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
return read_seqcount_begin(&sl->seqcount);
}
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
* Return: true if a read section retry is required, else false
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ __releases_shared(sl) __no_context_analysis
{
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
* _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
* critical section of given seqlock_t.
*/
static inline void write_sequnlock(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
* other write side sections, can be invoked from softirq contexts.
*/
static inline void write_seqlock_bh(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
* write_seqlock_bh().
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
* other write sections, can be invoked from hardirq contexts.
*/
static inline void write_seqlock_irq(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
* seqlock_t write side section opened with write_seqlock_irq().
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
unsigned long flags;
@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
*/
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
* The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock(&sl->lock);
}
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock(&sl->lock);
}
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
* from softirq contexts.
*/
static inline void read_seqlock_excl_bh(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
}
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_bh(&sl->lock);
}
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
* hardirq context.
*/
static inline void read_seqlock_excl_irq(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
}
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
unsigned long flags;
@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
*/
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irqrestore(&sl->lock, flags);
}
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
* parameter of the next read_seqbegin_or_lock() iteration.
*/
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
* Return: true if a read section retry is required, false otherwise
*/
static inline int need_seqretry(seqlock_t *lock, int seq)
+ __releases_shared(lock) __no_context_analysis
{
return !(seq & 1) && read_seqretry(lock, seq);
}
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
* with read_seqbegin_or_lock() and validated by need_seqretry().
*/
static inline void done_seqretry(seqlock_t *lock, int seq)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
*/
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags = 0;
@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
*/
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
@@ -1225,6 +1249,7 @@ struct ss_tmp {
};
static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+ __no_context_analysis
{
if (sst->lock)
spin_unlock(sst->lock);
@@ -1254,6 +1279,7 @@ extern void __scoped_seqlock_bug(void);
static __always_inline void
__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+ __no_context_analysis
{
switch (sst->state) {
case ss_done:
@@ -1296,22 +1322,31 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
}
}
+/*
+ * Context analysis no-op helper to release seqlock at the end of the for-scope;
+ * the alias analysis of the compiler will recognize that the pointer @s is an
+ * alias to @_seqlock passed to read_seqbegin(_seqlock) below.
+ */
+static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
+ __releases_shared(*((seqlock_t **)s)) __no_context_analysis {}
+
#define __scoped_seqlock_read(_seqlock, _target, _s) \
for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) = \
- { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+ { .state = ss_lockless, .data = read_seqbegin(_seqlock) }, \
+ *__UNIQUE_ID(ctx) __cleanup(__scoped_seqlock_cleanup_ctx) =\
+ (struct ss_tmp *)_seqlock; \
_s.state != ss_done; \
__scoped_seqlock_next(&_s, _seqlock, _target))
/**
- * scoped_seqlock_read (lock, ss_state) - execute the read side critical
- * section without manual sequence
- * counter handling or calls to other
- * helpers
- * @lock: pointer to seqlock_t protecting the data
- * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
- * the type of critical read section
+ * scoped_seqlock_read() - execute the read-side critical section
+ * without manual sequence counter handling
+ * or calls to other helpers
+ * @_seqlock: pointer to seqlock_t protecting the data
+ * @_target: an enum ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless}
+ * indicating the type of critical read section
*
- * Example:
+ * Example::
*
* scoped_seqlock_read (&lock, ss_lock) {
* // read-side critical section
@@ -1323,4 +1358,8 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
#define scoped_seqlock_read(_seqlock, _target) \
__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
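Expanding the kernel-doc example above into a complete, hedged reader (my_read_pair and the two counters are hypothetical):

static u64 my_read_pair(seqlock_t *lock, const u64 *a, const u64 *b)
{
	u64 sum;

	scoped_seqlock_read (lock, ss_lock) {
		/* lockless first; drops to read_seqlock_excl() on retry */
		sum = *a + *b;
	}
	return sum;
}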
+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
#endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
index dfdf43e3fa3d..2d5d793ef660 100644
--- a/include/linux/seqlock_types.h
+++ b/include/linux/seqlock_types.h
@@ -81,13 +81,14 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
-typedef struct {
+context_lock_struct(seqlock) {
/*
* Make sure that readers don't starve writers on PREEMPT_RT: use
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
*/
seqcount_spinlock_t seqcount;
spinlock_t lock;
-} seqlock_t;
+};
+typedef struct seqlock seqlock_t;
#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 86737076101d..daa4e4944ce3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2813,6 +2813,7 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
skb->data -= len;
+ DEBUG_NET_WARN_ON_ONCE(skb->data < skb->head);
skb->len += len;
return skb->data;
}
@@ -4301,6 +4302,18 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb_headlen(skb), buffer);
}
+/* Variant of skb_header_pointer() where @offset is user-controlled
+ * and potentially negative.
+ */
+static inline void * __must_check
+skb_header_pointer_careful(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ if (unlikely(offset < 0 && -offset > skb_headroom(skb)))
+ return NULL;
+ return skb_header_pointer(skb, offset, len, buffer);
+}
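A hedged sketch of the intended call site, where the offset originates from user space and may legitimately be negative (my_parse_l4 is hypothetical):

static const struct tcphdr *my_parse_l4(const struct sk_buff *skb, int offset,
					struct tcphdr *buf)
{
	/* rejects negative offsets reaching in front of skb->head */
	return skb_header_pointer_careful(skb, offset, sizeof(*buf), buf);
}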
+
static inline void * __must_check
skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
{
@@ -4763,7 +4776,7 @@ static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
}
}
-static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+static __always_inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
@@ -4989,6 +5002,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_INET_PSP)
SKB_EXT_PSP,
#endif
+#if IS_ENABLED(CONFIG_CAN)
+ SKB_EXT_CAN,
+#endif
SKB_EXT_NUM, /* must be last */
};
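A hedged sketch of where the _careful variant is meant to sit, e.g. a parser handed an offset from userspace; the function name and error code are invented for illustration:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_peek_byte(const struct sk_buff *skb, int user_off, u8 *out)
{
	u8 buf;
	const u8 *p;

	/* Rejects negative offsets that would reach in front of skb->head. */
	p = skb_header_pointer_careful(skb, user_off, sizeof(buf), &buf);
	if (!p)
		return -EINVAL;
	*out = *p;
	return 0;
}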
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 9e49372ef1a0..05c8486bafac 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -15,7 +15,7 @@
*
* Takes an additional reference on the paged fragment @frag.
*/
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static __always_inline void __skb_frag_ref(skb_frag_t *frag)
{
get_netmem(skb_frag_netmem(frag));
}
@@ -27,14 +27,14 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
*
* Takes an additional reference on the @f'th paged fragment of @skb.
*/
-static inline void skb_frag_ref(struct sk_buff *skb, int f)
+static __always_inline void skb_frag_ref(struct sk_buff *skb, int f)
{
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
bool napi_pp_put_page(netmem_ref netmem);
-static inline void skb_page_unref(netmem_ref netmem, bool recycle)
+static __always_inline void skb_page_unref(netmem_ref netmem, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
if (recycle && napi_pp_put_page(netmem))
@@ -51,7 +51,7 @@ static inline void skb_page_unref(netmem_ref netmem, bool recycle)
* Releases a reference on the paged fragment @frag
* or recycles the page via the page_pool API.
*/
-static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
+static __always_inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
skb_page_unref(skb_frag_netmem(frag), recycle);
}
@@ -63,7 +63,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
*
* Releases a reference on the @f'th paged fragment of @skb.
*/
-static inline void skb_frag_unref(struct sk_buff *skb, int f)
+static __always_inline void skb_frag_unref(struct sk_buff *skb, int f)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 49847888c287..829b281d6c9c 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -97,6 +97,8 @@ struct sk_psock {
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
spinlock_t ingress_lock;
+ /** @msg_tot_len: Total bytes queued in ingress_msg list. */
+ u32 msg_tot_len;
unsigned long state;
struct list_head link;
spinlock_t link_lock;
@@ -141,6 +143,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
+int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags, int *copied_from_self);
bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
@@ -319,6 +323,27 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline u32 sk_psock_get_msg_len_nolock(struct sk_psock *psock)
+{
+ /* Used by ioctl to read msg_tot_len only; lock-free for performance */
+ return READ_ONCE(psock->msg_tot_len);
+}
+
+static inline void sk_psock_msg_len_add_locked(struct sk_psock *psock, int diff)
+{
+ /* Use WRITE_ONCE() to pair with the lockless read in sk_psock_get_msg_len_nolock().
+ * ingress_lock must be held to prevent concurrent updates to msg_tot_len.
+ */
+ WRITE_ONCE(psock->msg_tot_len, psock->msg_tot_len + diff);
+}
+
+static inline void sk_psock_msg_len_add(struct sk_psock *psock, int diff)
+{
+ spin_lock_bh(&psock->ingress_lock);
+ sk_psock_msg_len_add_locked(psock, diff);
+ spin_unlock_bh(&psock->ingress_lock);
+}
+
static inline bool sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
@@ -327,6 +352,7 @@ static inline bool sk_psock_queue_msg(struct sk_psock *psock,
spin_lock_bh(&psock->ingress_lock);
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
list_add_tail(&msg->list, &psock->ingress_msg);
+ sk_psock_msg_len_add_locked(psock, msg->sg.size);
ret = true;
} else {
sk_msg_free(psock->sk, msg);
@@ -343,18 +369,25 @@ static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
spin_lock_bh(&psock->ingress_lock);
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
- if (msg)
+ if (msg) {
list_del(&msg->list);
+ sk_psock_msg_len_add_locked(psock, -msg->sg.size);
+ }
spin_unlock_bh(&psock->ingress_lock);
return msg;
}
+static inline struct sk_msg *sk_psock_peek_msg_locked(struct sk_psock *psock)
+{
+ return list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+}
+
static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
struct sk_msg *msg;
spin_lock_bh(&psock->ingress_lock);
- msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
+ msg = sk_psock_peek_msg_locked(psock);
spin_unlock_bh(&psock->ingress_lock);
return msg;
}
@@ -521,6 +554,39 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
+/* For TCP only; the caller holds the socket lock. */
+static inline ssize_t sk_psock_msg_inq(struct sock *sk)
+{
+ struct sk_psock *psock;
+ ssize_t inq = 0;
+
+ psock = sk_psock_get(sk);
+ if (likely(psock)) {
+ inq = sk_psock_get_msg_len_nolock(psock);
+ sk_psock_put(sk, psock);
+ }
+ return inq;
+}
+
+/* For UDP only; sk is not locked. */
+static inline ssize_t sk_msg_first_len(struct sock *sk)
+{
+ struct sk_psock *psock;
+ struct sk_msg *msg;
+ ssize_t inq = 0;
+
+ psock = sk_psock_get(sk);
+ if (likely(psock)) {
+ spin_lock_bh(&psock->ingress_lock);
+ msg = sk_psock_peek_msg_locked(psock);
+ if (msg)
+ inq = msg->sg.size;
+ spin_unlock_bh(&psock->ingress_lock);
+ sk_psock_put(sk, psock);
+ }
+ return inq;
+}
+
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)
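As an illustrative consumer of the new accounting, a SIOCINQ-style query could sit on top of sk_psock_msg_inq(); per the comment above, this assumes TCP with the socket lock taken:

#include <net/sock.h>
#include <linux/skmsg.h>

static int example_tcp_inq(struct sock *sk)
{
	ssize_t inq;

	lock_sock(sk);
	/* Boils down to a lock-free READ_ONCE() of psock->msg_tot_len. */
	inq = sk_psock_msg_inq(sk);
	release_sock(sk);

	return min_t(ssize_t, inq, INT_MAX);
}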
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2482992248dc..c5fde8740281 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
+#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
@@ -57,8 +58,9 @@ enum _slab_flag_bits {
#endif
_SLAB_OBJECT_POISON,
_SLAB_CMPXCHG_DOUBLE,
-#ifdef CONFIG_SLAB_OBJ_EXT
_SLAB_NO_OBJ_EXT,
+#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
+ _SLAB_OBJ_EXT_IN_OBJ,
#endif
_SLAB_FLAGS_LAST_BIT
};
@@ -238,10 +240,12 @@ enum _slab_flag_bits {
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Slab created using create_boot_cache */
-#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
+
+#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
+#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ)
#else
-#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
+#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_UNUSED
#endif
/*
@@ -299,24 +303,26 @@ struct kmem_cache_args {
unsigned int usersize;
/**
* @freeptr_offset: Custom offset for the free pointer
- * in &SLAB_TYPESAFE_BY_RCU caches
+ * in caches with &SLAB_TYPESAFE_BY_RCU or @ctor
*
- * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
- * outside of the object. This might cause the object to grow in size.
- * Cache creators that have a reason to avoid this can specify a custom
- * free pointer offset in their struct where the free pointer will be
- * placed.
+ * By default, &SLAB_TYPESAFE_BY_RCU and @ctor caches place the free
+ * pointer outside of the object. This might cause the object to grow
+ * in size. Cache creators that have a reason to avoid this can specify
+ * a custom free pointer offset in their data structure where the free
+ * pointer will be placed.
*
- * Note that placing the free pointer inside the object requires the
- * caller to ensure that no fields are invalidated that are required to
- * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
- * details).
+ * For caches with &SLAB_TYPESAFE_BY_RCU, the caller must ensure that
+ * the free pointer does not overlay fields required to guard against
+ * object recycling (See &SLAB_TYPESAFE_BY_RCU for details).
*
- * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
- * is specified, %use_freeptr_offset must be set %true.
+ * For caches with @ctor, the caller must ensure that the free pointer
+ * does not overlay fields initialized by the constructor.
+ *
+ * Currently, only caches with &SLAB_TYPESAFE_BY_RCU or @ctor
+ * may specify @freeptr_offset.
*
- * Note that @ctor currently isn't supported with custom free pointers
- * as a @ctor requires an external free pointer.
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, @use_freeptr_offset must be set %true.
*/
unsigned int freeptr_offset;
/**
@@ -507,7 +513,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
void kfree(const void *objp);
void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
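A minimal sketch of the newly-allowed combination described above: a cache with a @ctor and an in-object free pointer placed in a field the constructor leaves untouched (all names illustrative):

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
	refcount_t ref;		/* initialized by the constructor */
	unsigned long scratch;	/* untouched by ctor: may hold the free pointer */
};

static void example_ctor(void *addr)
{
	struct example_obj *obj = addr;

	refcount_set(&obj->ref, 1);
}

static struct kmem_cache *example_cache_create(void)
{
	struct kmem_cache_args args = {
		.ctor = example_ctor,
		.freeptr_offset = offsetof(struct example_obj, scratch),
		.use_freeptr_offset = true,
	};

	return kmem_cache_create("example_obj", sizeof(struct example_obj),
				 &args, 0);
}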
@@ -965,6 +970,111 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+/**
+ * __alloc_objs - Allocate objects of a given type with a given kmalloc wrapper
+ * @KMALLOC: which size-based kmalloc wrapper to allocate with.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type to allocate space for.
+ * @COUNT: how many @TYPE objects to allocate.
+ *
+ * Returns: Newly allocated pointer to (first) @TYPE of @COUNT-many
+ * allocated @TYPE objects, or NULL on failure.
+ */
+#define __alloc_objs(KMALLOC, GFP, TYPE, COUNT) \
+({ \
+ const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \
+ (TYPE *)KMALLOC(__obj_size, GFP); \
+})
+
+/**
+ * __alloc_flex - Allocate an object that has a trailing flexible array
+ * @KMALLOC: kmalloc wrapper function to use for allocation.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type of structure to allocate space for.
+ * @FAM: The name of the flexible array member of @TYPE structure.
+ * @COUNT: how many @FAM elements to allocate space for.
+ *
+ * Returns: Newly allocated pointer to @TYPE with @COUNT-many trailing
+ * @FAM elements, or NULL on failure or if @COUNT cannot be represented
+ * by the member of @TYPE that counts the @FAM elements (annotated via
+ * __counted_by()).
+ */
+#define __alloc_flex(KMALLOC, GFP, TYPE, FAM, COUNT) \
+({ \
+ const size_t __count = (COUNT); \
+ const size_t __obj_size = struct_size_t(TYPE, FAM, __count); \
+ TYPE *__obj_ptr; \
+ if (WARN_ON_ONCE(overflows_flex_counter_type(TYPE, FAM, __count))) \
+ __obj_ptr = NULL; \
+ else \
+ __obj_ptr = KMALLOC(__obj_size, GFP); \
+ if (__obj_ptr) \
+ __set_flex_counter(__obj_ptr->FAM, __count); \
+ __obj_ptr; \
+})
+
+/**
+ * kmalloc_obj - Allocate a single instance of the given type
+ * @VAR_OR_TYPE: Variable or type to allocate.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to a @VAR_OR_TYPE on success, or NULL
+ * on failure.
+ */
+#define kmalloc_obj(VAR_OR_TYPE, GFP) \
+ __alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), 1)
+
+/**
+ * kmalloc_objs - Allocate an array of the given type
+ * @VAR_OR_TYPE: Variable or type to allocate an array of.
+ * @COUNT: How many elements in the array.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to array of @VAR_OR_TYPE on success,
+ * or NULL on failure.
+ */
+#define kmalloc_objs(VAR_OR_TYPE, COUNT, GFP) \
+ __alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), COUNT)
+
+/**
+ * kmalloc_flex - Allocate a single instance of the given flexible structure
+ * @VAR_OR_TYPE: Variable or type to allocate (with its flex array).
+ * @FAM: The name of the flexible array member of the structure.
+ * @COUNT: How many flexible array member elements are desired.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to @VAR_OR_TYPE on success, NULL on
+ * failure. If @FAM has been annotated with __counted_by(), the allocation
+ * will immediately fail if @COUNT is larger than what the type of the
+ * struct's counter variable can represent.
+ */
+#define kmalloc_flex(VAR_OR_TYPE, FAM, COUNT, GFP) \
+ __alloc_flex(kmalloc, GFP, typeof(VAR_OR_TYPE), FAM, COUNT)
+
+/* All kzalloc aliases for kmalloc_(obj|objs|flex). */
+#define kzalloc_obj(P, GFP) \
+ __alloc_objs(kzalloc, GFP, typeof(P), 1)
+#define kzalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kzalloc, GFP, typeof(P), COUNT)
+#define kzalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kzalloc, GFP, typeof(P), FAM, COUNT)
+
+/* All kvmalloc aliases for kmalloc_(obj|objs|flex). */
+#define kvmalloc_obj(P, GFP) \
+ __alloc_objs(kvmalloc, GFP, typeof(P), 1)
+#define kvmalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kvmalloc, GFP, typeof(P), COUNT)
+#define kvmalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kvmalloc, GFP, typeof(P), FAM, COUNT)
+
+/* All kvzalloc aliases for kmalloc_(obj|objs|flex). */
+#define kvzalloc_obj(P, GFP) \
+ __alloc_objs(kvzalloc, GFP, typeof(P), 1)
+#define kvzalloc_objs(P, COUNT, GFP) \
+ __alloc_objs(kvzalloc, GFP, typeof(P), COUNT)
+#define kvzalloc_flex(P, FAM, COUNT, GFP) \
+ __alloc_flex(kvzalloc, GFP, typeof(P), FAM, COUNT)
+
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
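As a usage sketch of the type-based allocators (struct and counts invented for illustration):

#include <linux/slab.h>
#include <linux/types.h>

struct example_flex {
	unsigned int nr;
	u32 items[] __counted_by(nr);
};

static struct example_flex *example_alloc_flex(unsigned int count)
{
	/* Size is computed from the type, @nr is filled in automatically,
	 * and the allocation fails up front if @count overflows it.
	 */
	return kzalloc_flex(struct example_flex, items, count, GFP_KERNEL);
}

static u32 *example_alloc_array(size_t count)
{
	/* Zeroed array of @count u32s, with overflow-checked sizing. */
	return kzalloc_objs(u32, count, GFP_KERNEL);
}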
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 91d0ecf3b8d3..1ebd88026119 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -62,6 +62,7 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd);
void __noreturn panic_smp_self_stop(void);
void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);
+int panic_smp_redirect_cpu(int target_cpu, void *msg);
/*
* Call a function on all processors
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index 736f53018017..bda3c528b515 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -126,6 +126,13 @@ int apple_rtkit_wake(struct apple_rtkit *rtk);
int apple_rtkit_shutdown(struct apple_rtkit *rtk);
/*
+ * Put the co-processor into the lowest power state. Note that it usually
+ * is not possible to recover from this state without a full SoC reset.
+ */
+
+int apple_rtkit_poweroff(struct apple_rtkit *rtk);
+
+/*
* Put the co-processor into idle mode
*/
int apple_rtkit_idle(struct apple_rtkit *rtk);
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 0c3906e8ad19..a06b5a61f337 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -23,6 +23,8 @@
#define CMDQ_THR_SPR_IDX2 (2)
#define CMDQ_THR_SPR_IDX3 (3)
+#define CMDQ_SUBSYS_INVALID (U8_MAX)
+
struct cmdq_pkt;
enum cmdq_logic_op {
@@ -52,8 +54,20 @@ struct cmdq_operand {
struct cmdq_client_reg {
u8 subsys;
+ phys_addr_t pa_base;
u16 offset;
u16 size;
+
+ /*
+ * Clients only use these callbacks for MMIO access, so they do
+ * not need to handle the mminfra_offset. The mminfra_offset is
+ * only used for DRAM access and is handled internally by the
+ * CMDQ APIs.
+ */
+ int (*pkt_write)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base,
+ u16 offset, u32 value);
+ int (*pkt_write_mask)(struct cmdq_pkt *pkt, u8 subsys, u32 pa_base,
+ u16 offset, u32 value, u32 mask);
};
struct cmdq_client {
@@ -122,6 +136,32 @@ void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
/**
+ * cmdq_pkt_write_pa() - append write command to the CMDQ packet with pa_base
+ * @pkt: the CMDQ packet
+ * @subsys: unused parameter
+ * @pa_base: the physical address base of the hardware register
+ * @offset: register offset from @pa_base
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_write_subsys() - append write command to the CMDQ packet with subsys
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @pa_base: unused parameter
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value);
+
+/**
* cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
@@ -134,6 +174,34 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+/**
+ * cmdq_pkt_write_mask_pa() - append write command with mask to the CMDQ packet with pa
+ * @pkt: the CMDQ packet
+ * @subsys: unused parameter
+ * @pa_base: the physical address base of the hardware register
+ * @offset: register offset from @pa_base
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_write_mask_subsys() - append write command with mask to the CMDQ packet with subsys
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @pa_base: unused parameter
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value, u32 mask);
+
/*
* cmdq_pkt_read_s() - append read_s command to the CMDQ packet
* @pkt: the CMDQ packet
@@ -418,12 +486,37 @@ static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u3
return -ENOENT;
}
+static inline int cmdq_pkt_write_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
return -ENOENT;
}
+static inline int cmdq_pkt_write_mask_pa(struct cmdq_pkt *pkt, u8 subsys /*unused*/,
+ u32 pa_base, u16 offset, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_mask_subsys(struct cmdq_pkt *pkt, u8 subsys,
+ u32 pa_base /*unused*/, u16 offset,
+ u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
static inline int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 reg_idx)
{
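An illustrative caller of the new write helpers, dispatching on whether a valid subsys code is available; the wrapper itself is a sketch, not part of this patch:

#include <linux/soc/mediatek/mtk-cmdq.h>

static int example_write_reg(struct cmdq_client_reg *creg,
			     struct cmdq_pkt *pkt, u16 offset, u32 value)
{
	if (creg->subsys != CMDQ_SUBSYS_INVALID)
		return cmdq_pkt_write_subsys(pkt, creg->subsys, creg->pa_base,
					     offset, value);
	return cmdq_pkt_write_pa(pkt, CMDQ_SUBSYS_INVALID, creg->pa_base,
				 offset, value);
}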
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index a532d1e4b1f4..6e1b1202e818 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -122,7 +122,7 @@ struct gpr_ibasic_rsp_accepted_t {
#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF)
#define APR_SVC_MINOR_VERSION(v) (v & 0xFF)
-typedef int (*gpr_port_cb) (struct gpr_resp_pkt *d, void *priv, int op);
+typedef int (*gpr_port_cb) (const struct gpr_resp_pkt *d, void *priv, int op);
struct packet_router;
struct pkt_router_svc {
struct device *dev;
@@ -155,8 +155,8 @@ struct apr_driver {
int (*probe)(struct apr_device *sl);
void (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
- struct apr_resp_pkt *d);
- int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
+ const struct apr_resp_pkt *d);
+ gpr_port_cb gpr_callback;
struct device_driver driver;
const struct apr_device_id *id_table;
};
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 0287f9182c4d..8243ab3a12a8 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -74,13 +74,17 @@
#define LLCC_CAMSRTIP 73
#define LLCC_CAMRTRF 74
#define LLCC_CAMSRTRF 75
+#define LLCC_OOBM_NS 81
+#define LLCC_OOBM_S 82
#define LLCC_VIDEO_APV 83
#define LLCC_COMPUTE1 87
#define LLCC_CPUSS_OPP 88
#define LLCC_CPUSSMPAM 89
+#define LLCC_VIDSC_VSP1 91
#define LLCC_CAM_IPE_STROV 92
#define LLCC_CAM_OFE_STROV 93
#define LLCC_CPUSS_HEU 94
+#define LLCC_PCIE_TCU 97
#define LLCC_MDM_PNG_FIXED 100
/**
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 8ea8230579a2..82372e0db0a1 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -10,19 +10,19 @@
struct device;
struct firmware;
-struct qcom_scm_pas_metadata;
+struct qcom_scm_pas_context;
#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
ssize_t qcom_mdt_get_size(const struct firmware *fw);
-int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, phys_addr_t mem_phys,
- struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
+int qcom_mdt_pas_load(struct qcom_scm_pas_context *ctx, const struct firmware *fw,
+ const char *firmware, void *mem_region, phys_addr_t *reloc_base);
+
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
@@ -37,13 +37,6 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
return -ENODEV;
}
-static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, phys_addr_t mem_phys,
- struct qcom_scm_pas_metadata *pas_metadata_ctx)
-{
- return -ENODEV;
-}
-
static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id,
void *mem_region, phys_addr_t mem_phys,
@@ -52,6 +45,13 @@ static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
return -ENODEV;
}
+static inline int qcom_mdt_pas_load(struct qcom_scm_pas_context *ctx,
+ const struct firmware *fw, const char *firmware,
+ void *mem_region, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
static inline int qcom_mdt_load_no_init(struct device *dev,
const struct firmware *fw,
const char *fw_name, void *mem_region,
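A hedged sketch of the consolidated load path that replaces the removed qcom_mdt_pas_init() step; setup of the qcom_scm_pas_context is elided and the names are illustrative:

#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>

static int example_pas_load(struct qcom_scm_pas_context *ctx,
			    struct device *dev, const char *fw_name,
			    void *mem_region)
{
	const struct firmware *fw;
	phys_addr_t reloc_base;
	int ret;

	ret = request_firmware(&fw, fw_name, dev);
	if (ret)
		return ret;

	/* Assumed to combine the old pas_init + load steps in one call. */
	ret = qcom_mdt_pas_load(ctx, fw, fw_name, mem_region, &reloc_base);
	release_firmware(fw);
	return ret;
}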
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
index 0a4edfe3d96d..f052e241736c 100644
--- a/include/linux/soc/qcom/ubwc.h
+++ b/include/linux/soc/qcom/ubwc.h
@@ -8,6 +8,7 @@
#define __QCOM_UBWC_H__
#include <linux/bits.h>
+#include <linux/printk.h>
#include <linux/types.h>
struct qcom_ubwc_cfg_data {
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 82390712794c..5774e554c0f0 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -20,10 +20,10 @@
.opcode = __opcode, \
}
-#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \
+#define SPI_MEM_DTR_OP_RPT_CMD(__opcode, __buswidth) \
{ \
- .nbytes = 1, \
- .opcode = __opcode, \
+ .nbytes = 2, \
+ .opcode = __opcode | __opcode << 8, \
.buswidth = __buswidth, \
.dtr = true, \
}
@@ -43,6 +43,14 @@
.dtr = true, \
}
+#define SPI_MEM_DTR_OP_RPT_ADDR(__val, __buswidth) \
+ { \
+ .nbytes = 2, \
+ .val = __val | __val << 8, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
+ }
+
#define SPI_MEM_OP_NO_ADDR { }
#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \
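For illustration, a DTR operation built from the repeated-pattern macros; the opcode, widths and lengths here are made-up values:

#include <linux/spi/spi-mem.h>

/* In DTR mode both clock edges carry data, so the single command and
 * address bytes are doubled into two-byte repeated patterns.
 */
static const struct spi_mem_op example_dtr_read_op = {
	.cmd = SPI_MEM_DTR_OP_RPT_CMD(0xee, 8),
	.addr = SPI_MEM_DTR_OP_RPT_ADDR(0x00, 8),
	.dummy = SPI_MEM_OP_DUMMY(16, 8),
	.data = SPI_MEM_OP_DATA_IN(4, NULL, 8),
};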
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index cb2c2df31089..fd8dce4169f7 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,9 @@
/* Max no. of CS supported per spi device */
#define SPI_DEVICE_CS_CNT_MAX 4
+/* Max no. of data lanes supported per spi device */
+#define SPI_DEVICE_DATA_LANE_CNT_MAX 8
+
struct dma_chan;
struct software_node;
struct ptp_system_timestamp;
@@ -174,6 +177,10 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
* @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
* (optional, NULL when not using a GPIO line)
+ * @tx_lane_map: Map of peripheral lanes (index) to controller lanes (value).
+ * @num_tx_lanes: Number of transmit lanes wired up.
+ * @rx_lane_map: Map of peripheral lanes (index) to controller lanes (value).
+ * @num_rx_lanes: Number of receive lanes wired up.
*
* A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
@@ -242,6 +249,12 @@ struct spi_device {
struct gpio_desc *cs_gpiod[SPI_DEVICE_CS_CNT_MAX]; /* Chip select gpio desc */
+ /* Multi-lane SPI controller support. */
+ u8 tx_lane_map[SPI_DEVICE_DATA_LANE_CNT_MAX];
+ u8 num_tx_lanes;
+ u8 rx_lane_map[SPI_DEVICE_DATA_LANE_CNT_MAX];
+ u8 num_rx_lanes;
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -401,6 +414,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* SPI targets, and are numbered from zero to num_chipselects.
* each target has a chipselect signal, but it's common that not
* every chipselect is connected to a target.
+ * @num_data_lanes: Number of data lanes supported by this controller. Default is 1.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
* @buswidth_override_bits: flags to override for this controller driver
@@ -576,6 +590,14 @@ struct spi_controller {
*/
u16 num_chipselect;
+ /*
+ * Some specialized SPI controllers can have more than one physical
+ * data lane interface per controller (each having it's own serializer).
+ * This specifies the number of data lanes in that case. Other
+ * controllers do not need to set this (defaults to 1).
+ */
+ u16 num_data_lanes;
+
/* Some SPI controllers pose alignment requirements on DMAable
* buffers; let protocol drivers know about these requirements.
*/
@@ -882,6 +904,15 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
+#if IS_ENABLED(CONFIG_OF)
+extern struct spi_controller *of_find_spi_controller_by_node(struct device_node *node);
+#else
+static inline struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif
+
#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER)
extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
@@ -959,6 +990,8 @@ struct spi_res {
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
* (SPI_NBITS_SINGLE) is used.
+ * @multi_lane_mode: How to serialize data on multiple lanes. One of the
+ * SPI_MULTI_LANE_MODE_* values.
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
@@ -1095,6 +1128,12 @@ struct spi_transfer {
unsigned cs_change:1;
unsigned tx_nbits:4;
unsigned rx_nbits:4;
+
+#define SPI_MULTI_LANE_MODE_SINGLE 0 /* only use single lane */
+#define SPI_MULTI_LANE_MODE_STRIPE 1 /* one data word per lane */
+#define SPI_MULTI_LANE_MODE_MIRROR 2 /* same word sent on all lanes */
+ unsigned multi_lane_mode:2;
+
unsigned timestamped:1;
bool dtr_mode;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
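An illustrative peripheral-side transfer using the new striping mode; the capability check and error code are assumptions, not mandated by this patch:

#include <linux/spi/spi.h>

static int example_striped_write(struct spi_device *spi,
				 const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
		/* One data word per lane, spread across the wired-up lanes. */
		.multi_lane_mode = SPI_MULTI_LANE_MODE_STRIPE,
	};

	if (spi->controller->num_data_lanes < 2)
		return -EOPNOTSUPP;

	return spi_sync_transfer(spi, &xfer, 1);
}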
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d3561c4a080e..e1e2f144af9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -212,7 +212,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* various methods are defined as nops in the case they are not
* required.
*/
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) _raw_spin_trylock(lock)
#define raw_spin_lock(lock) _raw_spin_lock(lock)
@@ -283,22 +283,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define raw_spin_trylock_bh(lock) \
- __cond_lock(lock, _raw_spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) _raw_spin_trylock_bh(lock)
-#define raw_spin_trylock_irq(lock) \
-({ \
- local_irq_disable(); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_enable(); 0; }); \
-})
+#define raw_spin_trylock_irq(lock) _raw_spin_trylock_irq(lock)
-#define raw_spin_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define raw_spin_trylock_irqsave(lock, flags) _raw_spin_trylock_irqsave(lock, &(flags))
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
@@ -347,16 +336,19 @@ do { \
#endif
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock(&lock->rlock);
}
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_bh(&lock->rlock);
}
static __always_inline int spin_trylock(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock(&lock->rlock);
}
@@ -364,14 +356,17 @@ static __always_inline int spin_trylock(spinlock_t *lock)
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -379,47 +374,57 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock(&lock->rlock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_bh(&lock->rlock);
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irq(&lock->rlock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
static __always_inline int spin_trylock_bh(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_bh(&lock->rlock);
}
static __always_inline int spin_trylock_irq(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_irq(&lock->rlock);
}
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
/**
* spin_is_locked() - Check whether a spinlock is locked.
@@ -497,23 +502,17 @@ static inline int rwlock_needbreak(rwlock_t *lock)
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))
-extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
-#define atomic_dec_and_raw_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
@@ -535,86 +534,144 @@ void free_bucket_spinlocks(spinlock_t *locks);
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_nested_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
raw_spin_lock_bh(_T->lock),
raw_spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
spin_lock_bh(_T->lock),
spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock, _T)
DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T)
DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T)
DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock, _T)
DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T)
DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)
+
+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
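As a usage sketch, the reworked trylock and the guard constructors keep their existing call forms while becoming visible to the context analysis (names illustrative):

#include <linux/spinlock.h>

static bool example_try_update(spinlock_t *lock, int *counter)
{
	unsigned long flags;

	/* Now a real function, so the conditional acquire is expressed
	 * with __cond_acquires() instead of the old __cond_lock() wrapper.
	 */
	if (!spin_trylock_irqsave(lock, flags))
		return false;
	(*counter)++;
	spin_unlock_irqrestore(lock, flags);
	return true;
}

static void example_guarded_update(spinlock_t *lock, int *counter)
{
	guard(spinlock_irqsave)(lock);
	(*counter)++;
}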
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 9ecb0ab504e3..bda5e7a390cd 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -34,8 +34,8 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
-int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(true, lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) __cond_acquires(true, lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
@@ -84,6 +84,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
preempt_disable();
if (do_raw_spin_trylock(lock)) {
@@ -94,6 +95,26 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return 0;
}
+static __always_inline bool _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ local_irq_disable();
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_enable();
+ return false;
+}
+
+static __always_inline bool _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ local_irq_save(*flags);
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
@@ -102,6 +123,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -113,6 +135,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -121,6 +144,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -128,6 +152,7 @@ static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -137,6 +162,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -145,6 +171,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -153,6 +180,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
}
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -161,6 +189,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -168,6 +197,7 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 819aeba1c87e..a9d5c7c66e03 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,68 +24,120 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
-#define ___LOCK(lock) \
+#define ___LOCK_(lock) \
do { __acquire(lock); (void)(lock); } while (0)
-#define __LOCK(lock) \
- do { preempt_disable(); ___LOCK(lock); } while (0)
+#define ___LOCK_shared(lock) \
+ do { __acquire_shared(lock); (void)(lock); } while (0)
-#define __LOCK_BH(lock) \
- do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
+#define __LOCK(lock, ...) \
+ do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQ(lock) \
- do { local_irq_disable(); __LOCK(lock); } while (0)
+#define __LOCK_BH(lock, ...) \
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQSAVE(lock, flags) \
- do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define __LOCK_IRQ(lock, ...) \
+ do { local_irq_disable(); __LOCK(lock, ##__VA_ARGS__); } while (0)
-#define ___UNLOCK(lock) \
+#define __LOCK_IRQSAVE(lock, flags, ...) \
+ do { local_irq_save(flags); __LOCK(lock, ##__VA_ARGS__); } while (0)
+
+#define ___UNLOCK_(lock) \
do { __release(lock); (void)(lock); } while (0)
-#define __UNLOCK(lock) \
- do { preempt_enable(); ___UNLOCK(lock); } while (0)
+#define ___UNLOCK_shared(lock) \
+ do { __release_shared(lock); (void)(lock); } while (0)
+
+#define __UNLOCK(lock, ...) \
+ do { preempt_enable(); ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_BH(lock) \
+#define __UNLOCK_BH(lock, ...) \
do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
- ___UNLOCK(lock); } while (0)
+ ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_IRQ(lock) \
- do { local_irq_enable(); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQ(lock, ...) \
+ do { local_irq_enable(); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
-#define __UNLOCK_IRQRESTORE(lock, flags) \
- do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQRESTORE(lock, flags, ...) \
+ do { local_irq_restore(flags); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock, shared)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
-#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock, shared)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock, shared)
#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags, shared)
#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+
+static __always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_BH(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQ(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
+static __always_inline int _raw_read_trylock(rwlock_t *lock)
+ __cond_acquires_shared(true, lock)
+{
+ __LOCK(lock, shared);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock(rwlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
#define _raw_spin_unlock(lock) __UNLOCK(lock)
-#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock, shared)
#define _raw_write_unlock(lock) __UNLOCK(lock)
#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock, shared)
#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock, shared)
#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _raw_spin_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
#define _raw_read_unlock_irqrestore(lock, flags) \
- __UNLOCK_IRQRESTORE(lock, flags)
+ __UNLOCK_IRQRESTORE(lock, flags, shared)
#define _raw_write_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index f6499c37157d..373618a4243c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -36,10 +36,11 @@ extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock)
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
-extern int rt_spin_trylock_bh(spinlock_t *lock);
-extern int rt_spin_trylock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
+extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -82,6 +83,7 @@ static __always_inline void spin_lock(spinlock_t *lock)
__spin_lock_irqsave_nested(lock, flags, subclass)
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock)
{
/* Investigate: Drop bh when blocking ? */
local_bh_disable();
@@ -89,6 +91,7 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
}
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -101,45 +104,44 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
local_bh_enable();
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
-#define spin_trylock(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock(lock) rt_spin_trylock(lock)
-#define spin_trylock_bh(lock) \
- __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock) rt_spin_trylock_bh(lock)
-#define spin_trylock_irq(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock_irq(lock) rt_spin_trylock(lock)
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = spin_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ *flags = 0;
+ return rt_spin_trylock(lock);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
#define spin_is_contended(lock) (((void)(lock), 0))
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 2dfa35ffec76..b65bb6e4451c 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -14,7 +14,7 @@
#ifndef CONFIG_PREEMPT_RT
/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
-typedef struct spinlock {
+context_lock_struct(spinlock) {
union {
struct raw_spinlock rlock;
@@ -26,7 +26,8 @@ typedef struct spinlock {
};
#endif
};
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define ___SPIN_LOCK_INITIALIZER(lockname) \
{ \
@@ -47,12 +48,13 @@ typedef struct spinlock {
/* PREEMPT_RT kernels map spinlock to rt_mutex */
#include <linux/rtmutex.h>
-typedef struct spinlock {
+context_lock_struct(spinlock) {
struct rt_mutex_base lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define __SPIN_LOCK_UNLOCKED(name) \
{ \
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index 91cb36b65a17..e5644ab2161f 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -11,7 +11,7 @@
#include <linux/lockdep_types.h>
-typedef struct raw_spinlock {
+context_lock_struct(raw_spinlock) {
arch_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -20,7 +20,8 @@ typedef struct raw_spinlock {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} raw_spinlock_t;
+};
+typedef struct raw_spinlock raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 344ad51c8f6c..bb44a0bd7696 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -21,7 +21,7 @@
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>
-struct srcu_struct;
+context_lock_struct(srcu_struct, __reentrant_ctx_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -77,7 +77,7 @@ int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
@@ -131,14 +131,16 @@ static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned
}
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#else
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, idx);
}
@@ -210,6 +212,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/*
+ * No-op helper to denote that ssp must be held. Because SRCU-protected pointers
+ * should still be marked with __rcu_guarded, and we do not want to mark them
+ * with __guarded_by(ssp) as it would complicate annotations for writers, we
+ * choose the following strategy: srcu_dereference_check() calls this helper
+ * that checks that the passed ssp is held, and then fake-acquires 'RCU'.
+ */
+static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { }
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -223,9 +233,15 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
- (c) || srcu_read_lock_held(ssp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+({ \
+ __srcu_read_lock_must_hold(ssp); \
+ __acquire_shared_ctx_lock(RCU); \
+ __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu); \
+ __release_shared_ctx_lock(RCU); \
+ __v; \
+})
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -268,7 +284,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
* from another.
*/
-static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -304,7 +321,8 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
* contexts where RCU is watching, that is, from contexts where it would
* be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
*/
-static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -344,7 +362,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *
* complain.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
-__acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -360,7 +378,7 @@ __acquires(ssp)
* See srcu_read_lock_fast() for more information.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
- __acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -381,7 +399,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
* and srcu_read_lock_fast(). However, the same definition/initialization
* requirements called out for srcu_read_lock_safe() apply.
*/
-static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
@@ -400,7 +418,8 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *
* then none of the other flavors may be used, whether before, during,
* or after.
*/
-static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -412,7 +431,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
+srcu_read_lock_notrace(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -443,7 +463,8 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
* which calls to down_read() may be nested. The same srcu_struct may be
* used concurrently by srcu_down_read() and srcu_read_lock().
*/
-static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_down_read(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
WARN_ON_ONCE(in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -458,7 +479,7 @@ static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
* Exit an SRCU read-side critical section.
*/
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -474,7 +495,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
* Exit a light-weight SRCU read-side critical section.
*/
static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
srcu_lock_release(&ssp->dep_map);
@@ -490,7 +511,7 @@ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ct
* Exit an SRCU-fast-updown read-side critical section.
*/
static inline void
-srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
srcu_lock_release(&ssp->dep_map);
@@ -504,7 +525,7 @@ srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *
* See srcu_read_unlock_fast() for more information.
*/
static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
- struct srcu_ctr __percpu *scp) __releases(ssp)
+ struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
__srcu_read_unlock_fast(ssp, scp);
@@ -519,7 +540,7 @@ static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
 * the same context as the matching srcu_down_read_fast().
*/
static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
@@ -535,7 +556,7 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
* Exit an SRCU read-side critical section, but in an NMI-safe manner.
*/
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
@@ -545,7 +566,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
@@ -560,7 +581,7 @@ srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 * the same context as the matching srcu_down_read().
*/
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
@@ -600,15 +621,21 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
_T->idx = srcu_read_lock(_T->lock),
srcu_read_unlock(_T->lock, _T->idx),
int idx)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T)
DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
_T->scp = srcu_read_lock_fast(_T->lock),
srcu_read_unlock_fast(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T)
DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
_T->scp = srcu_read_lock_fast_notrace(_T->lock),
srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T)
#endif
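Aside: a minimal sketch of the reader pattern the shared acquire/release annotations above describe; all names are illustrative. Readers take the srcu_struct in shared mode, so nested and overlapping readers remain legal:

	#include <linux/srcu.h>

	struct demo { int val; };		/* hypothetical */
	static struct demo __rcu *demo_ptr;	/* hypothetical SRCU-protected pointer */
	DEFINE_STATIC_SRCU(demo_srcu);

	static int demo_read(void)
	{
		struct demo *d;
		int idx, val = -1;

		idx = srcu_read_lock(&demo_srcu);	/* __acquires_shared(&demo_srcu) */
		d = srcu_dereference(demo_ptr, &demo_srcu);
		if (d)
			val = d->val;
		srcu_read_unlock(&demo_srcu, idx);	/* __releases_shared(&demo_srcu) */
		return val;
	}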
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index e0698024667a..dec7cbe015aa 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -73,6 +73,7 @@ void synchronize_srcu(struct srcu_struct *ssp);
* index that must be passed to the matching srcu_read_unlock().
*/
static inline int __srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int idx;
@@ -80,6 +81,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
preempt_enable();
+ __acquire_shared(ssp);
return idx;
}
@@ -96,22 +98,26 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline
void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index d6f978b50472..958cb7ef41cb 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -233,7 +233,7 @@ struct srcu_struct {
#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
__DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires_shared(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_expedite_current(struct srcu_struct *ssp);
@@ -286,6 +286,7 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
* implementations of this_cpu_inc().
*/
static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -294,6 +295,7 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -308,7 +310,9 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
*/
static inline void notrace
__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
@@ -326,6 +330,7 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
*/
static inline
struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -334,6 +339,7 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -348,7 +354,9 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
*/
static inline void notrace
__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index f1054b9c2d8a..32352a216567 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,14 +28,14 @@
* This could also be configured at run time using CPU freq framework. */
/* MDC Clock Selection define*/
-#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_scr_i/42 */
-#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_scr_i/62 */
-#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
-#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
-#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
-#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/124 */
-#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_scr_i/204 */
-#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_scr_i/324 */
+#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_csr_i/42 */
+#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_csr_i/62 */
+#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_csr_i/16 */
+#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_csr_i/26 */
+#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_csr_i/102 */
+#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_csr_i/124 */
+#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_csr_i/204 */
+#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_csr_i/324 */
/* MTL algorithms identifiers */
#define MTL_TX_ALGORITHM_WRR 0x0
@@ -191,6 +191,7 @@ enum dwmac_core_type {
#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP BIT(12)
#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(13)
+#define STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD BIT(14)
struct mac_device_info;
@@ -256,7 +257,7 @@ struct plat_stmmacenet_data {
int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
- int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
int (*mac_finish)(struct net_device *ndev,
@@ -299,7 +300,6 @@ struct plat_stmmacenet_data {
int int_snapshot_num;
int msi_mac_vec;
int msi_wol_vec;
- int msi_lpi_vec;
int msi_sfty_ce_vec;
int msi_sfty_ue_vec;
int msi_rx_base_vec;
diff --git a/include/linux/string.h b/include/linux/string.h
index 1b564c36d721..b850bd91b3d8 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -336,8 +336,8 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s);
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
-__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
-__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 891f6173c951..eb4bd62df319 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -14,12 +14,10 @@
/*
* Debugging macros etc
*/
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
extern unsigned int rpc_debug;
extern unsigned int nfs_debug;
extern unsigned int nfsd_debug;
extern unsigned int nlm_debug;
-#endif
#define dprintk(fmt, ...) \
dfprintk(FACILITY, fmt, ##__VA_ARGS__)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5506d20857c3..4dc14c7a711b 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -35,8 +35,10 @@
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
+ unsigned int sp_nrthreads; /* # of threads currently running in pool */
+ unsigned int sp_nrthrmin; /* Min number of threads to run per pool */
+ unsigned int sp_nrthrmax; /* Max requested number of threads in pool */
struct lwq sp_xprts; /* pending transports */
- unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
@@ -53,6 +55,7 @@ enum {
SP_TASK_PENDING, /* still work to do even if no xprt is queued */
SP_NEED_VICTIM, /* One thread needs to agree to exit */
SP_VICTIM_REMAINS, /* One thread needs to actually exit */
+ SP_TASK_STARTING, /* Task has started but not added to idle yet */
};
@@ -71,7 +74,7 @@ struct svc_serv {
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
unsigned int sv_nprogs; /* Number of sv_programs */
- unsigned int sv_nrthreads; /* # of server threads */
+ unsigned int sv_nrthreads; /* # of running server threads */
unsigned int sv_max_payload; /* datagram payload size */
unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */
unsigned int sv_xdrsize; /* XDR buffer size */
@@ -440,13 +443,17 @@ struct svc_serv *svc_create(struct svc_program *, unsigned int,
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
+int svc_new_thread(struct svc_serv *serv, struct svc_pool *pool);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
-int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int svc_set_pool_threads(struct svc_serv *serv, struct svc_pool *pool,
+ unsigned int min_threads, unsigned int max_threads);
+int svc_set_num_threads(struct svc_serv *serv, unsigned int min_threads,
+ unsigned int nrservs);
int svc_pool_stats_open(struct svc_info *si, struct file *file);
void svc_process(struct svc_rqst *rqstp);
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index de37069aba90..372a00882ca6 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -61,7 +61,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_recv(struct svc_rqst *rqstp);
+int svc_recv(struct svc_rqst *rqstp, long timeo);
void svc_send(struct svc_rqst *rqstp);
int svc_addsock(struct svc_serv *serv, struct net *net,
const int fd, char *name_return, const size_t len,
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
index 66ca3ece951a..a723fb1da9c8 100644
--- a/include/linux/sunrpc/xdrgen/_builtins.h
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -46,6 +46,66 @@ xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
return true;
}
+/*
+ * De facto (non-standard but commonly implemented) signed short type:
+ * - Wire sends sign-extended 32-bit value (e.g., 0xFFFFFFFF)
+ * - be32_to_cpup() returns u32 (0xFFFFFFFF)
+ * - Explicit (s16) cast truncates to 16 bits (0xFFFF = -1)
+ */
+static inline bool
+xdrgen_decode_short(struct xdr_stream *xdr, s16 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (s16)be32_to_cpup(p);
+ return true;
+}
+
+/*
+ * De facto (non-standard but commonly implemented) signed short type:
+ * - C integer promotion sign-extends s16 val to int before passing to
+ * cpu_to_be32()
+ * - This is well-defined: -1 as s16 -> -1 as int -> 0xFFFFFFFF on wire
+ */
+static inline bool
+xdrgen_encode_short(struct xdr_stream *xdr, s16 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+/*
+ * De facto (non-standard but commonly implemented) unsigned short type:
+ * 16-bit integer zero-extended to fill one XDR_UNIT.
+ */
+static inline bool
+xdrgen_decode_unsigned_short(struct xdr_stream *xdr, u16 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (u16)be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_short(struct xdr_stream *xdr, u16 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
static inline bool
xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
{
@@ -188,12 +248,10 @@ xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
return false;
if (unlikely(maxlen && len > maxlen))
return false;
- if (len != 0) {
- p = xdr_inline_decode(xdr, len);
- if (unlikely(!p))
- return false;
- ptr->data = (unsigned char *)p;
- }
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
ptr->len = len;
return true;
}
@@ -219,12 +277,10 @@ xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
return false;
if (unlikely(maxlen && len > maxlen))
return false;
- if (len != 0) {
- p = xdr_inline_decode(xdr, len);
- if (unlikely(!p))
- return false;
- ptr->data = (u8 *)p;
- }
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
ptr->len = len;
return true;
}
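Aside: to make the sign-extension comments above concrete, a sketch of the round trip for a negative value, assuming @enc and @dec are XDR streams positioned over the same buffer:

	/*
	 * Encoding s16 val = -1:
	 *   integer promotion: (s16)-1 -> (int)-1 -> 0xFFFFFFFF, i.e. the
	 *   four bytes FF FF FF FF occupy one XDR_UNIT on the wire.
	 * Decoding:
	 *   be32_to_cpup() -> 0xFFFFFFFF (u32); the (s16) cast keeps the
	 *   low 16 bits, 0xFFFF, which is -1 again.
	 */
	static bool demo_short_roundtrip(struct xdr_stream *enc, struct xdr_stream *dec)
	{
		s16 out;

		if (!xdrgen_encode_short(enc, -1))
			return false;
		return xdrgen_decode_short(dec, &out) && out == -1;
	}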
diff --git a/include/linux/sunrpc/xdrgen/nfs4_1.h b/include/linux/sunrpc/xdrgen/nfs4_1.h
index cf21a14aa885..4ac54bdbd335 100644
--- a/include/linux/sunrpc/xdrgen/nfs4_1.h
+++ b/include/linux/sunrpc/xdrgen/nfs4_1.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Generated by xdrgen. Manual edits will be lost. */
/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
-/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+/* XDR specification modification time: Thu Jan 8 23:12:07 2026 */
#ifndef _LINUX_XDRGEN_NFS4_1_DEF_H
#define _LINUX_XDRGEN_NFS4_1_DEF_H
@@ -18,6 +18,14 @@ typedef struct {
uint32_t *element;
} bitmap4;
+typedef opaque utf8string;
+
+typedef utf8string utf8str_cis;
+
+typedef utf8string utf8str_cs;
+
+typedef utf8string utf8str_mixed;
+
struct nfstime4 {
int64_t seconds;
uint32_t nseconds;
@@ -40,6 +48,7 @@ enum open_args_share_access4 {
OPEN_ARGS_SHARE_ACCESS_WRITE = 2,
OPEN_ARGS_SHARE_ACCESS_BOTH = 3,
};
+
typedef enum open_args_share_access4 open_args_share_access4;
enum open_args_share_deny4 {
@@ -48,6 +57,7 @@ enum open_args_share_deny4 {
OPEN_ARGS_SHARE_DENY_WRITE = 2,
OPEN_ARGS_SHARE_DENY_BOTH = 3,
};
+
typedef enum open_args_share_deny4 open_args_share_deny4;
enum open_args_share_access_want4 {
@@ -59,6 +69,7 @@ enum open_args_share_access_want4 {
OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20,
OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21,
};
+
typedef enum open_args_share_access_want4 open_args_share_access_want4;
enum open_args_open_claim4 {
@@ -70,6 +81,7 @@ enum open_args_open_claim4 {
OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5,
OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6,
};
+
typedef enum open_args_open_claim4 open_args_open_claim4;
enum open_args_createmode4 {
@@ -78,10 +90,15 @@ enum open_args_createmode4 {
OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2,
OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3,
};
+
typedef enum open_args_createmode4 open_args_createmode4;
typedef struct open_arguments4 fattr4_open_arguments;
+/*
+ * Determine what OPEN supports.
+ */
+
enum { FATTR4_OPEN_ARGUMENTS = 86 };
enum { OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010 };
@@ -90,6 +107,11 @@ typedef struct nfstime4 fattr4_time_deleg_access;
typedef struct nfstime4 fattr4_time_deleg_modify;
+/*
+ * New RECOMMENDED Attribute for
+ * delegation caching of times
+ */
+
enum { FATTR4_TIME_DELEG_ACCESS = 84 };
enum { FATTR4_TIME_DELEG_MODIFY = 85 };
@@ -124,13 +146,88 @@ enum open_delegation_type4 {
OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
};
+
typedef enum open_delegation_type4 open_delegation_type4;
+enum aclmodel4 {
+ ACL_MODEL_NFS4 = 1,
+ ACL_MODEL_POSIX_DRAFT = 2,
+ ACL_MODEL_NONE = 3,
+};
+
+typedef enum aclmodel4 aclmodel4;
+
+enum aclscope4 {
+ ACL_SCOPE_FILE_OBJECT = 1,
+ ACL_SCOPE_FILE_SYSTEM = 2,
+ ACL_SCOPE_SERVER = 3,
+};
+
+typedef enum aclscope4 aclscope4;
+
+enum posixacetag4 {
+ POSIXACE4_TAG_USER_OBJ = 1,
+ POSIXACE4_TAG_USER = 2,
+ POSIXACE4_TAG_GROUP_OBJ = 3,
+ POSIXACE4_TAG_GROUP = 4,
+ POSIXACE4_TAG_MASK = 5,
+ POSIXACE4_TAG_OTHER = 6,
+};
+
+typedef enum posixacetag4 posixacetag4;
+
+typedef uint32_t posixaceperm4;
+
+enum { POSIXACE4_PERM_EXECUTE = 0x00000001 };
+
+enum { POSIXACE4_PERM_WRITE = 0x00000002 };
+
+enum { POSIXACE4_PERM_READ = 0x00000004 };
+
+struct posixace4 {
+ posixacetag4 tag;
+ posixaceperm4 perm;
+ utf8str_mixed who;
+};
+
+typedef aclmodel4 fattr4_acl_trueform;
+
+typedef aclscope4 fattr4_acl_trueform_scope;
+
+typedef struct {
+ u32 count;
+ struct posixace4 *element;
+} fattr4_posix_default_acl;
+
+typedef struct {
+ u32 count;
+ struct posixace4 *element;
+} fattr4_posix_access_acl;
+
+/*
+ * New for POSIX ACL extension
+ */
+
+enum { FATTR4_ACL_TRUEFORM = 89 };
+
+enum { FATTR4_ACL_TRUEFORM_SCOPE = 90 };
+
+enum { FATTR4_POSIX_DEFAULT_ACL = 91 };
+
+enum { FATTR4_POSIX_ACCESS_ACL = 92 };
+
#define NFS4_int64_t_sz \
(XDR_hyper)
#define NFS4_uint32_t_sz \
(XDR_unsigned_int)
#define NFS4_bitmap4_sz (XDR_unsigned_int)
+#define NFS4_utf8string_sz (XDR_unsigned_int)
+#define NFS4_utf8str_cis_sz \
+ (NFS4_utf8string_sz)
+#define NFS4_utf8str_cs_sz \
+ (NFS4_utf8string_sz)
+#define NFS4_utf8str_mixed_sz \
+ (NFS4_utf8string_sz)
#define NFS4_nfstime4_sz \
(NFS4_int64_t_sz + NFS4_uint32_t_sz)
#define NFS4_fattr4_offline_sz \
@@ -149,5 +246,18 @@ typedef enum open_delegation_type4 open_delegation_type4;
#define NFS4_fattr4_time_deleg_modify_sz \
(NFS4_nfstime4_sz)
#define NFS4_open_delegation_type4_sz (XDR_int)
+#define NFS4_aclmodel4_sz (XDR_int)
+#define NFS4_aclscope4_sz (XDR_int)
+#define NFS4_posixacetag4_sz (XDR_int)
+#define NFS4_posixaceperm4_sz \
+ (NFS4_uint32_t_sz)
+#define NFS4_posixace4_sz \
+ (NFS4_posixacetag4_sz + NFS4_posixaceperm4_sz + NFS4_utf8str_mixed_sz)
+#define NFS4_fattr4_acl_trueform_sz \
+ (NFS4_aclmodel4_sz)
+#define NFS4_fattr4_acl_trueform_scope_sz \
+ (NFS4_aclscope4_sz)
+#define NFS4_fattr4_posix_default_acl_sz (XDR_unsigned_int)
+#define NFS4_fattr4_posix_access_acl_sz (XDR_unsigned_int)
#endif /* _LINUX_XDRGEN_NFS4_1_DEF_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 38ca3df68716..62fc7499b408 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -224,13 +224,11 @@ enum {
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* Bit flag in swap_map */
-#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */
/* Special value in first swap_map */
#define SWAP_MAP_MAX 0x3e /* Max count */
#define SWAP_MAP_BAD 0x3f /* Note page is bad */
-#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */
/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX 0x7f /* Max count */
@@ -453,16 +451,7 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-int folio_alloc_swap(struct folio *folio);
-bool folio_free_swap(struct folio *folio);
-void put_swap_folio(struct folio *folio, swp_entry_t entry);
-extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t, int);
-extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t entry, int nr);
-extern void swap_free_nr(swp_entry_t entry, int nr_pages);
-extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
@@ -474,6 +463,29 @@ struct backing_dev_info;
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
+/*
+ * If there is an existing swap slot reference (swap entry) and the caller
+ * guarantees that it cannot be modified concurrently (e.g., the PTL
+ * protects the swap entry in a page table; shmem's cmpxchg protects
+ * the swap entry in the shmem mapping), the two helpers below can be
+ * used to put/dup the entries directly.
+ *
+ * All entries must be allocated by folio_alloc_swap(), and they must have
+ * a swap count > 1. See the comments of the folio_*_swap helpers for more info.
+ */
+int swap_dup_entry_direct(swp_entry_t entry);
+void swap_put_entries_direct(swp_entry_t entry, int nr);
+
+/*
+ * folio_free_swap tries to free the swap entries pinned by a swap cache
+ * folio; it is declared here so that other components can call it.
+ */
+bool folio_free_swap(struct folio *folio);
+
+/* Allocate / free (hibernation) exclusive entries */
+swp_entry_t swap_alloc_hibernation_slot(int type);
+void swap_free_hibernation_slot(swp_entry_t entry);
+
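/*
 * Illustrative sketch (not part of this patch) of the calling convention
 * for the direct helpers above: the caller already holds the lock that
 * stabilizes the swap entry, e.g. the PTL while copying a swap PTE.
 * The caller and its PTE handling are hypothetical.
 */
static inline int demo_dup_swap_pte(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	int ret;

	ret = swap_dup_entry_direct(entry);	/* take one more reference */
	if (ret)
		return ret;			/* e.g. continuation needed */
	/* ... install the copied PTE under the same PTL ... */
	return 0;
}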
static inline void put_swap_device(struct swap_info_struct *si)
{
percpu_ref_put(&si->users);
@@ -501,10 +513,6 @@ static inline void put_swap_device(struct swap_info_struct *si)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
-static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
-{
-}
-
static inline void free_swap_cache(struct folio *folio)
{
}
@@ -514,25 +522,12 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
-{
-}
-
-static inline int swap_duplicate(swp_entry_t swp)
-{
- return 0;
-}
-
-static inline int swapcache_prepare(swp_entry_t swp, int nr)
+static inline int swap_dup_entry_direct(swp_entry_t ent)
{
return 0;
}
-static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
-{
-}
-
-static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
+static inline void swap_put_entries_direct(swp_entry_t ent, int nr)
{
}
@@ -551,11 +546,6 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline int folio_alloc_swap(struct folio *folio)
-{
- return -EINVAL;
-}
-
static inline bool folio_free_swap(struct folio *folio)
{
return false;
@@ -568,17 +558,6 @@ static inline int add_swap_extent(struct swap_info_struct *sis,
return -EINVAL;
}
#endif /* CONFIG_SWAP */
-
-static inline void free_swap_and_cache(swp_entry_t entry)
-{
- free_swap_and_cache_nr(entry, 1);
-}
-
-static inline void swap_free(swp_entry_t entry)
-{
- swap_free_nr(entry, 1);
-}
-
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index cf84d98964b2..02bd6ddb6278 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -59,7 +59,6 @@ struct compat_stat;
struct old_timeval32;
struct robust_list_head;
struct futex_waitv;
-struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
@@ -718,7 +717,7 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
asmlinkage long sys_umask(int mask);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
+asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, void __user *cache);
asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
@@ -961,6 +960,7 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
+asmlinkage long sys_rseq_slice_yield(void);
asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
asmlinkage long sys_open_tree_attr(int dfd, const char __user *path,
unsigned flags,
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index b449665c686a..5226efde9ad4 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -8,9 +8,11 @@
*/
#include <linux/err.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
#include <linux/types.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/edid.h>
struct device;
struct platform_device;
@@ -60,6 +62,16 @@ struct efifb_dmi_info {
int flags;
};
+struct sysfb_display_info {
+ struct screen_info screen;
+
+#if defined(CONFIG_FIRMWARE_EDID)
+ struct edid_info edid;
+#endif
+};
+
+extern struct sysfb_display_info sysfb_primary_display;
+
#ifdef CONFIG_SYSFB
void sysfb_disable(struct device *dev);
@@ -82,16 +94,17 @@ static inline bool sysfb_handles_screen_info(void)
#ifdef CONFIG_EFI
extern struct efifb_dmi_info efifb_dmi_list[];
-void sysfb_apply_efi_quirks(void);
-void sysfb_set_efifb_fwnode(struct platform_device *pd);
+void sysfb_apply_efi_quirks(struct screen_info *si);
+void sysfb_set_efifb_fwnode(const struct screen_info *si, struct platform_device *pd);
#else /* CONFIG_EFI */
-static inline void sysfb_apply_efi_quirks(void)
+static inline void sysfb_apply_efi_quirks(struct screen_info *si)
{
}
-static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
+static inline void sysfb_set_efifb_fwnode(const struct screen_info *si,
+ struct platform_device *pd)
{
}
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c33a96b7391a..99b775f3ff46 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -808,4 +808,17 @@ static inline void sysfs_put(struct kernfs_node *kn)
kernfs_put(kn);
}
+/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
+#define VERIFY_OCTAL_PERMISSIONS(perms) \
+ (BUILD_BUG_ON_ZERO((perms) < 0) + \
+ BUILD_BUG_ON_ZERO((perms) > 0777) + \
+ /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
+ BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
+ /* USER_WRITABLE >= GROUP_WRITABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
+ /* OTHER_WRITABLE? Generally considered a bad idea. */ \
+ BUILD_BUG_ON_ZERO((perms) & 2) + \
+ (perms))
+
#endif /* _SYSFS_H_ */
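Aside: a quick sketch of what the macro accepts and rejects at compile time; sysfs attribute helpers such as __ATTR() apply it implicitly:

	static const umode_t demo_ok = VERIFY_OCTAL_PERMISSIONS(0644);	/* builds */
	/* VERIFY_OCTAL_PERMISSIONS(644);   fails: decimal 644 is 01204 > 0777 */
	/* VERIFY_OCTAL_PERMISSIONS(0666);  fails: other-writable bit set */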
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 20b8c6e21fef..f72eef31fa23 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -181,10 +181,7 @@ struct tcp_request_sock {
#endif
};
-static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
-{
- return (struct tcp_request_sock *)req;
-}
+#define tcp_rsk(ptr) container_of_const(ptr, struct tcp_request_sock, req.req)
static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
@@ -294,7 +291,8 @@ struct tcp_sock {
u8 nonagle : 4,/* Disable Nagle algorithm? */
rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
u8 received_ce_pending:4, /* Not yet transmit cnt of received_ce */
- unused2:4;
+ accecn_opt_sent_w_dsack:1,/* Sent ACCECN opt in previous ACK w/ D-SACK */
+ unused2:3;
u8 accecn_minlen:2,/* Minimum length of AccECN option sent */
est_ecnfield:2,/* ECN field for AccECN delivered estimates */
accecn_opt_demand:2,/* Demand AccECN option for n next ACKs */
@@ -345,6 +343,7 @@ struct tcp_sock {
u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_rtt_last_tsecr;
u32 delivered_ecn_bytes[3];
+ u16 pkts_acked_ewma;/* Pkts acked EWMA for AccECN cep heuristic */
u64 first_tx_mstamp; /* start of window send phase */
u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index 1f3e5dad6d0d..ee5f0bd41f43 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -76,6 +76,9 @@ struct tee_device {
/**
* struct tee_driver_ops - driver operations vtable
* @get_version: returns version of driver
+ * @get_tee_revision: returns revision string (diagnostic only);
+ * do not infer feature support from this; use
+ * TEE_IOC_VERSION instead
* @open: called for a context when the device file is opened
* @close_context: called when the device file is closed
* @release: called to release the context
@@ -95,9 +98,12 @@ struct tee_device {
* client closes the device file, even if there are existing references to the
* context. The TEE driver can use @close_context to start cleaning up.
*/
struct tee_driver_ops {
void (*get_version)(struct tee_device *teedev,
struct tee_ioctl_version_data *vers);
+ int (*get_tee_revision)(struct tee_device *teedev,
+ char *buf, size_t len);
int (*open)(struct tee_context *ctx);
void (*close_context)(struct tee_context *ctx);
void (*release)(struct tee_context *ctx);
@@ -123,6 +129,9 @@ struct tee_driver_ops {
int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
};
+/* Size for TEE revision string buffer used by get_tee_revision(). */
+#define TEE_REVISION_STR_SIZE 128
+
/**
* struct tee_desc - Describes the TEE driver to the subsystem
* @name: name of driver
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 88a6f9697c89..e561a26f537a 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -315,6 +315,9 @@ struct tee_client_device {
* @driver: driver structure
*/
struct tee_client_driver {
+ int (*probe)(struct tee_client_device *);
+ void (*remove)(struct tee_client_device *);
+ void (*shutdown)(struct tee_client_device *);
const struct tee_client_device_id *id_table;
struct device_driver driver;
};
@@ -322,4 +325,13 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of_const(d, struct tee_client_driver, driver)
+#define tee_client_driver_register(drv) \
+ __tee_client_driver_register(drv, THIS_MODULE)
+int __tee_client_driver_register(struct tee_client_driver *, struct module *);
+void tee_client_driver_unregister(struct tee_client_driver *);
+
+#define module_tee_client_driver(__tee_client_driver) \
+ module_driver(__tee_client_driver, tee_client_driver_register, \
+ tee_client_driver_unregister)
+
#endif /*__TEE_DRV_H*/
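Aside: a hedged sketch of a client driver using the new probe/remove hooks and registration helper above; the TA UUID and all names are made up for illustration:

	#include <linux/tee_drv.h>
	#include <linux/uuid.h>

	static int demo_probe(struct tee_client_device *dev)	{ return 0; }
	static void demo_remove(struct tee_client_device *dev)	{ }

	static const struct tee_client_device_id demo_ids[] = {
		{ UUID_INIT(0x12345678, 0x0000, 0x0000,	/* hypothetical TA UUID */
			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01) },
		{ }
	};
	MODULE_DEVICE_TABLE(tee, demo_ids);

	static struct tee_client_driver demo_driver = {
		.probe    = demo_probe,
		.remove   = demo_remove,
		.id_table = demo_ids,
		.driver   = { .name = "demo-ta" },
	};
	module_tee_client_driver(demo_driver);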
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index b40de9bab4b7..051e42902690 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -46,15 +46,17 @@ enum syscall_work_bit {
SYSCALL_WORK_BIT_SYSCALL_AUDIT,
SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
+ SYSCALL_WORK_BIT_SYSCALL_RSEQ_SLICE,
};
-#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
-#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
-#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
-#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
-#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
-#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
-#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP)
+#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
+#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
+#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
+#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
+#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
+#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
+#define SYSCALL_WORK_SYSCALL_RSEQ_SLICE BIT(SYSCALL_WORK_BIT_SYSCALL_RSEQ_SLICE)
#endif
#include <asm/thread_info.h>
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ac76ae9fa36d..738007d6f577 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -126,6 +126,7 @@ enum tick_dep_bits {
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
+extern bool tick_nohz_is_active(void);
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
@@ -142,6 +143,7 @@ extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
+static inline bool tick_nohz_is_active(void) { return false; }
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index dce03a5cafb7..7de6b350e559 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -115,6 +115,15 @@ extern void timecounter_init(struct timecounter *tc,
*/
extern u64 timecounter_read(struct timecounter *tc);
+/*
+ * This is like cyclecounter_cyc2ns(), but it is used for computing a
+ * time previous to the time stored in the cycle counter.
+ */
+static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, u64 cycles, u64 frac)
+{
+ return ((cycles * cc->mult) - frac) >> cc->shift;
+}
+
/**
* timecounter_cyc2time - convert a cycle counter to same
* time base as values returned by
@@ -131,7 +140,25 @@ extern u64 timecounter_read(struct timecounter *tc);
*
* Returns: cycle counter converted to nanoseconds since the initial time stamp
*/
-extern u64 timecounter_cyc2time(const struct timecounter *tc,
- u64 cycle_tstamp);
+static inline u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp)
+{
+ const struct cyclecounter *cc = tc->cc;
+ u64 delta = (cycle_tstamp - tc->cycle_last) & cc->mask;
+ u64 nsec = tc->nsec, frac = tc->frac;
+
+ /*
+ * Instead of always treating cycle_tstamp as more recent than
+ * tc->cycle_last, detect when it is too far in the future and
+ * treat it as old time stamp instead.
+ */
+ if (unlikely(delta > cc->mask / 2)) {
+ delta = (tc->cycle_last - cycle_tstamp) & cc->mask;
+ nsec -= cc_cyc2ns_backwards(cc, delta, frac);
+ } else {
+ nsec += cyclecounter_cyc2ns(cc, delta, tc->mask, &frac);
+ }
+
+ return nsec;
+}
#endif
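Aside: a worked example of the branch above, assuming a 32-bit cycle counter (cc->mask == 0xffffffff) and tc->cycle_last == 1000:

	/*
	 * cycle_tstamp = 1400: delta = 400 <= mask/2
	 *                      -> forward path, nsec += cyc2ns(400)
	 *
	 * cycle_tstamp = 600:  delta = (600 - 1000) & mask = 0xfffffe70,
	 *                      which is > mask/2, so it is treated as a
	 *                      stamp 400 cycles in the past instead:
	 *                      delta = (1000 - 600) & mask = 400
	 *                      -> nsec -= cc_cyc2ns_backwards(cc, 400, frac)
	 */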
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index c52b862dad45..fa4654ffb621 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -63,6 +63,11 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2);
/* Return @a with all but the lowest @size bytes cleared */
struct tnum tnum_cast(struct tnum a, u8 size);
+/* Swap the bytes of a tnum */
+struct tnum tnum_bswap16(struct tnum a);
+struct tnum tnum_bswap32(struct tnum a);
+struct tnum tnum_bswap64(struct tnum a);
+
/* Returns true if @a is a known constant */
static inline bool tnum_is_const(struct tnum a)
{
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 3690221ba3d8..0a2b8229b999 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -38,7 +38,10 @@ const char *trace_print_symbols_seq_u64(struct trace_seq *p,
*symbol_array);
#endif
-const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+struct trace_iterator;
+struct trace_event;
+
+const char *trace_print_bitmask_seq(struct trace_iterator *iter, void *bitmask_ptr,
unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
@@ -54,9 +57,6 @@ trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
-struct trace_iterator;
-struct trace_event;
-
int trace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *event);
extern __printf(2, 3)
diff --git a/include/linux/trace_printk.h b/include/linux/trace_printk.h
new file mode 100644
index 000000000000..bb5874097f24
--- /dev/null
+++ b/include/linux/trace_printk.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TRACE_PRINTK_H
+#define _LINUX_TRACE_PRINTK_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/instruction_pointer.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+/*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_on/tracing_off and tracing_start()/tracing_stop
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop/tracing_start has slightly more overhead. It is used
+ * by things like suspend to ram where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
+
+enum ftrace_dump_mode {
+ DUMP_NONE,
+ DUMP_ALL,
+ DUMP_ORIG,
+ DUMP_PARAM,
+};
+
+#ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+void tracing_snapshot(void);
+void tracing_snapshot_alloc(void);
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+
+static inline __printf(1, 2)
+void ____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...) \
+do { \
+ if (0) \
+ ____trace_printk_check_format(fmt, ##args); \
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_printk() is used.)
+ *
+ * A little optimization trick is done here. If there's only one
+ * argument, there's no need to scan the string for printf formats.
+ * The trace_puts() will suffice. But how can we take advantage of
+ * using trace_puts() when trace_printk() has only one argument?
+ * By stringifying the args and checking the size we can tell
+ * whether or not there are args. __stringify((__VA_ARGS__)) will
+ * turn into "()\0" with a size of 3 when there are no args, anything
+ * else will be bigger. All we need to do is define a string to this,
+ * and then take its size and compare to 3. If it's bigger, use
+ * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
+ * let gcc optimize the rest.
+ */
+
+#define trace_printk(fmt, ...) \
+do { \
+ char _______STR[] = __stringify((__VA_ARGS__)); \
+ if (sizeof(_______STR) > 3) \
+ do_trace_printk(fmt, ##__VA_ARGS__); \
+ else \
+ trace_puts(fmt); \
+} while (0)
+
+#define do_trace_printk(fmt, args...) \
+do { \
+ static const char *trace_printk_fmt __used \
+ __section("__trace_printk_fmt") = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_printk_check_format(fmt, ##args); \
+ \
+ if (__builtin_constant_p(fmt)) \
+ __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
+ else \
+ __trace_printk(_THIS_IP_, fmt, ##args); \
+} while (0)
+
+extern __printf(2, 3)
+int __trace_bprintk(unsigned long ip, const char *fmt, ...);
+
+extern __printf(2, 3)
+int __trace_printk(unsigned long ip, const char *fmt, ...);
+
+/**
+ * trace_puts - write a string into the ftrace buffer
+ * @str: the string to record
+ *
+ * Note: __trace_bputs is an internal function for trace_puts and
+ * the @ip is passed in via the trace_puts macro.
+ *
+ * This is similar to trace_printk() but is made for those really fast
+ * paths that a developer wants the least amount of "Heisenbug" effects,
+ * where the processing of the print format is still too much.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_puts scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_puts() is used.)
+ *
+ * Returns: 0 if nothing was written, positive # if string was.
+ * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
+ */
+
+#define trace_puts(str) ({ \
+ static const char *trace_printk_fmt __used \
+ __section("__trace_printk_fmt") = \
+ __builtin_constant_p(str) ? str : NULL; \
+ \
+ if (__builtin_constant_p(str)) \
+ __trace_bputs(_THIS_IP_, trace_printk_fmt); \
+ else \
+ __trace_puts(_THIS_IP_, str); \
+})
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str);
+
+extern void trace_dump_stack(int skip);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement.
+ */
+#define ftrace_vprintk(fmt, vargs) \
+do { \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt __used \
+ __section("__trace_printk_fmt") = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
+ } else \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+} while (0)
+
+extern __printf(2, 0) int
+__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern __printf(2, 0) int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
+#else
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void trace_dump_stack(int skip) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+static inline void tracing_snapshot(void) { }
+static inline void tracing_snapshot_alloc(void) { }
+
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static __printf(1, 0) inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
+#endif /* CONFIG_TRACING */
+
+#endif
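Aside: to spell out the one-argument optimization documented in the trace_printk() kernel-doc above, this is what __stringify((__VA_ARGS__)) yields in each case:

	/*
	 * trace_printk("hello\n");
	 *   __stringify(()) == "()"   -> sizeof("()") == 3 -> trace_puts() path
	 *
	 * trace_printk("x=%d\n", x);
	 *   __stringify((x)) == "(x)" -> sizeof("(x)") == 4 -> do_trace_printk()
	 */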
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 4a0b8c172d27..697d619aafdc 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -114,7 +114,11 @@ extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
extern int trace_seq_path(struct trace_seq *s, const struct path *path);
extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
- int nmaskbits);
+ int nmaskbits);
+
+extern void trace_seq_bitmask_list(struct trace_seq *s,
+ const unsigned long *maskp,
+ int nmaskbits);
extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
@@ -137,6 +141,12 @@ trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
{
}
+static inline void
+trace_seq_bitmask_list(struct trace_seq *s, const unsigned long *maskp,
+ int nmaskbits)
+{
+}
+
static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
return 0;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 8a56f3278b1b..22ca1c8b54f3 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -108,14 +108,15 @@ void for_each_tracepoint_in_module(struct module *mod,
* An alternative is to use the following for batch reclaim associated
* with a given tracepoint:
*
- * - tracepoint_is_faultable() == false: call_rcu()
+ * - tracepoint_is_faultable() == false: call_srcu()
* - tracepoint_is_faultable() == true: call_rcu_tasks_trace()
*/
#ifdef CONFIG_TRACEPOINTS
+extern struct srcu_struct tracepoint_srcu;
static inline void tracepoint_synchronize_unregister(void)
{
synchronize_rcu_tasks_trace();
- synchronize_rcu();
+ synchronize_srcu(&tracepoint_srcu);
}
static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
@@ -275,13 +276,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
return static_branch_unlikely(&__tracepoint_##name.key);\
}
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
static inline void __do_trace_##name(proto) \
{ \
TRACEPOINT_CHECK(name) \
if (cond) { \
- guard(preempt_notrace)(); \
+ guard(srcu_fast_notrace)(&tracepoint_srcu); \
__DO_TRACE_CALL(name, TP_ARGS(args)); \
} \
} \
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index 2efc271a96fa..b1f53fd5cfa3 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -56,6 +56,7 @@ struct anon_transport_class cls = { \
struct transport_container {
struct attribute_container ac;
const struct attribute_group *statistics;
+ const struct attribute_group *encryption;
};
#define attribute_container_to_transport_container(x) \
@@ -87,9 +88,9 @@ transport_unregister_device(struct device *dev)
transport_destroy_device(dev);
}
-static inline int transport_container_register(struct transport_container *tc)
+static inline void transport_container_register(struct transport_container *tc)
{
- return attribute_container_register(&tc->ac);
+ attribute_container_register(&tc->ac);
}
static inline void transport_container_unregister(struct transport_container *tc)
@@ -99,7 +100,7 @@ static inline void transport_container_unregister(struct transport_container *tc
}
int transport_class_register(struct transport_class *);
-int anon_transport_class_register(struct anon_transport_class *);
+void anon_transport_class_register(struct anon_transport_class *);
void transport_class_unregister(struct transport_class *);
void anon_transport_class_unregister(struct anon_transport_class *);
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index a3b7ab668eff..22e05b2aac69 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -123,7 +123,4 @@ int tsm_report_unregister(const struct tsm_report_ops *ops);
struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops);
void tsm_unregister(struct tsm_dev *tsm_dev);
struct tsm_dev *find_tsm_dev(int id);
-struct pci_ide;
-int tsm_ide_stream_register(struct pci_ide *ide);
-void tsm_ide_stream_unregister(struct pci_ide *ide);
#endif /* __TSM_H */
diff --git a/include/linux/types.h b/include/linux/types.h
index d4437e9c452c..7e71d260763c 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
-#define __EXPORTED_HEADERS__
#include <uapi/linux/types.h>
#ifndef __ASSEMBLY__
@@ -171,6 +170,11 @@ typedef u64 phys_addr_t;
typedef u32 phys_addr_t;
#endif
+struct phys_vec {
+ phys_addr_t paddr;
+ size_t len;
+};
+
typedef phys_addr_t resource_size_t;
/*
@@ -180,7 +184,7 @@ typedef phys_addr_t resource_size_t;
typedef unsigned long irq_hw_number_t;
typedef struct {
- int counter;
+ int __aligned(sizeof(int)) counter;
} atomic_t;
#define ATOMIC_INIT(i) { (i) }
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 457879938fc1..f47cf4d78ad7 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -79,6 +79,14 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return local64_read(&p->v);
}
+static inline void *u64_stats_copy(void *dst, const void *src, size_t len)
+{
+ BUILD_BUG_ON(len % sizeof(u64_stats_t));
+ for (size_t i = 0; i < len / sizeof(u64_stats_t); i++)
+ ((u64 *)dst)[i] = local64_read(&((local64_t *)src)[i]);
+ return dst;
+}
+
static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
local64_set(&p->v, val);
@@ -89,6 +97,11 @@ static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
local64_add(val, &p->v);
}
+static inline void u64_stats_sub(u64_stats_t *p, s64 val)
+{
+ local64_sub(val, &p->v);
+}
+
static inline void u64_stats_inc(u64_stats_t *p)
{
local64_inc(&p->v);
@@ -110,6 +123,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
}
#else /* 64 bit */
+#include <linux/string.h>
typedef struct {
u64 v;
@@ -120,6 +134,12 @@ static inline u64 u64_stats_read(const u64_stats_t *p)
return p->v;
}
+static inline void *u64_stats_copy(void *dst, const void *src, size_t len)
+{
+ BUILD_BUG_ON(len % sizeof(u64_stats_t));
+ return memcpy(dst, src, len);
+}
+
static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
p->v = val;
@@ -130,6 +150,11 @@ static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
p->v += val;
}
+static inline void u64_stats_sub(u64_stats_t *p, s64 val)
+{
+ p->v -= val;
+}
+
static inline void u64_stats_inc(u64_stats_t *p)
{
p->v++;
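
A hedged sketch of how the new u64_stats_copy() helper might be used to snapshot a counter block under the usual fetch_begin/retry loop; struct foo_stats and its layout are illustrative assumptions, and the length passed to u64_stats_copy() must be a compile-time multiple of sizeof(u64_stats_t) to satisfy the BUILD_BUG_ON():

struct foo_stats {
        u64_stats_t rx_packets;
        u64_stats_t rx_bytes;
        struct u64_stats_sync syncp;
};

static void foo_stats_read(const struct foo_stats *s, u64 out[2])
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                /* copies rx_packets and rx_bytes in one shot */
                u64_stats_copy(out, s, 2 * sizeof(u64_stats_t));
        } while (u64_stats_fetch_retry(&s->syncp, start));
}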
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 58795688a186..1cbf6b4d3aab 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -236,7 +236,7 @@ static inline void udp_allow_gso(struct sock *sk)
hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif
-#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
+#define IS_UDPLITE(__sk) (unlikely(__sk->sk_protocol == IPPROTO_UDPLITE))
static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
{
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5b127043a151..a9bc5b3067e3 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -389,6 +389,9 @@ ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
size_t maxsize, unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0);
+ssize_t iov_iter_extract_bvecs(struct iov_iter *iter, struct bio_vec *bv,
+ size_t max_size, unsigned short *nr_vecs,
+ unsigned short max_vecs, iov_iter_extraction_t extraction_flags);
/**
* iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
diff --git a/include/linux/unwind_user.h b/include/linux/unwind_user.h
index 7f7282516bf5..64618618febd 100644
--- a/include/linux/unwind_user.h
+++ b/include/linux/unwind_user.h
@@ -5,8 +5,22 @@
#include <linux/unwind_user_types.h>
#include <asm/unwind_user.h>
-#ifndef ARCH_INIT_USER_FP_FRAME
- #define ARCH_INIT_USER_FP_FRAME
+#ifndef CONFIG_HAVE_UNWIND_USER_FP
+
+#define ARCH_INIT_USER_FP_FRAME(ws)
+
+#endif
+
+#ifndef ARCH_INIT_USER_FP_ENTRY_FRAME
+#define ARCH_INIT_USER_FP_ENTRY_FRAME(ws)
+#endif
+
+#ifndef unwind_user_at_function_start
+static inline bool unwind_user_at_function_start(struct pt_regs *regs)
+{
+ return false;
+}
+#define unwind_user_at_function_start unwind_user_at_function_start
#endif
int unwind_user(struct unwind_stacktrace *trace, unsigned int max_entries);
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index ee3d36eda45d..f548fea2adec 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -242,6 +242,7 @@ extern void arch_uprobe_clear_state(struct mm_struct *mm);
extern void arch_uprobe_init_state(struct mm_struct *mm);
extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
+extern unsigned long arch_uprobe_get_xol_area(void);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 2945923a8a95..b0e84896e6ac 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -290,6 +290,7 @@ extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
extern void usbnet_set_rx_mode(struct net_device *net);
extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+extern int usbnet_mii_ioctl(struct net_device *net, struct ifreq *rq, int cmd);
extern int usbnet_nway_reset(struct net_device *net);
extern int usbnet_manage_power(struct usbnet *, int);
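
The signature matches .ndo_eth_ioctl, so a minidriver could presumably wire the new helper straight into its netdev ops. A hedged sketch, assuming usbnet_mii_ioctl() is intended as a generic MII ioctl handler:

static const struct net_device_ops example_netdev_ops = {
        .ndo_open       = usbnet_open,
        .ndo_stop       = usbnet_stop,
        .ndo_start_xmit = usbnet_start_xmit,
        .ndo_eth_ioctl  = usbnet_mii_ioctl,
};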
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index 22e0dab0809e..ea92ac623a45 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -10,6 +10,14 @@
/* ------------------------------------------------------------------------
* GUIDs
+ *
+ * The GUID returned by lsusb can be converted to this format with the
+ * following Python snippet:
+ *
+ * import uuid
+ * id = "{01234567-89ab-cdef-0123-456789abcdef}"
+ * le = uuid.UUID(id).bytes_le
+ * print("{" + ", ".join([f"0x{b:02x}" for b in le]) + "}")
*/
#define UVC_GUID_UVC_CAMERA \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
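
For reference, running the snippet above on the example GUID string yields the byte order expected here (bytes_le swaps the first three UUID fields to little-endian); the define below is purely illustrative:

#define UVC_GUID_EXAMPLE \
        {0x67, 0x45, 0x23, 0x01, 0xab, 0x89, 0xef, 0xcd, \
         0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}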
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 2eb528058d0d..71564868b8f6 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -119,7 +119,7 @@
* a fuss about it. This makes the programmer responsible for tagging
* the functions that can be garbage-collected.
*
- * With the macro it is possible to write the following:
+ * With the macro it is possible to write the following::
*
* static int foo_suspend(struct device *dev)
* {
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 4cf21d6e9cfd..2bfe3baa63f4 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -312,7 +312,9 @@ struct vdpa_map_file {
* @idx: virtqueue index
* Returns the affinity mask
* @set_group_asid: Set address space identifier for a
- * virtqueue group (optional)
+ * virtqueue group (optional). Caller must
+ * prevent this from being executed concurrently
+ * with set_status.
* @vdev: vdpa device
* @group: virtqueue group
* @asid: address space id for this group
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 1ac86896875c..2ebba746c18f 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -28,7 +28,6 @@
struct vfio_pci_core_device;
struct vfio_pci_region;
struct p2pdma_provider;
-struct dma_buf_phys_vec;
struct dma_buf_attachment;
struct vfio_pci_eventfd {
@@ -62,25 +61,25 @@ struct vfio_pci_device_ops {
int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
};
#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
-int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len);
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
#else
static inline int
-vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len)
@@ -89,7 +88,7 @@ vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
}
static inline int vfio_pci_core_get_dmabuf_phys(
struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
- unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
+ unsigned int region_index, struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
{
return -EOPNOTSUPP;
@@ -236,6 +235,6 @@ static inline bool is_aligned_for_order(struct vm_area_struct *vma,
}
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys);
+ struct phys_vec *phys);
#endif /* VFIO_PCI_CORE_H */
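
With dma_buf_phys_vec replaced by the generic struct phys_vec from <linux/types.h>, filling an entry is just an address/length pair. A hedged sketch for a BAR subrange (function and parameter names are illustrative):

static void example_fill_vec(struct phys_vec *vec, phys_addr_t bar_base,
                             u64 offset, u64 len)
{
        vec->paddr = bar_base + offset;
        vec->len   = len;
}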
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 3626eb694728..3bbc4cb6a672 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -43,13 +43,13 @@ struct virtqueue {
void *priv;
};
-struct vduse_iova_domain;
+struct vduse_vq_group;
union virtio_map {
/* Device that performs DMA */
struct device *dma_dev;
- /* VDUSE specific mapping data */
- struct vduse_iova_domain *iova_domain;
+ /* VDUSE specific virtqueue group for doing map */
+ struct vduse_vq_group *group;
};
int virtqueue_add_outbuf(struct virtqueue *vq,
@@ -62,6 +62,11 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
void *data,
gfp_t gfp);
+int virtqueue_add_inbuf_cache_clean(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp);
+
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
void *data,
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 0c67543a45c8..f91704731057 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -173,6 +173,7 @@ struct virtio_vsock_pkt_info {
u32 remote_cid, remote_port;
struct vsock_sock *vsk;
struct msghdr *msg;
+ struct net *net;
u32 pkt_len;
u16 type;
u16 op;
@@ -185,7 +186,7 @@ struct virtio_transport {
struct vsock_transport transport;
/* Takes ownership of the packet */
- int (*send_pkt)(struct sk_buff *skb);
+ int (*send_pkt)(struct sk_buff *skb, struct net *net);
/* Used in MSG_ZEROCOPY mode. Checks that the provided data
* (number of buffers) can be transmitted with zerocopy
@@ -256,10 +257,10 @@ void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
-bool virtio_transport_stream_allow(u32 cid, u32 port);
+bool virtio_transport_stream_allow(struct vsock_sock *vsk, u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
struct sockaddr_vm *addr);
-bool virtio_transport_dgram_allow(u32 cid, u32 port);
+bool virtio_transport_dgram_allow(struct vsock_sock *vsk, u32 cid, u32 port);
int virtio_transport_connect(struct vsock_sock *vsk);
@@ -280,7 +281,7 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
void virtio_transport_recv_pkt(struct virtio_transport *t,
- struct sk_buff *skb);
+ struct sk_buff *skb, struct net *net);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 92f80b4d69a6..22a139f82d75 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -122,13 +122,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SWPOUT,
THP_SWPOUT_FALLBACK,
#endif
-#ifdef CONFIG_MEMORY_BALLOON
+#ifdef CONFIG_BALLOON
BALLOON_INFLATE,
BALLOON_DEFLATE,
-#ifdef CONFIG_BALLOON_COMPACTION
+#ifdef CONFIG_BALLOON_MIGRATION
BALLOON_MIGRATE,
-#endif
-#endif
+#endif /* CONFIG_BALLOON_MIGRATION */
+#endif /* CONFIG_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3398a345bda8..3c9c266cf782 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -286,10 +286,8 @@ void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);
-extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
-extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
@@ -303,6 +301,7 @@ int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
int (*calculate_pressure)(struct zone *));
+void vmstat_flush_workqueue(void);
#else /* CONFIG_SMP */
/*
@@ -394,15 +393,12 @@ static inline void __dec_node_page_state(struct page *page,
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state
-#define inc_zone_state __inc_zone_state
-#define inc_node_state __inc_node_state
-#define dec_zone_state __dec_zone_state
-
#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
+static inline void vmstat_flush_workqueue(void) { }
static inline void drain_zonestat(struct zone *zone,
struct per_cpu_zonestat *pzstats) { }
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 665ea7dc8a92..75cb0c7cfe57 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -8,9 +8,11 @@
#ifndef _LINUX_WMI_H
#define _LINUX_WMI_H
+#include <linux/compiler_attributes.h>
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
+#include <linux/types.h>
/**
* struct wmi_device - WMI device structure
@@ -36,6 +38,42 @@ struct wmi_device {
*/
#define to_wmi_device(device) container_of_const(device, struct wmi_device, dev)
+/**
+ * struct wmi_buffer - WMI data buffer
+ * @length: Buffer length in bytes
+ * @data: Pointer to the buffer content
+ *
+ * This structure is used to exchange data with the WMI driver core.
+ */
+struct wmi_buffer {
+ size_t length;
+ void *data;
+};
+
+/**
+ * struct wmi_string - WMI string representation
+ * @length: Size of @chars in bytes
+ * @chars: UTF-16LE characters with optional nul termination and padding
+ *
+ * This structure is used when exchanging string data over the WMI interface.
+ */
+struct wmi_string {
+ __le16 length;
+ __le16 chars[];
+} __packed;
+
+ssize_t wmi_string_to_utf8s(const struct wmi_string *str, u8 *dst, size_t length);
+
+ssize_t wmi_string_from_utf8s(struct wmi_string *str, size_t max_chars, const u8 *src,
+ size_t src_length);
+
+int wmidev_invoke_method(struct wmi_device *wdev, u8 instance, u32 method_id,
+ const struct wmi_buffer *in, struct wmi_buffer *out);
+
+int wmidev_query_block(struct wmi_device *wdev, u8 instance, struct wmi_buffer *out);
+
+int wmidev_set_block(struct wmi_device *wdev, u8 instance, const struct wmi_buffer *in);
+
acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id,
const struct acpi_buffer *in, struct acpi_buffer *out);
@@ -54,9 +92,11 @@ u8 wmidev_instance_count(struct wmi_device *wdev);
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
- * @notify: Callback for receiving WMI events
+ * @notify: Callback for receiving WMI events (deprecated)
+ * @notify_new: Callback for receiving WMI events
*
- * This represents WMI drivers which handle WMI devices.
+ * This represents WMI drivers which handle WMI devices. The data inside the buffer
+ * passed to the @notify_new callback is guaranteed to be aligned on an 8-byte boundary.
*/
struct wmi_driver {
struct device_driver driver;
@@ -68,6 +108,7 @@ struct wmi_driver {
void (*remove)(struct wmi_device *wdev);
void (*shutdown)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
+ void (*notify_new)(struct wmi_device *device, const struct wmi_buffer *data);
};
/**
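
A hedged sketch of a driver opting into the new callback; everything named example_* is illustrative, and only the structures added in this hunk are assumed:

static void example_notify_new(struct wmi_device *wdev,
                               const struct wmi_buffer *event)
{
        /* event->data is 8-byte aligned per the guarantee above */
        dev_info(&wdev->dev, "WMI event, %zu bytes\n", event->length);
}

static struct wmi_driver example_driver = {
        .driver     = { .name = "example-wmi" },
        .notify_new = example_notify_new,
};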
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index dabc351cc127..a4749f56398f 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -588,7 +588,7 @@ struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_housekeeping_update(const struct cpumask *hk);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f48e8ccffe81..e530112c4b3a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -328,9 +328,6 @@ struct dirty_throttle_control {
bool dirty_exceeded;
};
-void laptop_io_completion(struct backing_dev_info *info);
-void laptop_sync_completion(void);
-void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -342,7 +339,6 @@ extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
-extern int laptop_mode;
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 45ff6f7a872b..85b1fff02fde 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -17,6 +17,7 @@
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H
+#include <linux/instruction_pointer.h>
#include <linux/mutex.h>
#include <linux/rtmutex.h>
@@ -44,7 +45,7 @@ struct ww_class {
unsigned int is_wait_die;
};
-struct ww_mutex {
+context_lock_struct(ww_mutex) {
struct WW_MUTEX_BASE base;
struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
@@ -52,7 +53,7 @@ struct ww_mutex {
#endif
};
-struct ww_acquire_ctx {
+context_lock_struct(ww_acquire_ctx) {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
@@ -141,6 +142,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
+ __acquires(ctx) __no_context_analysis
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
@@ -179,6 +181,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+ __releases(ctx) __acquires_shared(ctx) __no_context_analysis
{
#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
@@ -196,6 +199,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ __releases_shared(ctx) __no_context_analysis
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
@@ -245,7 +249,8 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -278,7 +283,8 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
@@ -305,6 +311,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __acquires(lock) __must_hold(ctx) __no_context_analysis
{
int ret;
#ifdef DEBUG_WW_MUTEXES
@@ -342,6 +349,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx)
{
#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
@@ -349,10 +357,11 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
return ww_mutex_lock_interruptible(lock, ctx);
}
-extern void ww_mutex_unlock(struct ww_mutex *lock);
+extern void ww_mutex_unlock(struct ww_mutex *lock) __releases(lock);
extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(true, lock) __must_hold(ctx);
/***
* ww_mutex_destroy - mark a w/w mutex unusable
@@ -363,6 +372,7 @@ extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
+ __must_not_hold(lock)
{
#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
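
The annotations above encode the classic wound/wait pattern. A hedged sketch of the two-lock dance they describe, following the documented idiom (my_ww_class and the helper are illustrative; on return both mutexes are held, and the caller unlocks them before ww_acquire_fini()):

static DEFINE_WW_CLASS(my_ww_class);

static void lock_both(struct ww_mutex *a, struct ww_mutex *b,
                      struct ww_acquire_ctx *ctx)
{
        int ret;

        ww_acquire_init(ctx, &my_ww_class);

        ww_mutex_lock(a, ctx);          /* cannot deadlock: nothing held yet */
        ret = ww_mutex_lock(b, ctx);
        while (ret == -EDEADLK) {
                ww_mutex_unlock(a);
                ww_mutex_lock_slow(b, ctx);     /* sleep on the contended lock */
                swap(a, b);                     /* then take the other one */
                ret = ww_mutex_lock(b, ctx);
        }
        ww_acquire_done(ctx);
}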
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index a4d6cc0c9f68..1e0e2cb53579 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -19,6 +19,7 @@
* @WWAN_PORT_FASTBOOT: Fastboot protocol control
* @WWAN_PORT_ADB: ADB protocol control
* @WWAN_PORT_MIPC: MTK MIPC diagnostic interface
+ * @WWAN_PORT_NMEA: embedded GNSS receiver with NMEA output
*
* @WWAN_PORT_MAX: Highest supported port type
* @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -34,6 +35,7 @@ enum wwan_port_type {
WWAN_PORT_FASTBOOT,
WWAN_PORT_ADB,
WWAN_PORT_MIPC,
+ WWAN_PORT_NMEA,
/* Add new port types above this line */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 64e9afe7d647..296b5ee5c979 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -114,7 +114,7 @@ struct simple_xattr {
struct rb_node rb_node;
char *name;
size_t size;
- char value[];
+ char value[] __counted_by(size);
};
void simple_xattrs_init(struct simple_xattrs *xattrs);
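
With __counted_by(size), the compiler and runtime checkers can bounds-check accesses to value against size, which makes initialization order matter: the counter must be valid before the array is touched. A hedged sketch of the usual allocation idiom (the helper is illustrative):

static struct simple_xattr *example_xattr_alloc(const void *buf, size_t len)
{
        struct simple_xattr *xattr;

        xattr = kzalloc(struct_size(xattr, value, len), GFP_KERNEL);
        if (!xattr)
                return NULL;
        xattr->size = len;              /* initialize the counter first */
        memcpy(xattr->value, buf, len); /* bounds-checked against ->size */
        return xattr;
}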
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index f3ccff2d966c..478410c880b1 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -22,6 +22,7 @@ struct zs_pool_stats {
};
struct zs_pool;
+struct scatterlist;
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
@@ -40,9 +41,12 @@ unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
- void *local_copy);
+ size_t mem_len, void *local_copy);
void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
- void *handle_mem);
+ size_t mem_len, void *handle_mem);
+void zs_obj_read_sg_begin(struct zs_pool *pool, unsigned long handle,
+ struct scatterlist *sg, size_t mem_len);
+void zs_obj_read_sg_end(struct zs_pool *pool, unsigned long handle);
void zs_obj_write(struct zs_pool *pool, unsigned long handle,
void *handle_mem, size_t mem_len);
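
A hedged sketch of the new scatterlist read path, assuming the _sg_ variants mirror the begin/end contract of zs_obj_read_begin()/zs_obj_read_end(), fill a caller-supplied table, and that an object spans at most two pages (hence two entries):

static void example_read_sg(struct zs_pool *pool, unsigned long handle,
                            size_t mem_len)
{
        struct scatterlist sg[2];

        sg_init_table(sg, ARRAY_SIZE(sg));
        zs_obj_read_sg_begin(pool, handle, sg, mem_len);
        /* ... consume the entries, e.g. kmap each or program a DMA engine ... */
        zs_obj_read_sg_end(pool, handle);
}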