-rw-r--r--.mailmap6
-rw-r--r--Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml7
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,net.yaml26
-rw-r--r--Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml19
-rw-r--r--Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml26
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,versal-pinctrl.yaml1
-rw-r--r--Documentation/input/event-codes.rst25
-rw-r--r--Documentation/netlink/genetlink-c.yaml2
-rw-r--r--Documentation/netlink/genetlink.yaml2
-rw-r--r--Documentation/netlink/netlink-raw.yaml2
-rw-r--r--Documentation/netlink/specs/devlink.yaml9
-rw-r--r--Documentation/netlink/specs/rt-addr.yaml7
-rw-r--r--Documentation/netlink/specs/rt-link.yaml50
-rw-r--r--Documentation/netlink/specs/rt-neigh.yaml2
-rw-r--r--Documentation/netlink/specs/rt-route.yaml8
-rw-r--r--Documentation/netlink/specs/rt-rule.yaml6
-rw-r--r--Documentation/networking/devlink/devlink-params.rst10
-rw-r--r--Documentation/networking/devlink/mlx5.rst14
-rw-r--r--Documentation/networking/index.rst5
-rw-r--r--Documentation/networking/ip-sysctl.rst13
-rw-r--r--Documentation/networking/net_cachelines/inet_connection_sock.rst2
-rw-r--r--Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst3
-rw-r--r--Documentation/networking/xfrm/index.rst13
-rw-r--r--Documentation/networking/xfrm/xfrm_device.rst (renamed from Documentation/networking/xfrm_device.rst)20
-rw-r--r--Documentation/networking/xfrm/xfrm_proc.rst (renamed from Documentation/networking/xfrm_proc.rst)0
-rw-r--r--Documentation/networking/xfrm/xfrm_sync.rst (renamed from Documentation/networking/xfrm_sync.rst)97
-rw-r--r--Documentation/networking/xfrm/xfrm_sysctl.rst (renamed from Documentation/networking/xfrm_sysctl.rst)4
-rw-r--r--Documentation/sound/codecs/cs35l56.rst9
-rw-r--r--Documentation/wmi/driver-development-guide.rst1
-rw-r--r--MAINTAINERS86
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-fuji-data64.dts14
-rw-r--r--arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-engicam-microgea-rmm.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-kontron-bl-osm-s.dts24
-rw-r--r--arch/arm64/boot/dts/freescale/imx95.dtsi3
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576.dtsi14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588j.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts4
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--arch/arm64/include/asm/page.h4
-rw-r--r--arch/arm64/kernel/acpi.c10
-rw-r--r--arch/arm64/kernel/proton-pack.c2
-rw-r--r--arch/arm64/kvm/arm.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c6
-rw-r--r--arch/arm64/mm/fault.c21
-rw-r--r--arch/loongarch/include/asm/cpu.h21
-rw-r--r--arch/loongarch/include/uapi/asm/ptrace.h40
-rw-r--r--arch/loongarch/kernel/cpu-probe.c34
-rw-r--r--arch/loongarch/kernel/machine_kexec.c2
-rw-r--r--arch/loongarch/kernel/numa.c60
-rw-r--r--arch/loongarch/kernel/proc.c2
-rw-r--r--arch/loongarch/net/bpf_jit.c3
-rw-r--r--arch/loongarch/pci/pci.c8
-rw-r--r--arch/mips/boot/dts/econet/en751221.dtsi2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c100
-rw-r--r--arch/mips/mti-malta/malta-init.c20
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype1
-rw-r--r--arch/riscv/include/asm/vendorid_list.h2
-rw-r--r--arch/riscv/kernel/sbi.c6
-rw-r--r--arch/s390/include/asm/pgtable.h12
-rw-r--r--arch/s390/mm/pgtable.c4
-rw-r--r--arch/x86/events/core.c10
-rw-r--r--arch/x86/events/intel/uncore.c2
-rw-r--r--arch/x86/include/asm/ftrace.h5
-rw-r--r--arch/x86/kernel/acpi/cppc.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c7
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c1
-rw-r--r--arch/x86/kernel/ftrace_64.S8
-rw-r--r--arch/x86/kvm/svm/svm.c9
-rw-r--r--arch/x86/kvm/svm/svm.h1
-rw-r--r--block/bdev.c2
-rw-r--r--drivers/acpi/acpi_mrrm.c43
-rw-r--r--drivers/acpi/apei/einj-core.c64
-rw-r--r--drivers/acpi/arm64/gtdt.c4
-rw-r--r--drivers/acpi/cppc_acpi.c6
-rw-r--r--drivers/acpi/numa/hmat.c46
-rw-r--r--drivers/acpi/numa/srat.c2
-rw-r--r--drivers/acpi/processor_driver.c6
-rw-r--r--drivers/acpi/processor_idle.c115
-rw-r--r--drivers/android/binder_netlink.c1
-rw-r--r--drivers/android/binder_netlink.h1
-rw-r--r--drivers/ata/libata-core.c10
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/base/power/main.c25
-rw-r--r--drivers/bluetooth/btusb.c39
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c9
-rw-r--r--drivers/crypto/hisilicon/qm.c2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c6
-rw-r--r--drivers/cxl/core/region.c2
-rw-r--r--drivers/dibs/dibs_main.c3
-rw-r--r--drivers/dpll/dpll_nl.c1
-rw-r--r--drivers/dpll/dpll_nl.h1
-rw-r--r--drivers/dpll/zl3073x/Makefile3
-rw-r--r--drivers/dpll/zl3073x/core.c243
-rw-r--r--drivers/dpll/zl3073x/core.h184
-rw-r--r--drivers/dpll/zl3073x/dpll.c776
-rw-r--r--drivers/dpll/zl3073x/fw.c6
-rw-r--r--drivers/dpll/zl3073x/out.c157
-rw-r--r--drivers/dpll/zl3073x/out.h93
-rw-r--r--drivers/dpll/zl3073x/prop.c12
-rw-r--r--drivers/dpll/zl3073x/ref.c204
-rw-r--r--drivers/dpll/zl3073x/ref.h134
-rw-r--r--drivers/dpll/zl3073x/synth.c87
-rw-r--r--drivers/dpll/zl3073x/synth.h72
-rw-r--r--drivers/edac/altera_edac.c22
-rw-r--r--drivers/edac/versalnet_edac.c24
-rw-r--r--drivers/firewire/core-card.c2
-rw-r--r--drivers/firewire/core-topology.c3
-rw-r--r--drivers/gpio/gpiolib-cdev.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c138
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c11
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c11
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c4
-rw-r--r--drivers/gpu/drm/drm_plane.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/fw.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c7
-rw-r--r--drivers/gpu/drm/tegra/dc.c1
-rw-r--r--drivers/gpu/drm/tegra/dsi.c9
-rw-r--r--drivers/gpu/drm/tegra/uapi.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c12
-rw-r--r--drivers/gpu/drm/xe/Kconfig1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h1
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c18
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c1
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c6
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c11
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c2
-rw-r--r--drivers/hid/hid-apple.c1
-rw-r--r--drivers/hid/hid-corsair-void.c5
-rw-r--r--drivers/hid/hid-elecom.c6
-rw-r--r--drivers/hid/hid-haptic.c2
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c5
-rw-r--r--drivers/hid/hid-lenovo.c17
-rw-r--r--drivers/hid/hid-ntrig.c7
-rw-r--r--drivers/hid/hid-playstation.c2
-rw-r--r--drivers/hid/hid-quirks.c16
-rw-r--r--drivers/hid/hid-uclogic-params.c4
-rw-r--r--drivers/hid/usbhid/hid-pidff.c4
-rw-r--r--drivers/hwmon/gpd-fan.c54
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c6
-rw-r--r--drivers/input/keyboard/imx_sc_key.c2
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c9
-rw-r--r--drivers/input/touchscreen/goodix.c28
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/iommu/iommufd/driver.c2
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h4
-rw-r--r--drivers/irqchip/irq-riscv-intc.c3
-rw-r--r--drivers/md/dm-pcache/Makefile2
-rw-r--r--drivers/md/dm-pcache/cache.c4
-rw-r--r--drivers/md/dm-pcache/cache.h2
-rw-r--r--drivers/md/dm-pcache/cache_req.c6
-rw-r--r--drivers/md/dm-pcache/pcache_internal.h2
-rw-r--r--drivers/md/dm-verity-fec.c6
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/memory/tegra/tegra210.c4
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c4
-rw-r--r--drivers/mmc/host/pxamci.c56
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c2
-rw-r--r--drivers/mtd/mtdchar.c6
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/ecc-realtek.c6
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c2
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c3
-rw-r--r--drivers/mtd/nand/spi/fmsh.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c9
-rw-r--r--drivers/net/can/Kconfig17
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/dev/bittiming.c63
-rw-r--r--drivers/net/can/dev/calc_bittiming.c114
-rw-r--r--drivers/net/can/dev/dev.c42
-rw-r--r--drivers/net/can/dev/netlink.c319
-rw-r--r--drivers/net/can/dummy_can.c285
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c299
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/usb/gs_usb.c100
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c4
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c14
-rw-r--r--drivers/net/dsa/ks8995.c6
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c31
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c22
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c7
-rw-r--r--drivers/net/dsa/yt921x.c105
-rw-r--r--drivers/net/dsa/yt921x.h54
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h3
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c70
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c55
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h12
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c45
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c198
-rw-r--r--drivers/net/ethernet/freescale/fec.h31
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c72
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c64
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c91
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h14
-rw-r--r--drivers/net/ethernet/google/gve/gve.h7
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c11
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c12
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c73
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h84
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c217
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c12
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c22
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h12
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c35
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c24
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h18
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c14
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c6
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h15
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c34
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h41
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c11
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h8
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c187
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c31
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c5
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c148
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c87
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c76
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c105
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c245
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c164
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c81
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c469
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c394
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h25
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c45
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c12
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c260
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c38
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c10
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h34
-rw-r--r--drivers/net/hyperv/netvsc_drv.c15
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/mdio/fwnode_mdio.c5
-rw-r--r--drivers/net/mdio/of_mdio.c5
-rw-r--r--drivers/net/netconsole.c386
-rw-r--r--drivers/net/netdevsim/dev.c56
-rw-r--r--drivers/net/netdevsim/netdev.c26
-rw-r--r--drivers/net/netdevsim/netdevsim.h1
-rw-r--r--drivers/net/ovpn/netlink-gen.c1
-rw-r--r--drivers/net/ovpn/netlink-gen.h1
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c5
-rw-r--r--drivers/net/pcs/pcs-xpcs.c136
-rw-r--r--drivers/net/phy/adin1100.c7
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c21
-rw-r--r--drivers/net/phy/dp83640.c29
-rw-r--r--drivers/net/phy/dp83867.c36
-rw-r--r--drivers/net/phy/fixed_phy.c32
-rw-r--r--drivers/net/phy/mdio-private.h11
-rw-r--r--drivers/net/phy/mdio_bus.c96
-rw-r--r--drivers/net/phy/mdio_device.c60
-rw-r--r--drivers/net/phy/micrel.c42
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c8
-rw-r--r--drivers/net/phy/mscc/mscc.h4
-rw-r--r--drivers/net/phy/mscc/mscc_main.c439
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c21
-rw-r--r--drivers/net/phy/mxl-gpy.c135
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c22
-rw-r--r--drivers/net/phy/phy-c45.c8
-rw-r--r--drivers/net/phy/phy-caps.h1
-rw-r--r--drivers/net/phy/phy-core.c47
-rw-r--r--drivers/net/phy/phy.c14
-rw-r--r--drivers/net/phy/phy_caps.c2
-rw-r--r--drivers/net/phy/phylink.c3
-rw-r--r--drivers/net/phy/realtek/realtek_main.c159
-rw-r--r--drivers/net/team/team_core.c23
-rw-r--r--drivers/net/team/team_nl.c1
-rw-r--r--drivers/net/team/team_nl.h1
-rw-r--r--drivers/net/tun_vnet.h2
-rw-r--r--drivers/net/usb/usbnet.c233
-rw-r--r--drivers/net/veth.c43
-rw-r--r--drivers/net/virtio_net.c47
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c18
-rw-r--r--drivers/net/vxlan/vxlan_private.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c7
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c19
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h2
-rw-r--r--drivers/nfc/mei_phy.h4
-rw-r--r--drivers/nvme/host/core.c3
-rw-r--r--drivers/nvme/host/fc.c15
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/target/auth.c4
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c1
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aspm.c25
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/pci/quirks.c42
-rw-r--r--drivers/pci/tph.c16
-rw-r--r--drivers/perf/riscv_pmu_sbi.c2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c23
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c6
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c2
-rw-r--r--drivers/pinctrl/realtek/Kconfig1
-rw-r--r--drivers/platform/arm64/lenovo-thinkpad-t14s.c16
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c25
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c3
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h1
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c106
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c6
-rw-r--r--drivers/platform/x86/huawei-wmi.c4
-rw-r--r--drivers/platform/x86/intel/hid.c1
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c4
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c4
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c43
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c13
-rw-r--r--drivers/pmdomain/imx/gpc.c2
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c29
-rw-r--r--drivers/power/supply/intel_dc_ti_battery.c10
-rw-r--r--drivers/ptp/ptp_ines.c31
-rw-r--r--drivers/ptp/ptp_ocp.c46
-rw-r--r--drivers/pwm/pwm-adp5585.c4
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c4
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.c247
-rw-r--r--drivers/s390/net/qeth_core_mpc.h20
-rw-r--r--drivers/scsi/sg.c10
-rw-r--r--drivers/spi/spi-imx.c15
-rw-r--r--drivers/spi/spi-xilinx.c2
-rw-r--r--drivers/spi/spi.c12
-rw-r--r--drivers/target/loopback/tcm_loop.c3
-rw-r--r--drivers/vhost/net.c53
-rw-r--r--drivers/vhost/vhost.c76
-rw-r--r--drivers/vhost/vhost.h10
-rw-r--r--fs/afs/cell.c78
-rw-r--r--fs/afs/dynroot.c3
-rw-r--r--fs/afs/internal.h12
-rw-r--r--fs/afs/mntpt.c3
-rw-r--r--fs/afs/proc.c3
-rw-r--r--fs/afs/super.c2
-rw-r--r--fs/afs/vl_alias.c3
-rw-r--r--fs/bfs/inode.c19
-rw-r--r--fs/binfmt_misc.c4
-rw-r--r--fs/efivarfs/super.c1
-rw-r--r--fs/exfat/super.c5
-rw-r--r--fs/fat/inode.c6
-rw-r--r--fs/fuse/virtio_fs.c2
-rw-r--r--fs/hostfs/hostfs_kern.c29
-rw-r--r--fs/inode.c12
-rw-r--r--fs/isofs/inode.c5
-rw-r--r--fs/lockd/netlink.c1
-rw-r--r--fs/lockd/netlink.h1
-rw-r--r--fs/namespace.c46
-rw-r--r--fs/nfs/client.c8
-rw-r--r--fs/nfs/dir.c7
-rw-r--r--fs/nfs/inode.c18
-rw-r--r--fs/nfs/localio.c229
-rw-r--r--fs/nfs/nfs3client.c14
-rw-r--r--fs/nfs/nfs4client.c14
-rw-r--r--fs/nfs/nfs4proc.c9
-rw-r--r--fs/nfs/pnfs_nfs.c66
-rw-r--r--fs/nfs/sysfs.c1
-rw-r--r--fs/nfsd/netlink.c1
-rw-r--r--fs/nfsd/netlink.h1
-rw-r--r--fs/smb/client/cached_dir.c41
-rw-r--r--fs/smb/client/cifssmb.c22
-rw-r--r--fs/smb/client/connect.c1
-rw-r--r--fs/smb/client/fs_context.c8
-rw-r--r--fs/smb/client/smbdirect.c3
-rw-r--r--fs/smb/client/transport.c2
-rw-r--r--fs/super.c13
-rw-r--r--fs/xfs/scrub/symlink_repair.c2
-rw-r--r--fs/xfs/xfs_super.c5
-rw-r--r--include/acpi/processor.h34
-rw-r--r--include/drm/intel/pciids.h5
-rw-r--r--include/linux/ata.h1
-rw-r--r--include/linux/can/bittiming.h81
-rw-r--r--include/linux/can/dev.h68
-rw-r--r--include/linux/entry-virt.h2
-rw-r--r--include/linux/filter.h20
-rw-r--r--include/linux/fs.h8
-rw-r--r--include/linux/ftrace.h10
-rw-r--r--include/linux/highmem.h6
-rw-r--r--include/linux/mii_timestamper.h13
-rw-r--r--include/linux/mlx5/driver.h11
-rw-r--r--include/linux/mlx5/fs.h24
-rw-r--r--include/linux/mlx5/mlx5_ifc.h47
-rw-r--r--include/linux/mlx5/port.h1
-rw-r--r--include/linux/mlx5/vport.h3
-rw-r--r--include/linux/mm.h13
-rw-r--r--include/linux/pci-tph.h1
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pcs/pcs-xpcs.h4
-rw-r--r--include/linux/phy.h7
-rw-r--r--include/linux/skbuff.h4
-rw-r--r--include/linux/stmmac.h6
-rw-r--r--include/linux/virtio_net.h7
-rw-r--r--include/net/bluetooth/hci_core.h21
-rw-r--r--include/net/devlink.h45
-rw-r--r--include/net/dsa.h8
-rw-r--r--include/net/gro.h27
-rw-r--r--include/net/inet_connection_sock.h20
-rw-r--r--include/net/mana/gdma.h12
-rw-r--r--include/net/mana/mana.h20
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/pkt_sched.h5
-rw-r--r--include/net/sch_generic.h101
-rw-r--r--include/net/sctp/auth.h1
-rw-r--r--include/net/sock.h15
-rw-r--r--include/net/tcp.h1
-rw-r--r--include/net/xfrm.h3
-rw-r--r--include/uapi/linux/android/binder_netlink.h1
-rw-r--r--include/uapi/linux/can/netlink.h34
-rw-r--r--include/uapi/linux/devlink.h3
-rw-r--r--include/uapi/linux/dpll.h1
-rw-r--r--include/uapi/linux/ethtool.h5
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h1
-rw-r--r--include/uapi/linux/fou.h1
-rw-r--r--include/uapi/linux/handshake.h1
-rw-r--r--include/uapi/linux/if_ether.h4
-rw-r--r--include/uapi/linux/if_team.h1
-rw-r--r--include/uapi/linux/input-event-codes.h2
-rw-r--r--include/uapi/linux/io_uring/query.h3
-rw-r--r--include/uapi/linux/isst_if.h50
-rw-r--r--include/uapi/linux/lockd_netlink.h1
-rw-r--r--include/uapi/linux/mdio.h23
-rw-r--r--include/uapi/linux/mount.h2
-rw-r--r--include/uapi/linux/mptcp_pm.h1
-rw-r--r--include/uapi/linux/net_shaper.h1
-rw-r--r--include/uapi/linux/netdev.h1
-rw-r--r--include/uapi/linux/nfsd_netlink.h1
-rw-r--r--include/uapi/linux/ovpn.h1
-rw-r--r--include/uapi/linux/psp.h1
-rw-r--r--include/uapi/linux/tee.h23
-rw-r--r--io_uring/cmd_net.c2
-rw-r--r--io_uring/query.c2
-rw-r--r--io_uring/rsrc.c16
-rw-r--r--io_uring/rw.c3
-rw-r--r--kernel/bpf/helpers.c26
-rw-r--r--kernel/bpf/stream.c3
-rw-r--r--kernel/bpf/trampoline.c5
-rw-r--r--kernel/bpf/verifier.c18
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/power/hibernate.c9
-rw-r--r--kernel/power/suspend.c3
-rw-r--r--kernel/power/swap.c22
-rw-r--r--kernel/sched/ext.c31
-rw-r--r--kernel/time/posix-timers.c12
-rw-r--r--kernel/time/tick-sched.c11
-rw-r--r--kernel/time/timekeeping.c21
-rw-r--r--kernel/time/timer.c7
-rw-r--r--kernel/trace/ftrace.c60
-rw-r--r--kernel/trace/trace.c10
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/crypto/tests/sha256_kunit.c1
-rw-r--r--lib/test_kho.c3
-rw-r--r--mm/Kconfig7
-rw-r--r--mm/filemap.c27
-rw-r--r--mm/huge_memory.c28
-rw-r--r--mm/memblock.c3
-rw-r--r--mm/memfd.c27
-rw-r--r--mm/mempool.c32
-rw-r--r--mm/mmap_lock.c1
-rw-r--r--mm/page_alloc.c9
-rw-r--r--mm/shmem.c15
-rw-r--r--mm/slub.c8
-rw-r--r--mm/swap_state.c13
-rw-r--r--mm/swapfile.c4
-rw-r--r--net/atm/common.c2
-rw-r--r--net/bluetooth/hci_core.c89
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/iso.c30
-rw-r--r--net/bluetooth/l2cap_core.c23
-rw-r--r--net/bluetooth/sco.c35
-rw-r--r--net/bluetooth/smp.c31
-rw-r--r--net/can/raw.c54
-rw-r--r--net/core/dev.c62
-rw-r--r--net/core/dev_ioctl.c12
-rw-r--r--net/core/devmem.c6
-rw-r--r--net/core/net_namespace.c11
-rw-r--r--net/core/netdev-genl-gen.c1
-rw-r--r--net/core/netdev-genl-gen.h1
-rw-r--r--net/core/skbuff.c70
-rw-r--r--net/core/sock.c22
-rw-r--r--net/devlink/netlink_gen.c6
-rw-r--r--net/devlink/netlink_gen.h1
-rw-r--r--net/devlink/param.c180
-rw-r--r--net/devlink/rate.c4
-rw-r--r--net/dsa/conduit.c145
-rw-r--r--net/dsa/devlink.c3
-rw-r--r--net/ethernet/eth.c16
-rw-r--r--net/ethtool/common.c8
-rw-r--r--net/handshake/genl.c1
-rw-r--r--net/handshake/genl.h1
-rw-r--r--net/ipv4/af_inet.c17
-rw-r--r--net/ipv4/esp4_offload.c6
-rw-r--r--net/ipv4/fou_nl.c1
-rw-r--r--net/ipv4/fou_nl.h1
-rw-r--r--net/ipv4/inet_connection_sock.c12
-rw-r--r--net/ipv4/inet_diag.c8
-rw-r--r--net/ipv4/sysctl_net_ipv4.c9
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c18
-rw-r--r--net/ipv4/tcp_ipv4.c11
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_offload.c27
-rw-r--r--net/ipv4/tcp_timer.c23
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/esp6_offload.c6
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/iucv/af_iucv.c3
-rw-r--r--net/iucv/iucv.c3
-rw-r--r--net/kcm/Kconfig4
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_core.c6
-rw-r--r--net/mctp/route.c1
-rw-r--r--net/mptcp/fastopen.c4
-rw-r--r--net/mptcp/mib.c1
-rw-r--r--net/mptcp/mib.h1
-rw-r--r--net/mptcp/mptcp_diag.c3
-rw-r--r--net/mptcp/mptcp_pm_gen.c1
-rw-r--r--net/mptcp/mptcp_pm_gen.h1
-rw-r--r--net/mptcp/options.c54
-rw-r--r--net/mptcp/pm.c24
-rw-r--r--net/mptcp/pm_kernel.c4
-rw-r--r--net/mptcp/protocol.c548
-rw-r--r--net/mptcp/protocol.h54
-rw-r--r--net/mptcp/subflow.c50
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_fo.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_lc.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_mh.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_ovf.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_pe.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_twos.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c3
-rw-r--r--net/openvswitch/actions.c68
-rw-r--r--net/openvswitch/flow_netlink.c64
-rw-r--r--net/openvswitch/flow_netlink.h2
-rw-r--r--net/psp/psp-nl-gen.c1
-rw-r--r--net/psp/psp-nl-gen.h1
-rw-r--r--net/sched/act_bpf.c6
-rw-r--r--net/sched/act_ct.c8
-rw-r--r--net/sched/act_ife.c6
-rw-r--r--net/sched/cls_api.c6
-rw-r--r--net/sched/cls_bpf.c6
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/em_canid.c3
-rw-r--r--net/sched/em_cmp.c5
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/em_text.c11
-rw-r--r--net/sched/sch_cake.c19
-rw-r--r--net/sched/sch_codel.c4
-rw-r--r--net/sched/sch_dualpi2.c1
-rw-r--r--net/sched/sch_fq.c9
-rw-r--r--net/sched/sch_fq_codel.c5
-rw-r--r--net/sched/sch_netem.c1
-rw-r--r--net/sched/sch_qfq.c2
-rw-r--r--net/sched/sch_taprio.c1
-rw-r--r--net/sched/sch_tbf.c1
-rw-r--r--net/shaper/shaper_nl_gen.c1
-rw-r--r--net/shaper/shaper_nl_gen.h1
-rw-r--r--net/smc/af_smc.c3
-rw-r--r--net/unix/af_unix.c10
-rw-r--r--net/unix/af_unix.h4
-rw-r--r--net/unix/garbage.c92
-rw-r--r--net/vmw_vsock/af_vsock.c40
-rw-r--r--net/xdp/xsk.c143
-rw-r--r--net/xfrm/Kconfig11
-rw-r--r--net/xfrm/xfrm_device.c2
-rw-r--r--net/xfrm/xfrm_input.c30
-rw-r--r--net/xfrm/xfrm_output.c8
-rw-r--r--net/xfrm/xfrm_state.c30
-rw-r--r--net/xfrm/xfrm_user.c8
-rw-r--r--samples/vfs/test-statx.c6
-rw-r--r--samples/watch_queue/watch_test.c6
-rw-r--r--scripts/gendwarfksyms/gendwarfksyms.c3
-rw-r--r--scripts/gendwarfksyms/gendwarfksyms.h2
-rw-r--r--scripts/gendwarfksyms/symbols.c4
-rw-r--r--security/landlock/fs.c7
-rw-r--r--security/selinux/hooks.c251
-rw-r--r--security/selinux/include/objsec.h22
-rw-r--r--sound/hda/codecs/cirrus/cs420x.c1
-rw-r--r--sound/hda/codecs/hdmi/nvhdmi-mcp.c4
-rw-r--r--sound/hda/codecs/realtek/alc269.c11
-rw-r--r--sound/pci/au88x0/au88x0.c8
-rw-r--r--sound/soc/codecs/cs4271.c10
-rw-r--r--sound/soc/codecs/da7213.c69
-rw-r--r--sound/soc/codecs/da7213.h1
-rw-r--r--sound/soc/codecs/lpass-va-macro.c2
-rw-r--r--sound/soc/codecs/tas2781-i2c.c9
-rw-r--r--sound/soc/codecs/tas2783-sdw.c20
-rw-r--r--sound/soc/renesas/rcar/ssiu.c3
-rw-r--r--sound/soc/sdca/sdca_functions.c3
-rw-r--r--sound/soc/sdw_utils/soc_sdw_utils.c20
-rw-r--r--sound/usb/endpoint.c5
-rw-r--r--sound/usb/mixer.c4
-rw-r--r--sound/usb/quirks.c11
-rw-r--r--tools/arch/riscv/include/asm/csr.h5
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst2
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/include/uapi/linux/netdev.h1
-rw-r--r--tools/lib/bpf/bpf_helpers.h28
-rw-r--r--tools/net/ynl/Makefile8
-rwxr-xr-xtools/net/ynl/pyynl/cli.py79
-rw-r--r--tools/net/ynl/pyynl/lib/ynl.py9
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_c.py26
-rw-r--r--tools/net/ynl/samples/.gitignore1
-rw-r--r--tools/net/ynl/samples/Makefile1
-rw-r--r--tools/net/ynl/samples/tc-filter-add.c335
-rw-r--r--tools/net/ynl/tests/Makefile32
-rw-r--r--tools/net/ynl/tests/config6
-rwxr-xr-xtools/net/ynl/tests/test_ynl_cli.sh327
-rwxr-xr-xtools/net/ynl/tests/test_ynl_ethtool.sh222
-rw-r--r--tools/net/ynl/ynltool/.gitignore1
-rw-r--r--tools/net/ynl/ynltool/Makefile2
-rw-r--r--tools/perf/Makefile.config5
-rw-r--r--tools/perf/builtin-lock.c2
-rwxr-xr-xtools/perf/tests/shell/lock_contention.sh14
-rw-r--r--tools/perf/util/header.c10
-rw-r--r--tools/perf/util/libbfd.c38
-rw-r--r--tools/perf/util/mutex.c14
-rw-r--r--tools/perf/util/mutex.h2
-rw-r--r--tools/testing/selftests/bpf/config3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c107
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c140
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c150
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c8
-rw-r--r--tools/testing/selftests/bpf/progs/iters_looping.c53
-rw-r--r--tools/testing/selftests/bpf/progs/livepatch_trampoline.c30
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_sockmap.c43
-rw-r--r--tools/testing/selftests/bpf/progs/stacktrace_ips.c49
-rw-r--r--tools/testing/selftests/bpf/progs/stream_fail.c6
-rw-r--r--tools/testing/selftests/bpf/progs/task_work.c6
-rw-r--r--tools/testing/selftests/bpf/progs/task_work_fail.c8
-rw-r--r--tools/testing/selftests/bpf/progs/task_work_stress.c4
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod.c26
-rw-r--r--tools/testing/selftests/drivers/net/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/net/Makefile2
-rw-r--r--tools/testing/selftests/drivers/net/gro.c (renamed from tools/testing/selftests/net/gro.c)5
-rwxr-xr-xtools/testing/selftests/drivers/net/gro.py164
-rw-r--r--tools/testing/selftests/drivers/net/hw/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile25
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/__init__.py4
-rw-r--r--tools/testing/selftests/drivers/net/hw/toeplitz.c (renamed from tools/testing/selftests/net/toeplitz.c)72
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/toeplitz.py211
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/__init__.py4
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py2
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_basic.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_overflow.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh116
-rwxr-xr-xtools/testing/selftests/drivers/net/xdp.py57
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc4
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c15
-rw-r--r--tools/testing/selftests/net/.gitignore8
-rw-r--r--tools/testing/selftests/net/Makefile7
-rw-r--r--tools/testing/selftests/net/af_unix/.gitignore8
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile2
-rw-r--r--tools/testing/selftests/net/af_unix/so_peek_off.c162
-rw-r--r--tools/testing/selftests/net/af_unix/unix_connreset.c180
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh66
-rwxr-xr-xtools/testing/selftests/net/forwarding/lib_sh_test.sh7
-rwxr-xr-xtools/testing/selftests/net/gro.sh105
-rw-r--r--tools/testing/selftests/net/lib.sh2
-rw-r--r--tools/testing/selftests/net/lib/Makefile1
-rwxr-xr-xtools/testing/selftests/net/lib/ksft_setup_loopback.sh111
-rw-r--r--tools/testing/selftests/net/lib/py/__init__.py5
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py89
-rw-r--r--tools/testing/selftests/net/lib/py/nsim.py2
-rw-r--r--tools/testing/selftests/net/lib/py/utils.py20
-rw-r--r--tools/testing/selftests/net/lib/xdp_native.bpf.c5
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh146
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh92
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh58
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh45
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh46
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt4
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt2
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh20
-rw-r--r--tools/testing/selftests/net/setup_loopback.sh120
-rw-r--r--tools/testing/selftests/net/setup_veth.sh45
-rw-r--r--tools/testing/selftests/net/so_txtime.c2
-rwxr-xr-xtools/testing/selftests/net/toeplitz.sh199
-rwxr-xr-xtools/testing/selftests/net/toeplitz_client.sh28
-rw-r--r--tools/testing/selftests/net/txtimestamp.c2
-rw-r--r--tools/testing/selftests/user_events/perf_test.c2
-rw-r--r--tools/testing/selftests/vfio/lib/include/vfio_util.h19
-rw-r--r--tools/testing/selftests/vfio/lib/vfio_pci_device.c246
-rw-r--r--tools/testing/selftests/vfio/vfio_dma_mapping_test.c20
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_driver_test.c12
-rw-r--r--tools/testing/vsock/vsock_test.c7
938 files changed, 17130 insertions, 7707 deletions
diff --git a/.mailmap b/.mailmap
index c8f5f36993ae..fffbfd413474 100644
--- a/.mailmap
+++ b/.mailmap
@@ -206,6 +206,7 @@ Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
+David Hildenbrand <david@kernel.org> <david@redhat.com>
David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com>
David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com>
David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>
@@ -426,7 +427,7 @@ Kenneth W Chen <kenneth.w.chen@intel.com>
Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org>
Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
-Kirill A. Shutemov <kas@kernel.org> <kirill.shutemov@linux.intel.com>
+Kiryl Shutsemau <kas@kernel.org> <kirill.shutemov@linux.intel.com>
Kishon Vijay Abraham I <kishon@kernel.org> <kishon@ti.com>
Konrad Dybcio <konradybcio@kernel.org> <konrad.dybcio@linaro.org>
Konrad Dybcio <konradybcio@kernel.org> <konrad.dybcio@somainline.org>
@@ -437,6 +438,7 @@ Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
+Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@linaro.org>
Krzysztof Wilczyński <kwilczynski@kernel.org> <krzysztof.wilczynski@linux.com>
Krzysztof Wilczyński <kwilczynski@kernel.org> <kw@linux.com>
Kshitiz Godara <quic_kgodara@quicinc.com> <kgodara@codeaurora.org>
@@ -689,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
Sachin P Sant <ssant@in.ibm.com>
Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
+Sam Protsenko <semen.protsenko@linaro.org>
+Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
Sam Ravnborg <sam@mars.ravnborg.org>
Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
diff --git a/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml b/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml
index d6ef468495c5..a105dc07ed12 100644
--- a/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/aspeed,ast2600-mdio.yaml
@@ -19,7 +19,12 @@ allOf:
properties:
compatible:
- const: aspeed,ast2600-mdio
+ oneOf:
+ - const: aspeed,ast2600-mdio
+ - items:
+ - enum:
+ - aspeed,ast2700-mdio
+ - const: aspeed,ast2600-mdio
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml b/Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml
index 1219c5cb601f..519a11fbe972 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml
+++ b/Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml
@@ -32,11 +32,15 @@ properties:
- description: AHB peripheral clock
- description: CAN bus clock
+ resets:
+ maxItems: 1
+
required:
- compatible
- reg
- interrupts
- clocks
+ - resets
additionalProperties: false
@@ -46,6 +50,7 @@ examples:
compatible = "microchip,mpfs-can";
reg = <0x2010c000 0x1000>;
clocks = <&clkcfg 17>, <&clkcfg 37>;
+ resets = <&clkcfg 17>;
interrupt-parent = <&plic>;
interrupts = <56>;
};
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index ea8337846ab2..cb14c35ba996 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -38,7 +38,10 @@ properties:
- cdns,sam9x60-macb # Microchip sam9x60 SoC
- microchip,mpfs-macb # Microchip PolarFire SoC
- const: cdns,macb # Generic
-
+ - items:
+ - const: microchip,pic64gx-macb # Microchip PIC64GX SoC
+ - const: microchip,mpfs-macb # Microchip PolarFire SoC
+ - const: cdns,macb # Generic
- items:
- enum:
- atmel,sama5d3-macb # 10/100Mbit IP on Atmel sama5d3 SoCs
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index b45f67f92e80..cc346946291a 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -112,7 +112,7 @@ properties:
mediatek,wed:
$ref: /schemas/types.yaml#/definitions/phandle-array
- minItems: 2
+ minItems: 1
maxItems: 2
items:
maxItems: 1
@@ -249,6 +249,9 @@ allOf:
minItems: 1
maxItems: 1
+ mediatek,wed:
+ minItems: 2
+
mediatek,wed-pcie: false
else:
properties:
@@ -338,12 +341,13 @@ allOf:
- const: netsys0
- const: netsys1
- mediatek,infracfg: false
-
mediatek,sgmiisys:
minItems: 2
maxItems: 2
+ mediatek,wed:
+ maxItems: 1
+
- if:
properties:
compatible:
@@ -385,6 +389,9 @@ allOf:
minItems: 2
maxItems: 2
+ mediatek,wed:
+ minItems: 2
+
- if:
properties:
compatible:
@@ -429,6 +436,19 @@ allOf:
- const: xgp2
- const: xgp3
+ mediatek,wed:
+ minItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ralink,rt5350-eth
+ then:
+ properties:
+ mediatek,wed:
+ minItems: 2
+
patternProperties:
"^mac@[0-2]$":
type: object
diff --git a/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
index ce21979a2d9a..e8d3814db0e9 100644
--- a/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
@@ -70,6 +70,25 @@ required:
allOf:
- $ref: snps,dwmac.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: sophgo,sg2042-dwmac
+ then:
+ properties:
+ phy-mode:
+ enum:
+ - rgmii-rxid
+ - rgmii-id
+ else:
+ properties:
+ phy-mode:
+ enum:
+ - rgmii
+ - rgmii-rxid
+ - rgmii-txid
+ - rgmii-id
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
index 19d47fd414bc..ce04d2eadec9 100644
--- a/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
@@ -50,18 +50,20 @@ patternProperties:
groups:
description:
Name of the pin group to use for the functions.
- $ref: /schemas/types.yaml#/definitions/string
- enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
- i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
- spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
- spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp,
- uart0_grp, uart1_grp, uart2_grp, uart3_grp,
- pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp,
- pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp,
- pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp,
- pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
- pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
- pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
+ items:
+ enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
+ i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
+ spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
+ spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp,
+ uart0_grp, uart1_grp, uart2_grp, uart3_grp,
+ pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp,
+ pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp,
+ pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp,
+ pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
+ pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
+ pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
+ minItems: 1
+ maxItems: 8
drive-strength:
enum: [2, 4, 6, 8, 16, 24, 32]
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,versal-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/xlnx,versal-pinctrl.yaml
index 55ece6a8be5e..81e2164ea98f 100644
--- a/Documentation/devicetree/bindings/pinctrl/xlnx,versal-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,versal-pinctrl.yaml
@@ -74,6 +74,7 @@ patternProperties:
'^conf':
type: object
+ unevaluatedProperties: false
description:
Pinctrl node's client devices use subnodes for pin configurations,
which in turn use the standard properties below.
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index 1ead9bb8d9c6..4424cbff251f 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -400,19 +400,30 @@ can report through the rotational axes (absolute and/or relative rx, ry, rz).
All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.
-INPUT_PROP_HAPTIC_TOUCHPAD
---------------------------
+INPUT_PROP_PRESSUREPAD
+----------------------
+
+The INPUT_PROP_PRESSUREPAD property indicates that the device provides
+simulated haptic feedback (e.g. a vibrator motor situated below the surface)
+instead of physical haptic feedback (e.g. a hinge). This property is only set
+if the device:
-The INPUT_PROP_HAPTIC_TOUCHPAD property indicates that device:
-- supports simple haptic auto and manual triggering
- can differentiate between at least 5 fingers
- uses correct resolution for the X/Y (units and value)
-- reports correct force per touch, and correct units for them (newtons or grams)
- follows the MT protocol type B
+If the simulated haptic feedback is controllable by userspace the device must:
+
+- support simple haptic auto and manual triggering, and
+- report correct force per touch, and correct units for them (newtons or grams), and
+- provide the EV_FF FF_HAPTIC force feedback effect.
+
Summing up, such devices follow the MS spec for input devices in
-Win8 and Win8.1, and in addition support the Simple haptic controller HID table,
-and report correct units for the pressure.
+Win8 and Win8.1, and in addition may support the Simple haptic controller HID
+table, and report correct units for the pressure.
+
+Where applicable, this property is set in addition to INPUT_PROP_BUTTONPAD, it
+does not replace that property.
Guidelines
==========
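Aside, not part of the patch above: the paragraph added to event-codes.rst describes INPUT_PROP_PRESSUREPAD purely in prose. A minimal userspace sketch of how the bit could be queried follows, using the standard EVIOCGPROP ioctl from linux/input.h. The device path /dev/input/event0 is an illustrative assumption, and INPUT_PROP_PRESSUREPAD is assumed to be defined as introduced by this series (see the input-event-codes.h change in the diffstat).

/* Minimal sketch: check INPUT_PROP_PRESSUREPAD on an evdev node.
 * Assumes a kernel/uapi header that defines the new property bit;
 * the device path below is a placeholder, not from the patch.
 */
#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char props[INPUT_PROP_CNT / 8 + 1];
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(props, 0, sizeof(props));
	if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0) {
		perror("EVIOCGPROP");
		close(fd);
		return 1;
	}

	if (props[INPUT_PROP_PRESSUREPAD / 8] & (1 << (INPUT_PROP_PRESSUREPAD % 8)))
		printf("pressurepad: simulated haptics; EV_FF FF_HAPTIC expected if controllable\n");
	else
		printf("INPUT_PROP_PRESSUREPAD not set\n");

	close(fd);
	return 0;
}

A device advertising the property alongside INPUT_PROP_BUTTONPAD would, per the documentation change, still behave as a buttonpad for click handling.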
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index 5a234e9b5fa2..57f59fe23e3f 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -227,7 +227,7 @@ properties:
Optional format indicator that is intended only for choosing
the right formatting mechanism when displaying values of this
type.
- enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ enum: [ hex, mac, fddi, ipv4, ipv6, ipv4-or-v6, uuid ]
# Start genetlink-c
name-prefix:
type: string
diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml
index 7b1ec153e834..b020a537d8ac 100644
--- a/Documentation/netlink/genetlink.yaml
+++ b/Documentation/netlink/genetlink.yaml
@@ -185,7 +185,7 @@ properties:
Optional format indicator that is intended only for choosing
the right formatting mechanism when displaying values of this
type.
- enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ enum: [ hex, mac, fddi, ipv4, ipv6, ipv4-or-v6, uuid ]
# Make sure name-prefix does not appear in subsets (subsets inherit naming)
dependencies:
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
index 246fa07bccf6..0166a7e4afbb 100644
--- a/Documentation/netlink/netlink-raw.yaml
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -157,7 +157,7 @@ properties:
Optional format indicator that is intended only for choosing
the right formatting mechanism when displaying values of this
type.
- enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ enum: [ hex, mac, fddi, ipv4, ipv6, ipv4-or-v6, uuid ]
struct:
description: Name of the nested struct type.
type: string
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index 426d5aa7d955..837112da6738 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -859,6 +859,14 @@ attribute-sets:
name: health-reporter-burst-period
type: u64
doc: Time (in msec) for recoveries before starting the grace period.
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: param-reset-default
+ type: flag
+ doc: Request restoring parameter to its default value.
+ value: 183
-
name: dl-dev-stats
subset-of: devlink
@@ -1793,6 +1801,7 @@ operations:
- param-type
# param-value-data is missing here as the type is variable
- param-value-cmode
+ - param-reset-default
-
name: region-get
diff --git a/Documentation/netlink/specs/rt-addr.yaml b/Documentation/netlink/specs/rt-addr.yaml
index 3a582eac1629..163a106c41bb 100644
--- a/Documentation/netlink/specs/rt-addr.yaml
+++ b/Documentation/netlink/specs/rt-addr.yaml
@@ -86,17 +86,18 @@ attribute-sets:
-
name: address
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: local
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: label
type: string
-
name: broadcast
- type: binary
+ type: u32
+ byte-order: big-endian
display-hint: ipv4
-
name: anycast
diff --git a/Documentation/netlink/specs/rt-link.yaml b/Documentation/netlink/specs/rt-link.yaml
index 2a23e9699c0b..6beeb6ee5adf 100644
--- a/Documentation/netlink/specs/rt-link.yaml
+++ b/Documentation/netlink/specs/rt-link.yaml
@@ -1707,11 +1707,11 @@ attribute-sets:
-
name: local
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: remote
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: ttl
type: u8
@@ -1833,11 +1833,11 @@ attribute-sets:
-
name: local
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: remote
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: fwmark
type: u32
@@ -1868,7 +1868,8 @@ attribute-sets:
type: u32
-
name: remote
- type: binary
+ type: u32
+ byte-order: big-endian
display-hint: ipv4
-
name: ttl
@@ -1914,6 +1915,35 @@ attribute-sets:
type: binary
struct: ifla-geneve-port-range
-
+ name: linkinfo-hsr-attrs
+ name-prefix: ifla-hsr-
+ attributes:
+ -
+ name: slave1
+ type: u32
+ -
+ name: slave2
+ type: u32
+ -
+ name: multicast-spec
+ type: u8
+ -
+ name: supervision-addr
+ type: binary
+ display-hint: mac
+ -
+ name: seq-nr
+ type: u16
+ -
+ name: version
+ type: u8
+ -
+ name: protocol
+ type: u8
+ -
+ name: interlink
+ type: u32
+ -
name: linkinfo-iptun-attrs
name-prefix: ifla-iptun-
attributes:
@@ -1923,11 +1953,11 @@ attribute-sets:
-
name: local
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: remote
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: ttl
type: u8
@@ -1957,7 +1987,8 @@ attribute-sets:
display-hint: ipv6
-
name: 6rd-relay-prefix
- type: binary
+ type: u32
+ byte-order: big-endian
display-hint: ipv4
-
name: 6rd-prefixlen
@@ -2300,6 +2331,9 @@ sub-messages:
value: geneve
attribute-set: linkinfo-geneve-attrs
-
+ value: hsr
+ attribute-set: linkinfo-hsr-attrs
+ -
value: ipip
attribute-set: linkinfo-iptun-attrs
-
diff --git a/Documentation/netlink/specs/rt-neigh.yaml b/Documentation/netlink/specs/rt-neigh.yaml
index 2f568a6231c9..0f46ef313590 100644
--- a/Documentation/netlink/specs/rt-neigh.yaml
+++ b/Documentation/netlink/specs/rt-neigh.yaml
@@ -194,7 +194,7 @@ attribute-sets:
-
name: dst
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: lladdr
type: binary
diff --git a/Documentation/netlink/specs/rt-route.yaml b/Documentation/netlink/specs/rt-route.yaml
index 1ecb3fadc067..33195db96746 100644
--- a/Documentation/netlink/specs/rt-route.yaml
+++ b/Documentation/netlink/specs/rt-route.yaml
@@ -87,11 +87,11 @@ attribute-sets:
-
name: dst
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: src
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: iif
type: u32
@@ -101,14 +101,14 @@ attribute-sets:
-
name: gateway
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: priority
type: u32
-
name: prefsrc
type: binary
- display-hint: ipv4
+ display-hint: ipv4-or-v6
-
name: metrics
type: nest
diff --git a/Documentation/netlink/specs/rt-rule.yaml b/Documentation/netlink/specs/rt-rule.yaml
index bebee452a950..7f03a44ab036 100644
--- a/Documentation/netlink/specs/rt-rule.yaml
+++ b/Documentation/netlink/specs/rt-rule.yaml
@@ -96,10 +96,12 @@ attribute-sets:
attributes:
-
name: dst
- type: u32
+ type: binary
+ display-hint: ipv4-or-v6
-
name: src
- type: u32
+ type: binary
+ display-hint: ipv4-or-v6
-
name: iifname
type: string
diff --git a/Documentation/networking/devlink/devlink-params.rst b/Documentation/networking/devlink/devlink-params.rst
index c0597d456641..ea17756dcda6 100644
--- a/Documentation/networking/devlink/devlink-params.rst
+++ b/Documentation/networking/devlink/devlink-params.rst
@@ -41,6 +41,16 @@ In order for ``driverinit`` parameters to take effect, the driver must
support reloading via the ``devlink-reload`` command. This command will
request a reload of the device driver.
+Default parameter values
+=========================
+
+Drivers may optionally export default values for parameters of cmode
+``runtime`` and ``permanent``. For ``driverinit`` parameters, the last
+value set by the driver will be used as the default value. Drivers can
+also support resetting params with cmode ``runtime`` and ``permanent``
+to their default values. Resetting ``driverinit`` params is supported
+by devlink core without additional driver support.
+
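
For context, a driver-side sketch of registering a ``driverinit`` parameter
whose driver-set value acts as the default described above; the ``foo_`` names
are placeholders, and the new default-export/reset hooks themselves are not
shown here, only the existing param API::

  #include <linux/bits.h>
  #include <linux/kernel.h>
  #include <net/devlink.h>

  enum {
          FOO_DEVLINK_PARAM_ID_BAR = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
  };

  static const struct devlink_param foo_devlink_params[] = {
          /* no get/set/validate callbacks in this minimal sketch */
          DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_BAR, "bar",
                               DEVLINK_PARAM_TYPE_BOOL,
                               BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                               NULL, NULL, NULL),
  };

  /* caller holds the devlink instance lock */
  static int foo_register_params(struct devlink *devlink)
  {
          union devlink_param_value value;
          int err;

          err = devl_params_register(devlink, foo_devlink_params,
                                     ARRAY_SIZE(foo_devlink_params));
          if (err)
                  return err;

          /* the value set here is what a reset would return to */
          value.vbool = true;
          devl_param_driverinit_value_set(devlink, FOO_DEVLINK_PARAM_ID_BAR,
                                          value);
          return 0;
  }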
.. _devlink_params_generic:
Generic configuration parameters
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 0e5f9c76e514..4bba4d780a4a 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -218,6 +218,20 @@ parameters.
* ``balanced`` : Merges fewer CQEs, resulting in a moderate compression ratio but maintaining a balance between bandwidth savings and performance
* ``aggressive`` : Merges more CQEs into a single entry, achieving a higher compression rate and maximizing performance, particularly under high traffic loads
+ * - ``swp_l4_csum_mode``
+ - string
+ - permanent
+ - Configure how the L4 checksum is calculated by the device when using
+ Software Parser (SWP) hints for header locations.
+
+ * ``default`` : Use the device's default checksum calculation
+ mode. The driver will discover during init whether
+ full_csum or l4_only is in use. Setting this value explicitly
+ from userspace is not allowed, but some firmware versions may
+ return this value on param read.
+ * ``full_csum`` : Calculate full checksum including the pseudo-header
+ * ``l4_only`` : Calculate L4-only checksum, excluding the pseudo-header
+
The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
Info versions
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index c775cababc8c..75db2251649b 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -131,10 +131,7 @@ Contents:
vxlan
x25
x25-iface
- xfrm_device
- xfrm_proc
- xfrm_sync
- xfrm_sysctl
+ xfrm/index
xdp-rx-metadata
xsk-tx-metadata
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 2bae61be1859..bc9a01606daf 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -673,6 +673,16 @@ tcp_moderate_rcvbuf - BOOLEAN
Default: 1 (enabled)
+tcp_rcvbuf_low_rtt - INTEGER
+ rcvbuf autotuning can overestimate the final socket rcvbuf, which
+ can lead to cache thrashing for high throughput flows.
+
+ For small RTT flows (below tcp_rcvbuf_low_rtt usecs), we can relax
+ rcvbuf growth: a few additional ms to reach the final (and smaller)
+ rcvbuf is a good tradeoff.
+
+ Default : 1000 (1 ms)
+
tcp_mtu_probing - INTEGER
Controls TCP Packetization-Layer Path MTU Discovery. Takes three
values:
@@ -875,8 +885,9 @@ tcp_comp_sack_slack_ns - LONG INTEGER
timer used by SACK compression. This gives extra time
for small RTT flows, and reduces system overhead by allowing
opportunistic reduction of timer interrupts.
+ Overly large values might reduce goodput.
- Default : 100,000 ns (100 us)
+ Default : 10,000 ns (10 us)
tcp_comp_sack_nr - INTEGER
Max number of SACK that can be compressed.
diff --git a/Documentation/networking/net_cachelines/inet_connection_sock.rst b/Documentation/networking/net_cachelines/inet_connection_sock.rst
index 8fae85ebb773..cc2000f55c29 100644
--- a/Documentation/networking/net_cachelines/inet_connection_sock.rst
+++ b/Documentation/networking/net_cachelines/inet_connection_sock.rst
@@ -12,8 +12,8 @@ struct inet_sock icsk_inet read_mostly r
struct request_sock_queue icsk_accept_queue
struct inet_bind_bucket icsk_bind_hash read_mostly tcp_set_state
struct inet_bind2_bucket icsk_bind2_hash read_mostly tcp_set_state,inet_put_port
-struct timer_list icsk_retransmit_timer read_write inet_csk_reset_xmit_timer,tcp_connect
struct timer_list icsk_delack_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect
+struct timer_list icsk_keepalive_timer
u32 icsk_rto read_write tcp_cwnd_validate,tcp_schedule_loss_probe,tcp_connect_init,tcp_connect,tcp_write_xmit,tcp_push_one
u32 icsk_rto_min
u32 icsk_rto_max read_mostly tcp_reset_xmit_timer
diff --git a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
index 6e7b20afd2d4..beaf1880a19b 100644
--- a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
+++ b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
@@ -102,7 +102,8 @@ u8 sysctl_tcp_app_win
u8 sysctl_tcp_frto tcp_enter_loss
u8 sysctl_tcp_nometrics_save TCP_LAST_ACK/tcp_update_metrics
u8 sysctl_tcp_no_ssthresh_metrics_save TCP_LAST_ACK/tcp_(update/init)_metrics
-u8 sysctl_tcp_moderate_rcvbuf read_mostly read_mostly tcp_tso_should_defer(tx);tcp_rcv_space_adjust(rx)
+u8 sysctl_tcp_moderate_rcvbuf read_mostly tcp_rcvbuf_grow()
+u32 sysctl_tcp_rcvbuf_low_rtt read_mostly tcp_rcvbuf_grow()
u8 sysctl_tcp_tso_win_divisor read_mostly tcp_tso_should_defer(tcp_write_xmit)
u8 sysctl_tcp_workaround_signed_windows tcp_select_window
int sysctl_tcp_limit_output_bytes read_mostly tcp_small_queue_check(tcp_write_xmit)
diff --git a/Documentation/networking/xfrm/index.rst b/Documentation/networking/xfrm/index.rst
new file mode 100644
index 000000000000..7d866da836fe
--- /dev/null
+++ b/Documentation/networking/xfrm/index.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+XFRM Framework
+==============
+
+.. toctree::
+ :maxdepth: 2
+
+ xfrm_device
+ xfrm_proc
+ xfrm_sync
+ xfrm_sysctl
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm/xfrm_device.rst
index 122204da0fff..b0d85a5f57d1 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm/xfrm_device.rst
@@ -20,11 +20,15 @@ can radically increase throughput and decrease CPU utilization. The XFRM
Device interface allows NIC drivers to offer to the stack access to the
hardware offload.
-Right now, there are two types of hardware offload that kernel supports.
+Right now, there are two types of hardware offload that kernel supports:
+
* IPsec crypto offload:
+
* NIC performs encrypt/decrypt
* Kernel does everything else
+
* IPsec packet offload:
+
* NIC performs encrypt/decrypt
* NIC does encapsulation
* Kernel and NIC have SA and policy in-sync
@@ -34,7 +38,7 @@ Right now, there are two types of hardware offload that kernel supports.
Userland access to the offload is typically through a system such as
libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
be handy when experimenting. An example command might look something
-like this for crypto offload:
+like this for crypto offload::
ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
reqid 0x07 replay-window 32 \
@@ -42,7 +46,7 @@ like this for crypto offload:
sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
offload dev eth4 dir in
-and for packet offload
+and for packet offload::
ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
reqid 0x07 replay-window 32 \
@@ -153,26 +157,26 @@ the packet's skb. At this point the data should be decrypted but the
IPsec headers are still in the packet data; they are removed later up
the stack in xfrm_input().
- find and hold the SA that was used to the Rx skb::
+1. Find and hold the SA that was used for the Rx skb::
- get spi, protocol, and destination IP from packet headers
+ /* get spi, protocol, and destination IP from packet headers */
xs = find xs from (spi, protocol, dest_IP)
xfrm_state_hold(xs);
- store the state information into the skb::
+2. Store the state information into the skb::
sp = secpath_set(skb);
if (!sp) return;
sp->xvec[sp->len++] = xs;
sp->olen++;
- indicate the success and/or error status of the offload::
+3. Indicate the success and/or error status of the offload::
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
xo->status = crypto_status;
- hand the packet to napi_gro_receive() as usual
+4. Hand the packet to napi_gro_receive() as usual.
In ESN mode, xdo_dev_state_advance_esn() is called from
xfrm_replay_advance_esn() for RX, and xfrm_replay_overflow_offload_esn for TX.
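
Pulling the four steps above together, a driver's Rx completion handler might
look roughly like this; ``foo_find_state()`` and the ``napi`` pointer are
hypothetical driver-side details, not part of the XFRM API::

  static void foo_ipsec_rx_done(struct napi_struct *napi, struct sk_buff *skb,
                                __be32 spi, u8 proto, u8 crypto_status)
  {
          struct xfrm_offload *xo;
          struct xfrm_state *xs;
          struct sec_path *sp;

          /* 1. find and hold the SA (driver-private lookup by spi/proto/daddr) */
          xs = foo_find_state(skb, spi, proto);
          if (!xs)
                  return;
          xfrm_state_hold(xs);

          /* 2. store the state information into the skb */
          sp = secpath_set(skb);
          if (!sp) {
                  xfrm_state_put(xs);
                  return;
          }
          sp->xvec[sp->len++] = xs;
          sp->olen++;

          /* 3. indicate the success and/or error status of the offload */
          xo = xfrm_offload(skb);
          xo->flags = CRYPTO_DONE;
          xo->status = crypto_status;

          /* 4. hand the packet to napi_gro_receive() as usual */
          napi_gro_receive(napi, skb);
  }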
diff --git a/Documentation/networking/xfrm_proc.rst b/Documentation/networking/xfrm/xfrm_proc.rst
index 973d1571acac..973d1571acac 100644
--- a/Documentation/networking/xfrm_proc.rst
+++ b/Documentation/networking/xfrm/xfrm_proc.rst
diff --git a/Documentation/networking/xfrm_sync.rst b/Documentation/networking/xfrm/xfrm_sync.rst
index 6246503ceab2..dfc2ec0df380 100644
--- a/Documentation/networking/xfrm_sync.rst
+++ b/Documentation/networking/xfrm/xfrm_sync.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-====
-XFRM
-====
+=========
+XFRM sync
+=========
The sync patches work is based on initial patches from
Krisztian <hidden@balabit.hu> and others and additional patches
@@ -36,7 +36,7 @@ is not driven by packet arrival.
- the replay sequence for both inbound and outbound
1) Message Structure
-----------------------
+--------------------
nlmsghdr:aevent_id:optional-TLVs.
@@ -83,31 +83,31 @@ when going from kernel to user space)
A program needs to subscribe to multicast group XFRMNLGRP_AEVENTS
to get notified of these events.
-2) TLVS reflect the different parameters:
------------------------------------------
+2) TLVS reflect the different parameters
+----------------------------------------
a) byte value (XFRMA_LTIME_VAL)
-This TLV carries the running/current counter for byte lifetime since
-last event.
+ This TLV carries the running/current counter for byte lifetime since
+ last event.
-b)replay value (XFRMA_REPLAY_VAL)
+b) replay value (XFRMA_REPLAY_VAL)
-This TLV carries the running/current counter for replay sequence since
-last event.
+ This TLV carries the running/current counter for replay sequence since
+ last event.
-c)replay threshold (XFRMA_REPLAY_THRESH)
+c) replay threshold (XFRMA_REPLAY_THRESH)
-This TLV carries the threshold being used by the kernel to trigger events
-when the replay sequence is exceeded.
+ This TLV carries the threshold being used by the kernel to trigger events
+ when the replay sequence is exceeded.
d) expiry timer (XFRMA_ETIMER_THRESH)
-This is a timer value in milliseconds which is used as the nagle
-value to rate limit the events.
+ This is a timer value in milliseconds which is used as the nagle
+ value to rate limit the events.
-3) Default configurations for the parameters:
----------------------------------------------
+3) Default configurations for the parameters
+--------------------------------------------
By default these events should be turned off unless there is
at least one listener registered to listen to the multicast
@@ -121,12 +121,14 @@ in case they are not specified.
the two sysctls/proc entries are:
a) /proc/sys/net/core/sysctl_xfrm_aevent_etime
-used to provide default values for the XFRMA_ETIMER_THRESH in incremental
-units of time of 100ms. The default is 10 (1 second)
+
+ Used to provide default values for the XFRMA_ETIMER_THRESH in incremental
+ units of time of 100ms. The default is 10 (1 second)
b) /proc/sys/net/core/sysctl_xfrm_aevent_rseqth
-used to provide default values for XFRMA_REPLAY_THRESH parameter
-in incremental packet count. The default is two packets.
+
+ Used to provide default values for XFRMA_REPLAY_THRESH parameter
+ in incremental packet count. The default is two packets.
4) Message types
----------------
@@ -134,50 +136,51 @@ in incremental packet count. The default is two packets.
a) XFRM_MSG_GETAE issued by user-->kernel.
XFRM_MSG_GETAE does not carry any TLVs.
-The response is a XFRM_MSG_NEWAE which is formatted based on what
-XFRM_MSG_GETAE queried for.
+ The response is a XFRM_MSG_NEWAE which is formatted based on what
+ XFRM_MSG_GETAE queried for.
+
+ The response will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
-The response will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
-* if XFRM_AE_RTHR flag is set, then XFRMA_REPLAY_THRESH is also retrieved
-* if XFRM_AE_ETHR flag is set, then XFRMA_ETIMER_THRESH is also retrieved
+ * if XFRM_AE_RTHR flag is set, then XFRMA_REPLAY_THRESH is also retrieved
+ * if XFRM_AE_ETHR flag is set, then XFRMA_ETIMER_THRESH is also retrieved
b) XFRM_MSG_NEWAE is issued by either user space to configure
or kernel to announce events or respond to a XFRM_MSG_GETAE.
-i) user --> kernel to configure a specific SA.
+ i) user --> kernel to configure a specific SA.
-any of the values or threshold parameters can be updated by passing the
-appropriate TLV.
+ any of the values or threshold parameters can be updated by passing the
+ appropriate TLV.
-A response is issued back to the sender in user space to indicate success
-or failure.
+ A response is issued back to the sender in user space to indicate success
+ or failure.
-In the case of success, additionally an event with
-XFRM_MSG_NEWAE is also issued to any listeners as described in iii).
+ In the case of success, additionally an event with
+ XFRM_MSG_NEWAE is also issued to any listeners as described in iii).
-ii) kernel->user direction as a response to XFRM_MSG_GETAE
+ ii) kernel->user direction as a response to XFRM_MSG_GETAE
-The response will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
+ The response will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
-The threshold TLVs will be included if explicitly requested in
-the XFRM_MSG_GETAE message.
+ The threshold TLVs will be included if explicitly requested in
+ the XFRM_MSG_GETAE message.
-iii) kernel->user to report as event if someone sets any values or
- thresholds for an SA using XFRM_MSG_NEWAE (as described in #i above).
- In such a case XFRM_AE_CU flag is set to inform the user that
- the change happened as a result of an update.
- The message will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
+ iii) kernel->user to report as event if someone sets any values or
+ thresholds for an SA using XFRM_MSG_NEWAE (as described in #i above).
+ In such a case XFRM_AE_CU flag is set to inform the user that
+ the change happened as a result of an update.
+ The message will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
-iv) kernel->user to report event when replay threshold or a timeout
- is exceeded.
+ iv) kernel->user to report event when replay threshold or a timeout
+ is exceeded.
In such a case either XFRM_AE_CR (replay exceeded) or XFRM_AE_CE (timeout
happened) is set to inform the user what happened.
Note the two flags are mutually exclusive.
The message will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
-Exceptions to threshold settings
---------------------------------
+5) Exceptions to threshold settings
+-----------------------------------
If you have an SA that is getting hit by traffic in bursts such that
there is a period where the timer threshold expires with no packets
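
As a small userspace illustration of the XFRMNLGRP_AEVENTS subscription
mentioned earlier in this file (not part of the patch), opening a NETLINK_XFRM
socket and joining the multicast group might look like this::

  #include <string.h>
  #include <unistd.h>
  #include <sys/socket.h>
  #include <linux/netlink.h>
  #include <linux/xfrm.h>

  /* returns an fd that XFRM_MSG_NEWAE notifications can be read from */
  int subscribe_aevents(void)
  {
          struct sockaddr_nl snl;
          int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

          if (fd < 0)
                  return -1;

          memset(&snl, 0, sizeof(snl));
          snl.nl_family = AF_NETLINK;
          snl.nl_groups = 1 << (XFRMNLGRP_AEVENTS - 1);

          if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0) {
                  close(fd);
                  return -1;
          }
          return fd;
  }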
diff --git a/Documentation/networking/xfrm_sysctl.rst b/Documentation/networking/xfrm/xfrm_sysctl.rst
index 47b9bbdd0179..7d0c4b17c0bd 100644
--- a/Documentation/networking/xfrm_sysctl.rst
+++ b/Documentation/networking/xfrm/xfrm_sysctl.rst
@@ -4,8 +4,8 @@
XFRM Syscall
============
-/proc/sys/net/core/xfrm_* Variables:
-====================================
+/proc/sys/net/core/xfrm_* Variables
+===================================
xfrm_acq_expires - INTEGER
default 30 - hard timeout in seconds for acquire requests
diff --git a/Documentation/sound/codecs/cs35l56.rst b/Documentation/sound/codecs/cs35l56.rst
index 57d1964453e1..d5363b08f515 100644
--- a/Documentation/sound/codecs/cs35l56.rst
+++ b/Documentation/sound/codecs/cs35l56.rst
@@ -105,10 +105,10 @@ In this example the SSID is 10280c63.
The format of the firmware file names is:
-SoundWire (except CS35L56 Rev B0):
+SoundWire:
cs35lxx-b0-dsp1-misc-SSID[-spkidX]-l?u?
-SoundWire CS35L56 Rev B0:
+SoundWire CS35L56 Rev B0 firmware released before kernel version 6.16:
cs35lxx-b0-dsp1-misc-SSID[-spkidX]-ampN
Non-SoundWire (HDA and I2S):
@@ -127,9 +127,8 @@ Where:
* spkidX is an optional part, used for laptops that have firmware
configurations for different makes and models of internal speakers.
-The CS35L56 Rev B0 continues to use the old filename scheme because a
-large number of firmware files have already been published with these
-names.
+Early firmware for CS35L56 Rev B0 used the ALSA prefix (ampN) as the
+filename qualifier. Support for the l?u? qualifier was added in kernel 6.16.
Sound Open Firmware and ALSA topology files
-------------------------------------------
diff --git a/Documentation/wmi/driver-development-guide.rst b/Documentation/wmi/driver-development-guide.rst
index 99ef21fc1c1e..5680303ae314 100644
--- a/Documentation/wmi/driver-development-guide.rst
+++ b/Documentation/wmi/driver-development-guide.rst
@@ -54,6 +54,7 @@ to matching WMI devices using a struct wmi_device_id table:
::
static const struct wmi_device_id foo_id_table[] = {
+ /* Only use uppercase letters! */
{ "936DA01F-9ABD-4D9D-80C7-02AF85C822A8", NULL },
{ }
};
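
For completeness, a hedged sketch of how such a table is usually wired into a
minimal WMI driver; the ``foo`` names are placeholders and the probe, remove
and notify callbacks are omitted::

  #include <linux/module.h>
  #include <linux/wmi.h>

  /* GUIDs must be spelled with uppercase hex digits, as noted above */
  static const struct wmi_device_id foo_id_table[] = {
          { "936DA01F-9ABD-4D9D-80C7-02AF85C822A8", NULL },
          { }
  };
  MODULE_DEVICE_TABLE(wmi, foo_id_table);

  static struct wmi_driver foo_driver = {
          .driver = {
                  .name = "foo-wmi",
          },
          .id_table = foo_id_table,
          /* .probe, .remove and .notify would go here in a real driver */
  };
  module_wmi_driver(foo_driver);

  MODULE_LICENSE("GPL");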
diff --git a/MAINTAINERS b/MAINTAINERS
index 37f4278db851..09932ab7e0e8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3927,7 +3927,7 @@ F: crypto/async_tx/
F: include/linux/async_tx.h
AT24 EEPROM DRIVER
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-i2c@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -4400,7 +4400,7 @@ BLOCK LAYER
M: Jens Axboe <axboe@kernel.dk>
L: linux-block@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
F: Documentation/ABI/stable/sysfs-block
F: Documentation/block/
F: block/
@@ -9266,7 +9266,6 @@ M: Ido Schimmel <idosch@nvidia.com>
L: bridge@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
-W: http://www.linuxfoundation.org/en/Net:Bridge
F: include/linux/if_bridge.h
F: include/uapi/linux/if_bridge.h
F: include/linux/netfilter_bridge/
@@ -10679,7 +10678,7 @@ F: tools/gpio/gpio-sloppy-logic-analyzer.sh
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -10696,7 +10695,7 @@ K: GPIOD_FLAGS_BIT_NONEXCLUSIVE
K: devm_gpiod_unhinge
GPIO UAPI
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
R: Kent Gibson <warthog618@gmail.com>
L: linux-gpio@vger.kernel.org
S: Maintained
@@ -11528,7 +11527,7 @@ F: include/linux/platform_data/huawei-gaokun-ec.h
HUGETLB SUBSYSTEM
M: Muchun Song <muchun.song@linux.dev>
M: Oscar Salvador <osalvador@suse.de>
-R: David Hildenbrand <david@redhat.com>
+R: David Hildenbrand <david@kernel.org>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
@@ -13735,7 +13734,7 @@ KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <borntraeger@linux.ibm.com>
M: Janosch Frank <frankja@linux.ibm.com>
M: Claudio Imbrenda <imbrenda@linux.ibm.com>
-R: David Hildenbrand <david@redhat.com>
+R: David Hildenbrand <david@kernel.org>
L: kvm@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
@@ -13800,6 +13799,7 @@ F: Documentation/admin-guide/mm/kho.rst
F: Documentation/core-api/kho/*
F: include/linux/kexec_handover.h
F: kernel/kexec_handover.c
+F: lib/test_kho.c
F: tools/testing/selftests/kho/
KEYS-ENCRYPTED
@@ -15311,7 +15311,7 @@ F: drivers/pwm/pwm-max7360.c
F: include/linux/mfd/max7360.h
MAXIM MAX77650 PMIC MFD DRIVER
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/*/*max77650.yaml
@@ -15413,14 +15413,12 @@ S: Supported
F: drivers/net/phy/mxl-86110.c
F: drivers/net/phy/mxl-gpy.c
-MCAN MMIO DEVICE DRIVER
-M: Chandrasekar Ramakrishnan <rcsekar@samsung.com>
+MCAN DEVICE DRIVER
+M: Markus Schneider-Pargmann <msp@baylibre.com>
L: linux-can@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
-F: drivers/net/can/m_can/m_can.c
-F: drivers/net/can/m_can/m_can.h
-F: drivers/net/can/m_can/m_can_platform.c
+F: drivers/net/can/m_can/
MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
R: Yasushi SHOJI <yashi@spacecubics.com>
@@ -16207,7 +16205,7 @@ MEMORY CONTROLLER DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
-B: mailto:krzysztof.kozlowski@linaro.org
+B: mailto:krzk@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
F: Documentation/devicetree/bindings/memory-controllers/
F: drivers/memory/
@@ -16223,7 +16221,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
F: drivers/devfreq/tegra30-devfreq.c
MEMORY HOT(UN)PLUG
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
M: Oscar Salvador <osalvador@suse.de>
L: linux-mm@kvack.org
S: Maintained
@@ -16248,7 +16246,7 @@ F: tools/mm/
MEMORY MANAGEMENT - CORE
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
@@ -16304,7 +16302,7 @@ F: mm/execmem.c
MEMORY MANAGEMENT - GUP (GET USER PAGES)
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
R: Jason Gunthorpe <jgg@nvidia.com>
R: John Hubbard <jhubbard@nvidia.com>
R: Peter Xu <peterx@redhat.com>
@@ -16320,7 +16318,7 @@ F: tools/testing/selftests/mm/gup_test.c
MEMORY MANAGEMENT - KSM (Kernel Samepage Merging)
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
R: Xu Xin <xu.xin16@zte.com.cn>
R: Chengming Zhou <chengming.zhou@linux.dev>
L: linux-mm@kvack.org
@@ -16336,7 +16334,7 @@ F: mm/mm_slot.h
MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
R: Zi Yan <ziy@nvidia.com>
R: Matthew Brost <matthew.brost@intel.com>
R: Joshua Hahn <joshua.hahnjy@gmail.com>
@@ -16376,7 +16374,7 @@ F: mm/workingset.c
MEMORY MANAGEMENT - MISC
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
@@ -16464,7 +16462,7 @@ F: mm/shuffle.h
MEMORY MANAGEMENT - RECLAIM
M: Andrew Morton <akpm@linux-foundation.org>
M: Johannes Weiner <hannes@cmpxchg.org>
-R: David Hildenbrand <david@redhat.com>
+R: David Hildenbrand <david@kernel.org>
R: Michal Hocko <mhocko@kernel.org>
R: Qi Zheng <zhengqi.arch@bytedance.com>
R: Shakeel Butt <shakeel.butt@linux.dev>
@@ -16477,7 +16475,7 @@ F: mm/workingset.c
MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Rik van Riel <riel@surriel.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
@@ -16522,7 +16520,7 @@ F: mm/swapfile.c
MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Zi Yan <ziy@nvidia.com>
R: Baolin Wang <baolin.wang@linux.alibaba.com>
@@ -16624,7 +16622,7 @@ MEMORY MAPPING - MADVISE (MEMORY ADVICE)
M: Andrew Morton <akpm@linux-foundation.org>
M: Liam R. Howlett <Liam.Howlett@oracle.com>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
R: Vlastimil Babka <vbabka@suse.cz>
R: Jann Horn <jannh@google.com>
L: linux-mm@kvack.org
@@ -18068,6 +18066,7 @@ L: netdev@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
+F: Documentation/networking/xfrm/
F: include/net/xfrm.h
F: include/uapi/linux/xfrm.h
F: net/ipv4/ah4.c
@@ -18798,6 +18797,10 @@ S: Maintained
F: arch/arm/*omap*/*clock*
OMAP DEVICE TREE SUPPORT
+M: Aaro Koskinen <aaro.koskinen@iki.fi>
+M: Andreas Kemnade <andreas@kemnade.info>
+M: Kevin Hilman <khilman@baylibre.com>
+M: Roger Quadros <rogerq@kernel.org>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org
@@ -19917,7 +19920,7 @@ F: drivers/pci/p2pdma.c
F: include/linux/pci-p2pdma.h
PCI POWER CONTROL
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-pci@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
@@ -20514,7 +20517,7 @@ F: include/linux/powercap.h
F: kernel/configs/nopm.config
POWER SEQUENCING
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -21196,7 +21199,7 @@ F: Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
F: drivers/i2c/busses/i2c-qcom-cci.c
QUALCOMM INTERCONNECT BWMON DRIVER
-M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -21317,7 +21320,7 @@ F: Documentation/tee/qtee.rst
F: drivers/tee/qcomtee/
QUALCOMM TRUST ZONE MEMORY ALLOCATOR
-M: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/firmware/qcom/qcom_tzmem.c
@@ -22668,7 +22671,7 @@ F: arch/s390/mm
S390 NETWORK DRIVERS
M: Alexandra Winter <wintera@linux.ibm.com>
-R: Aswin Karuvally <aswin@linux.ibm.com>
+M: Aswin Karuvally <aswin@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -25685,7 +25688,7 @@ F: Documentation/devicetree/bindings/crypto/ti,am62l-dthev2.yaml
F: drivers/crypto/ti/
TI DAVINCI MACHINE SUPPORT
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -27107,7 +27110,7 @@ F: net/vmw_vsock/virtio_transport_common.c
VIRTIO BALLOON
M: "Michael S. Tsirkin" <mst@redhat.com>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
L: virtualization@lists.linux.dev
S: Maintained
F: drivers/virtio/virtio_balloon.c
@@ -27135,7 +27138,7 @@ S: Maintained
F: drivers/char/virtio_console.c
F: include/uapi/linux/virtio_console.h
-VIRTIO CORE AND NET DRIVERS
+VIRTIO CORE
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
@@ -27148,7 +27151,6 @@ F: Documentation/devicetree/bindings/virtio/
F: Documentation/driver-api/virtio/
F: drivers/block/virtio_blk.c
F: drivers/crypto/virtio/
-F: drivers/net/virtio_net.c
F: drivers/vdpa/
F: drivers/virtio/
F: include/linux/vdpa.h
@@ -27157,7 +27159,6 @@ F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
F: net/vmw_vsock/virtio*
F: tools/virtio/
-F: tools/testing/selftests/drivers/net/virtio_net/
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
@@ -27262,13 +27263,26 @@ F: drivers/iommu/virtio-iommu.c
F: include/uapi/linux/virtio_iommu.h
VIRTIO MEM DRIVER
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
L: virtualization@lists.linux.dev
S: Maintained
W: https://virtio-mem.gitlab.io/
F: drivers/virtio/virtio_mem.c
F: include/uapi/linux/virtio_mem.h
+VIRTIO NET DRIVER
+M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
+R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+R: Eugenio Pérez <eperezma@redhat.com>
+L: netdev@vger.kernel.org
+L: virtualization@lists.linux.dev
+S: Maintained
+F: drivers/net/virtio_net.c
+F: include/linux/virtio_net.h
+F: include/uapi/linux/virtio_net.h
+F: tools/testing/selftests/drivers/net/virtio_net/
+
VIRTIO PMEM DRIVER
M: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
L: virtualization@lists.linux.dev
@@ -27868,7 +27882,7 @@ F: arch/x86/kernel/stacktrace.c
F: arch/x86/kernel/unwind_*.c
X86 TRUST DOMAIN EXTENSIONS (TDX)
-M: Kirill A. Shutemov <kas@kernel.org>
+M: Kiryl Shutsemau <kas@kernel.org>
R: Dave Hansen <dave.hansen@linux.intel.com>
R: Rick Edgecombe <rick.p.edgecombe@intel.com>
L: x86@kernel.org
diff --git a/Makefile b/Makefile
index fb4389aa5d5f..d208066bcbb6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-fuji-data64.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-fuji-data64.dts
index aa9576d8ab56..48ca25f57ef6 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-fuji-data64.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-fuji-data64.dts
@@ -1254,3 +1254,17 @@
max-frequency = <25000000>;
bus-width = <4>;
};
+
+/*
+ * FIXME: rgmii delay is introduced by MAC (configured in u-boot now)
+ * instead of PCB on fuji board, so the "phy-mode" should be updated to
+ * "rgmii-[tx|rx]id" when the aspeed-mac driver can handle the delay
+ * properly.
+ */
+&mac3 {
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii4_default>;
+};
diff --git a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts
index ac44c745bdf8..a39a021a3910 100644
--- a/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts
+++ b/arch/arm/boot/dts/broadcom/bcm47189-luxul-xap-1440.dts
@@ -55,8 +55,8 @@
mdio {
/delete-node/ switch@1e;
- bcm54210e: ethernet-phy@0 {
- reg = <0>;
+ bcm54210e: ethernet-phy@25 {
+ reg = <25>;
};
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts b/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts
index 06545a6052f7..43ff5eafb2bb 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx51-zii-rdu1.dts
@@ -259,7 +259,7 @@
pinctrl-0 = <&pinctrl_audmux>;
status = "okay";
- ssi2 {
+ mux-ssi2 {
fsl,audmux-port = <1>;
fsl,port-config = <
(IMX_AUDMUX_V2_PTCR_SYN |
@@ -271,7 +271,7 @@
>;
};
- aud3 {
+ mux-aud3 {
fsl,audmux-port = <2>;
fsl,port-config = <
IMX_AUDMUX_V2_PTCR_SYN
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-engicam-microgea-rmm.dts b/arch/arm/boot/dts/nxp/imx/imx6ull-engicam-microgea-rmm.dts
index 107b00b9a939..540642e99a41 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-engicam-microgea-rmm.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-engicam-microgea-rmm.dts
@@ -136,7 +136,7 @@
interrupt-parent = <&gpio2>;
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>;
- report-rate-hz = <6>;
+ report-rate-hz = <60>;
/* settings valid only for Hycon touchscreen */
touchscreen-size-x = <1280>;
touchscreen-size-y = <800>;
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
index b8f256545022..3e0319fdb93f 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts
@@ -18,11 +18,21 @@
#include "bcm2712-rpi-5-b-ovl-rp1.dts"
+/ {
+ aliases {
+ ethernet0 = &rp1_eth;
+ };
+};
+
&pcie2 {
#include "rp1-nexus.dtsi"
};
&rp1_eth {
+ assigned-clocks = <&rp1_clocks RP1_CLK_ETH_TSU>,
+ <&rp1_clocks RP1_CLK_ETH>;
+ assigned-clock-rates = <50000000>,
+ <125000000>;
status = "okay";
phy-mode = "rgmii-id";
phy-handle = <&phy1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
index 2cf0f7208350..a72b2f1c4a1b 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi
@@ -67,7 +67,6 @@ img_subsys: bus@58000000 {
power-domains = <&pd IMX_SC_R_CSI_0>;
fsl,channel = <0>;
fsl,num-irqs = <32>;
- status = "disabled";
};
gpio0_mipi_csi0: gpio@58222000 {
@@ -144,7 +143,6 @@ img_subsys: bus@58000000 {
power-domains = <&pd IMX_SC_R_CSI_1>;
fsl,channel = <0>;
fsl,num-irqs = <32>;
- status = "disabled";
};
gpio0_mipi_csi1: gpio@58242000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx8mp-kontron-bl-osm-s.dts
index 614b4ce330b1..0924ac50fd2d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-kontron-bl-osm-s.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-kontron-bl-osm-s.dts
@@ -16,11 +16,20 @@
ethernet1 = &eqos;
};
- extcon_usbc: usbc {
- compatible = "linux,extcon-usb-gpio";
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ label = "Type-C";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1_id>;
- id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ type = "micro";
+ vbus-supply = <&reg_usb1_vbus>;
+
+ port {
+ usb_dr_connector: endpoint {
+ remote-endpoint = <&usb3_dwc>;
+ };
+ };
};
leds {
@@ -244,9 +253,15 @@
hnp-disable;
srp-disable;
dr_mode = "otg";
- extcon = <&extcon_usbc>;
usb-role-switch;
+ role-switch-default-mode = "peripheral";
status = "okay";
+
+ port {
+ usb3_dwc: endpoint {
+ remote-endpoint = <&usb_dr_connector>;
+ };
+ };
};
&usb_dwc3_1 {
@@ -273,7 +288,6 @@
};
&usb3_phy0 {
- vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 1292677cbe4e..6da961eb3fe5 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -1886,7 +1886,7 @@
assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
assigned-clock-parents = <0>, <0>,
<&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
- msi-map = <0x0 &its 0x98 0x1>;
+ msi-map = <0x0 &its 0x10 0x1>;
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
status = "disabled";
};
@@ -1963,6 +1963,7 @@
assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
assigned-clock-parents = <0>, <0>,
<&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
+ msi-map = <0x0 &its 0x98 0x1>;
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
index a410fc335fa3..c0f17f8189fa 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
@@ -42,6 +42,7 @@
interrupt-parent = <&gpio>;
interrupts = <TEGRA194_MAIN_GPIO(G, 4) IRQ_TYPE_LEVEL_LOW>;
#phy-cells = <0>;
+ wakeup-source;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 283d9cbc4368..03b7c4313750 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -598,7 +598,6 @@
pinctrl-2 = <&otp_pin>;
resets = <&cru SRST_TSADC>;
reset-names = "tsadc-apb";
- rockchip,grf = <&grf>;
rockchip,hw-tshut-temp = <100000>;
#thermal-sensor-cells = <1>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi
index c4f4f1ff6117..9da6fd82e46b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi
@@ -3,7 +3,7 @@
* Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
*/
-#include "rk3399.dtsi"
+#include "rk3399-base.dtsi"
/ {
cluster0_opp: opp-table-0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso
index 5e8f729c2cf2..141a921a06e4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso
@@ -45,11 +45,11 @@
cam_dovdd_1v8: regulator-cam-dovdd-1v8 {
compatible = "regulator-fixed";
- gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microvolt = <1800000>;
- regulator-name = "cam-dovdd-1v8";
- vin-supply = <&vcc1v8_video>;
+ gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "cam-dovdd-1v8";
+ vin-supply = <&vcc1v8_video>;
};
cam_dvdd_1v2: regulator-cam-dvdd-1v2 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
index 7f578c50b4ad..b6cf03a7ba66 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-bigtreetech-cb2.dtsi
@@ -120,7 +120,7 @@
compatible = "regulator-fixed";
regulator-name = "vcc3v3_pcie";
enable-active-high;
- gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio4 RK_PB1 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_drv>;
regulator-always-on;
@@ -187,7 +187,7 @@
vcc5v0_usb2b: regulator-vcc5v0-usb2b {
compatible = "regulator-fixed";
enable-active-high;
- gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_usb2b_en>;
regulator-name = "vcc5v0_usb2b";
@@ -199,7 +199,7 @@
vcc5v0_usb2t: regulator-vcc5v0-usb2t {
compatible = "regulator-fixed";
enable-active-high;
- gpios = <&gpio0 RK_PD5 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_usb2t_en>;
regulator-name = "vcc5v0_usb2t";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi
index d0e38412d56a..08bf40de17ea 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-pinetab2.dtsi
@@ -789,7 +789,7 @@
vccio1-supply = <&vccio_acodec>;
vccio2-supply = <&vcc_1v8>;
vccio3-supply = <&vccio_sd>;
- vccio4-supply = <&vcc_1v8>;
+ vccio4-supply = <&vcca1v8_pmu>;
vccio5-supply = <&vcc_1v8>;
vccio6-supply = <&vcc1v8_dvp>;
vccio7-supply = <&vcc_3v3>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
index 0f844806ec54..442a2bc43ba8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
@@ -482,6 +482,8 @@
};
&i2s1_8ch {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
rockchip,trcm-sync-tx-only;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576.dtsi b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
index fc4e9e07f1cf..a86fc6b4e8c4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
@@ -276,12 +276,6 @@
opp-microvolt = <900000 900000 950000>;
clock-latency-ns = <40000>;
};
-
- opp-2208000000 {
- opp-hz = /bits/ 64 <2208000000>;
- opp-microvolt = <950000 950000 950000>;
- clock-latency-ns = <40000>;
- };
};
cluster1_opp_table: opp-table-cluster1 {
@@ -348,12 +342,6 @@
opp-microvolt = <925000 925000 950000>;
clock-latency-ns = <40000>;
};
-
- opp-2304000000 {
- opp-hz = /bits/ 64 <2304000000>;
- opp-microvolt = <950000 950000 950000>;
- clock-latency-ns = <40000>;
- };
};
gpu_opp_table: opp-table-gpu {
@@ -2561,8 +2549,6 @@
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&i2c9m0_xfer>;
- resets = <&cru SRST_I2C9>, <&cru SRST_P_I2C9>;
- reset-names = "i2c", "apb";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi
index 0f1a77697351..b5d630d2c879 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-opp.dtsi
@@ -115,7 +115,7 @@
};
};
- gpu_opp_table: opp-table {
+ gpu_opp_table: opp-table-gpu {
compatible = "operating-points-v2";
opp-300000000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
index b44e89e1bb15..365c1d958f2d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
@@ -382,14 +382,12 @@
cap-mmc-highspeed;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
- mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
mmc-pwrseq = <&emmc_pwrseq>;
no-sdio;
no-sd;
non-removable;
pinctrl-names = "default";
- pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>;
+ pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk>;
vmmc-supply = <&vcc_3v3_s3>;
vqmmc-supply = <&vcc_1v8_s3>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
index 9884a5df47df..e1e0e3fc0ca7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588j.dtsi
@@ -66,7 +66,7 @@
};
};
- gpu_opp_table: opp-table {
+ gpu_opp_table: opp-table-gpu {
compatible = "operating-points-v2";
opp-300000000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
index ad6d04793b0a..83b9b6645a1e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
@@ -14,8 +14,8 @@
gpios = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>;
regulator-name = "vcc3v3_pcie20";
regulator-boot-on;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
startup-delay-us = <50000>;
vin-supply = <&vcc5v0_sys>;
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e3a2d37bd104..1a48faad2473 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1341,7 +1341,7 @@ CONFIG_COMMON_CLK_RS9_PCIE=y
CONFIG_COMMON_CLK_VC3=y
CONFIG_COMMON_CLK_VC5=y
CONFIG_COMMON_CLK_BD718XX=m
-CONFIG_CLK_RASPBERRYPI=m
+CONFIG_CLK_RASPBERRYPI=y
CONFIG_CLK_IMX8MM=y
CONFIG_CLK_IMX8MN=y
CONFIG_CLK_IMX8MP=y
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 2312e6ee595f..258cca4b4873 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,8 +33,8 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr);
#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
-void tag_clear_highpage(struct page *to);
-#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+bool tag_clear_highpages(struct page *to, int numpages);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index c022c1acb8c7..f1cb2447afc9 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -351,16 +351,6 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
* as long as we take care not to create a writable
* mapping for executable code.
*/
- fallthrough;
-
- case EFI_ACPI_MEMORY_NVS:
- /*
- * ACPI NVS marks an area reserved for use by the
- * firmware, even after exiting the boot service.
- * This may be used by the firmware for sharing dynamic
- * tables/data (e.g., ACPI CCEL) with the OS. Map it
- * as read-only.
- */
prot = PAGE_KERNEL_RO;
break;
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index c7d70d04c164..80a580e019c5 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -1032,6 +1032,8 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
/* No point mitigating Spectre-BHB alone. */
+ } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+ /* Do nothing */
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED;
set_bit(BHB_HW, &system_bhb_mitigations);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 870953b4a8a7..052bf0d4d0b0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -624,6 +624,7 @@ nommu:
kvm_timer_vcpu_load(vcpu);
kvm_vgic_load(vcpu);
kvm_vcpu_load_debug(vcpu);
+ kvm_vcpu_load_fgt(vcpu);
if (has_vhe())
kvm_vcpu_load_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
@@ -642,7 +643,6 @@ nommu:
vcpu->arch.hcr_el2 |= HCR_TWI;
vcpu_set_pauth_traps(vcpu);
- kvm_vcpu_load_fgt(vcpu);
if (is_protected_kvm_enabled()) {
kvm_call_hyp_nvhe(__pkvm_vcpu_load,
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 8ae2bca81614..ec3fbe0b8d52 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -5609,7 +5609,11 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
guard(mutex)(&kvm->arch.config_lock);
- if (!irqchip_in_kernel(kvm)) {
+ /*
+ * This hacks into the ID registers, so only perform it when the
+ * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
+ */
+ if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) {
u64 val;
val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 125dfa6c613b..a193b6a5d1e6 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -967,20 +967,21 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
return vma_alloc_folio(flags, 0, vma, vaddr);
}
-void tag_clear_highpage(struct page *page)
+bool tag_clear_highpages(struct page *page, int numpages)
{
/*
* Check if MTE is supported and fall back to clear_highpage().
* get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
- * post_alloc_hook() will invoke tag_clear_highpage().
+ * post_alloc_hook() will invoke tag_clear_highpages().
*/
- if (!system_supports_mte()) {
- clear_highpage(page);
- return;
- }
+ if (!system_supports_mte())
+ return false;
- /* Newly allocated page, shouldn't have been tagged yet */
- WARN_ON_ONCE(!try_page_mte_tagging(page));
- mte_zero_clear_page_tags(page_address(page));
- set_page_mte_tagged(page);
+ /* Newly allocated pages, shouldn't have been tagged yet */
+ for (int i = 0; i < numpages; i++, page++) {
+ WARN_ON_ONCE(!try_page_mte_tagging(page));
+ mte_zero_clear_page_tags(page_address(page));
+ set_page_mte_tagged(page);
+ }
+ return true;
}
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index d4cd4041bee7..f3efb00b6141 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -55,6 +55,27 @@ enum cpu_type_enum {
CPU_LAST
};
+static inline char *id_to_core_name(unsigned int id)
+{
+ if ((id & PRID_COMP_MASK) != PRID_COMP_LOONGSON)
+ return "Unknown";
+
+ switch (id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ return "LA132";
+ case PRID_SERIES_LA264:
+ return "LA264";
+ case PRID_SERIES_LA364:
+ return "LA364";
+ case PRID_SERIES_LA464:
+ return "LA464";
+ case PRID_SERIES_LA664:
+ return "LA664";
+ default:
+ return "Unknown";
+ }
+}
+
#endif /* !__ASSEMBLER__ */
/*
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index aafb3cd9e943..215e0f9e8aa3 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -10,10 +10,6 @@
#include <linux/types.h>
-#ifndef __KERNEL__
-#include <stdint.h>
-#endif
-
/*
* For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
* 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
@@ -41,44 +37,44 @@ struct user_pt_regs {
} __attribute__((aligned(8)));
struct user_fp_state {
- uint64_t fpr[32];
- uint64_t fcc;
- uint32_t fcsr;
+ __u64 fpr[32];
+ __u64 fcc;
+ __u32 fcsr;
};
struct user_lsx_state {
/* 32 registers, 128 bits width per register. */
- uint64_t vregs[32*2];
+ __u64 vregs[32*2];
};
struct user_lasx_state {
/* 32 registers, 256 bits width per register. */
- uint64_t vregs[32*4];
+ __u64 vregs[32*4];
};
struct user_lbt_state {
- uint64_t scr[4];
- uint32_t eflags;
- uint32_t ftop;
+ __u64 scr[4];
+ __u32 eflags;
+ __u32 ftop;
};
struct user_watch_state {
- uint64_t dbg_info;
+ __u64 dbg_info;
struct {
- uint64_t addr;
- uint64_t mask;
- uint32_t ctrl;
- uint32_t pad;
+ __u64 addr;
+ __u64 mask;
+ __u32 ctrl;
+ __u32 pad;
} dbg_regs[8];
};
struct user_watch_state_v2 {
- uint64_t dbg_info;
+ __u64 dbg_info;
struct {
- uint64_t addr;
- uint64_t mask;
- uint32_t ctrl;
- uint32_t pad;
+ __u64 addr;
+ __u64 mask;
+ __u32 ctrl;
+ __u32 pad;
} dbg_regs[14];
};
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 6f943d1391ff..a2060a24b39f 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -277,7 +277,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
uint32_t config;
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
- const char *core_name = "Unknown";
+ const char *core_name = id_to_core_name(c->processor_id);
switch (BIT(fls(c->isa_level) - 1)) {
case LOONGARCH_CPU_ISA_LA32R:
@@ -291,35 +291,23 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
break;
}
- switch (c->processor_id & PRID_SERIES_MASK) {
- case PRID_SERIES_LA132:
- core_name = "LA132";
- break;
- case PRID_SERIES_LA264:
- core_name = "LA264";
- break;
- case PRID_SERIES_LA364:
- core_name = "LA364";
- break;
- case PRID_SERIES_LA464:
- core_name = "LA464";
- break;
- case PRID_SERIES_LA664:
- core_name = "LA664";
- break;
- }
-
pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
- if (!cpu_has_iocsr)
+ if (!cpu_has_iocsr) {
+ __cpu_full_name[cpu] = "Unknown";
return;
-
- if (!__cpu_full_name[cpu])
- __cpu_full_name[cpu] = cpu_full_name;
+ }
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+ if (!__cpu_full_name[cpu]) {
+ if (((char *)vendor)[0] == 0)
+ __cpu_full_name[cpu] = "Unknown";
+ else
+ __cpu_full_name[cpu] = cpu_full_name;
+ }
+
config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
if (config & IOCSRF_CSRIPI)
c->options |= LOONGARCH_CPU_CSRIPI;
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 2d64b7c81e5e..d7fafda1d541 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -237,6 +237,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_SMP
crash_smp_send_stop();
#endif
+ machine_kexec_mask_interrupts();
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
pr_info("Starting crashdump kernel...\n");
@@ -274,6 +275,7 @@ void machine_kexec(struct kimage *image)
/* We do not want to be bothered. */
local_irq_disable();
+ machine_kexec_mask_interrupts();
pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index ab9c660526a3..8b89898e20df 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -158,35 +158,9 @@ static void __init node_mem_init(unsigned int node)
#ifdef CONFIG_ACPI_NUMA
-/*
- * add_numamem_region
- *
- * Add a uasable memory region described by BIOS. The
- * routine gets each intersection between BIOS's region
- * and node's region, and adds them into node's memblock
- * pool.
- *
- */
-static void __init add_numamem_region(u64 start, u64 end, u32 type)
-{
- u32 node = pa_to_nid(start);
- u64 size = end - start;
- static unsigned long num_physpages;
-
- if (start >= end) {
- pr_debug("Invalid region: %016llx-%016llx\n", start, end);
- return;
- }
+static unsigned long num_physpages;
- num_physpages += (size >> PAGE_SHIFT);
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
- node, type, start, size);
- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
- start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages);
- memblock_set_node(start, size, &memblock.memory, node);
-}
-
-static void __init init_node_memblock(void)
+static void __init info_node_memblock(void)
{
u32 mem_type;
u64 mem_end, mem_start, mem_size;
@@ -206,12 +180,20 @@ static void __init init_node_memblock(void)
case EFI_BOOT_SERVICES_DATA:
case EFI_PERSISTENT_MEMORY:
case EFI_CONVENTIONAL_MEMORY:
- add_numamem_region(mem_start, mem_end, mem_type);
+ num_physpages += (mem_size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ (u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
break;
case EFI_PAL_CODE:
case EFI_UNUSABLE_MEMORY:
case EFI_ACPI_RECLAIM_MEMORY:
- add_numamem_region(mem_start, mem_end, mem_type);
+ num_physpages += (mem_size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ (u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
fallthrough;
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
@@ -249,22 +231,16 @@ int __init init_numa_memory(void)
for (i = 0; i < NR_CPUS; i++)
set_cpuid_to_node(i, NUMA_NO_NODE);
- numa_reset_distance();
- nodes_clear(numa_nodes_parsed);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
- WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX));
-
/* Parse SRAT and SLIT if provided by firmware. */
- ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
+ if (!acpi_disabled)
+ ret = numa_memblks_init(acpi_numa_init, false);
+ else
+ ret = numa_memblks_init(fake_numa_init, false);
+
if (ret < 0)
return ret;
- node_possible_map = numa_nodes_parsed;
- if (WARN_ON(nodes_empty(node_possible_map)))
- return -EINVAL;
-
- init_node_memblock();
+ info_node_memblock();
if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index cea30768ae92..63d2b7e7e844 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -17,6 +17,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
unsigned int isa = cpu_data[n].isa_level;
+ unsigned int prid = cpu_data[n].processor_id;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
@@ -37,6 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id);
seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
+ seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index cbe53d0b7fb0..f97dc9936401 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1624,6 +1624,9 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
/* Direct jump skips 5 NOP instructions */
else if (is_bpf_text_address((unsigned long)orig_call))
orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
+ /* Module tracing not supported - it causes kernel lockups */
+ else if (is_module_text_address((unsigned long)orig_call))
+ return -ENOTSUPP;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
index 5bc9627a6cf9..d9fc5d520b37 100644
--- a/arch/loongarch/pci/pci.c
+++ b/arch/loongarch/pci/pci.c
@@ -50,11 +50,11 @@ static int __init pcibios_init(void)
*/
lsize = cpu_last_level_cache_line_size();
- BUG_ON(!lsize);
+ if (lsize) {
+ pci_dfl_cache_line_size = lsize >> 2;
- pci_dfl_cache_line_size = lsize >> 2;
-
- pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+ pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+ }
return 0;
}
diff --git a/arch/mips/boot/dts/econet/en751221.dtsi b/arch/mips/boot/dts/econet/en751221.dtsi
index 66197e73d4f0..2abeef5b744a 100644
--- a/arch/mips/boot/dts/econet/en751221.dtsi
+++ b/arch/mips/boot/dts/econet/en751221.dtsi
@@ -18,7 +18,7 @@
cpu@0 {
device_type = "cpu";
- compatible = "mips,mips24KEc";
+ compatible = "mips,mips34Kc";
reg = <0>;
};
};
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 29191fa1801e..a3101f2268c6 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -692,7 +692,7 @@ unsigned long mips_stack_top(void)
/* Space for the VDSO, data page & GIC user page */
if (current->thread.abi) {
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
- top -= PAGE_SIZE;
+ top -= VDSO_NR_PAGES * PAGE_SIZE;
top -= mips_gic_present() ? PAGE_SIZE : 0;
/* Space to randomize the VDSO base */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 347126dc010d..3facf7cc6c7d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
+#include <linux/sort.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
@@ -508,54 +509,78 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb);
-/* Initialise all TLB entries with unique values */
+
+/* Comparison function for EntryHi VPN fields. */
+static int r4k_vpn_cmp(const void *a, const void *b)
+{
+ long v = *(unsigned long *)a - *(unsigned long *)b;
+ int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;
+ return s ? (v != 0) | v >> s : v;
+}
+
+/*
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
static void r4k_tlb_uniquify(void)
{
- int entry = num_wired_entries();
+ unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
+ int tlbsize = current_cpu_data.tlbsize;
+ int start = num_wired_entries();
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+ unsigned long vpn;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+ vpn = read_c0_entryhi();
+ vpn &= vpn_mask & PAGE_MASK;
+ tlb_vpns[cnt] = vpn;
+
+ /* Prevent any large pages from overlapping regular ones. */
+ write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+
+ sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+
+ write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- while (entry < current_cpu_data.tlbsize) {
- unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
- unsigned long asid = 0;
- int idx;
+ idx = 0;
+ ent = tlbsize;
+ for (i = start; i < tlbsize; i++)
+ while (1) {
+ unsigned long entryhi, vpn;
- /* Skip wired MMID to make ginvt_mmid work */
- if (cpu_has_mmid)
- asid = MMID_KERNEL_WIRED + 1;
+ entryhi = UNIQUE_ENTRYHI(ent);
+ vpn = entryhi & vpn_mask & PAGE_MASK;
- /* Check for match before using UNIQUE_ENTRYHI */
- do {
- if (cpu_has_mmid) {
- write_c0_memorymapid(asid);
- write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ if (idx >= cnt || vpn < tlb_vpns[idx]) {
+ write_c0_entryhi(entryhi);
+ write_c0_index(i);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ ent++;
+ break;
+ } else if (vpn == tlb_vpns[idx]) {
+ ent++;
} else {
- write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
+ idx++;
}
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- idx = read_c0_index();
- /* No match or match is on current entry */
- if (idx < 0 || idx == entry)
- break;
- /*
- * If we hit a match, we need to try again with
- * a different ASID.
- */
- asid++;
- } while (asid < asid_mask);
-
- if (idx >= 0 && idx != entry)
- panic("Unable to uniquify TLB entry %d", idx);
-
- write_c0_index(entry);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- entry++;
- }
+ }
tlbw_use_hazard();
htw_start();
@@ -602,6 +627,7 @@ static void r4k_tlb_configure(void)
/* From this point on the ARC firmware is dead. */
r4k_tlb_uniquify();
+ local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
}
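
The r4k_vpn_cmp() helper added above avoids truncating a 64-bit difference to int by collapsing it into its sign with (v != 0) | v >> s. A stand-alone user-space sketch, with an illustrative helper name and assuming the usual arithmetic right shift for signed values (as the kernel does), showing that the expression really yields -1, 0 or 1:

#include <stdio.h>

static int vpn_cmp_sketch(const void *a, const void *b)
{
        long v = *(const unsigned long *)a - *(const unsigned long *)b;
        int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;

        /* v >> s is -1 for negative v and 0 otherwise; OR-ing in (v != 0)
         * turns "positive" into 1 while leaving -1 and 0 untouched.
         */
        return s ? (v != 0) | v >> s : v;
}

int main(void)
{
        unsigned long x = 0x120000UL, y = 0x340000UL;

        printf("%d %d %d\n",
               vpn_cmp_sketch(&x, &y),   /* -1 */
               vpn_cmp_sketch(&y, &x),   /*  1 */
               vpn_cmp_sketch(&x, &x));  /*  0 */
        return 0;
}
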
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 000d6d50520a..82b0fd8576a2 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -241,16 +241,22 @@ mips_pci_controller:
#endif
/*
- * Setup the Malta max (2GB) memory for PCI DMA in host bridge
- * in transparent addressing mode.
+ * Set up memory mapping in host bridge for PCI DMA masters,
+ * in transparent addressing mode. For EVA use the Malta
+ * maximum of 2 GiB memory in the alias space at 0x80000000
+ * as per PHYS_OFFSET. Otherwise use 256 MiB of memory in
+ * the regular space, avoiding mapping the PCI MMIO window
+ * for DMA as it seems to confuse the system controller's
+ * logic, causing PCI MMIO to stop working.
*/
- mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
- MSC_WRITE(MSC01_PCI_BAR0, mask);
- MSC_WRITE(MSC01_PCI_HEAD4, mask);
+ mask = PHYS_OFFSET ? PHYS_OFFSET : 0xf0000000;
+ MSC_WRITE(MSC01_PCI_BAR0,
+ mask | PCI_BASE_ADDRESS_MEM_PREFETCH);
+ MSC_WRITE(MSC01_PCI_HEAD4,
+ PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH);
- mask &= MSC01_PCI_BAR0_SIZE_MSK;
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
- MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+ MSC_WRITE(MSC01_PCI_P2SCMAPL, PHYS_OFFSET);
/* Don't handle target retries indefinitely. */
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
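
A quick sanity check of the window sizes the new comment describes: under the usual BAR size-mask convention, a 32-bit mask decodes to a window of (~mask + 1) bytes. The small host-side sketch below (not driver code) confirms that 0xf0000000 corresponds to the 256 MiB non-EVA window and 0x80000000 to the 2048 MiB (2 GiB) EVA one:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t masks[] = { 0xf0000000u, 0x80000000u };

        for (unsigned int i = 0; i < 2; i++) {
                /* window size implied by a power-of-two aligned size mask */
                uint32_t size = ~masks[i] + 1;

                printf("mask 0x%08x -> %u MiB window\n",
                       masks[i], size / (1024 * 1024));
        }
        return 0;
}
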
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e24f4d88885a..9537a61ebae0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -137,6 +137,7 @@ config PPC
select ARCH_HAS_DMA_OPS if PPC64
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE if ARCH_SUPPORTS_HUGETLBFS
select ARCH_HAS_KCOV
select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
select ARCH_HAS_MEMBARRIER_CALLBACKS
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7b527d18aa5e..4c321a8ea896 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -423,7 +423,6 @@ config PPC_64S_HASH_MMU
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64
- select ARCH_HAS_GIGANTIC_PAGE
default y
help
Enable support for the Power ISA 3.0 Radix style MMU. Currently this
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index 3b09874d7a6d..7f5030ee1fcf 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -7,8 +7,8 @@
#define ANDES_VENDOR_ID 0x31e
#define MICROCHIP_VENDOR_ID 0x029
+#define MIPS_VENDOR_ID 0x127
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
-#define MIPS_VENDOR_ID 0x722
#endif
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 5e8cde055264..c443337056ab 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -648,9 +648,9 @@ int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
void __init sbi_init(void)
{
+ bool srst_power_off = false;
int ret;
- sbi_set_power_off();
ret = sbi_get_spec_version();
if (ret > 0)
sbi_spec_version = ret;
@@ -683,6 +683,7 @@ void __init sbi_init(void)
sbi_probe_extension(SBI_EXT_SRST)) {
pr_info("SBI SRST extension detected\n");
register_platform_power_off(sbi_srst_power_off);
+ srst_power_off = true;
sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
sbi_srst_reboot_nb.priority = 192;
register_restart_handler(&sbi_srst_reboot_nb);
@@ -702,4 +703,7 @@ void __init sbi_init(void)
__sbi_send_ipi = __sbi_send_ipi_v01;
__sbi_rfence = __sbi_rfence_v01;
}
+
+ if (!srst_power_off)
+ sbi_set_power_off();
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b7100c6a4054..6663f1619abb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1154,17 +1154,15 @@ static inline pte_t pte_mkhuge(pte_t pte)
#define IPTE_NODAT 0x400
#define IPTE_GUEST_ASCE 0x800
-static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
- unsigned long opt, unsigned long asce,
- int local)
+static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
{
unsigned long pto;
pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
- asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
+ asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
: "+m" (*ptep)
- : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
- [asce] "a" (asce), [m4] "i" (local));
+ : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
+ [m4] "i" (local));
}
static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
@@ -1348,7 +1346,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
* A local RDP can be used to do the flush.
*/
if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT))
- __ptep_rdp(address, ptep, 0, 0, 1);
+ __ptep_rdp(address, ptep, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0fde20bbc50b..05974304d622 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -274,9 +274,9 @@ void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
preempt_disable();
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __ptep_rdp(addr, ptep, 0, 0, 1);
+ __ptep_rdp(addr, ptep, 1);
else
- __ptep_rdp(addr, ptep, 0, 0, 0);
+ __ptep_rdp(addr, ptep, 0);
/*
* PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
* means it is still valid and active, and must not be changed according
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 745caa6c15a3..fa6c47b50989 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2789,13 +2789,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
return;
}
- if (perf_callchain_store(entry, regs->ip))
- return;
-
- if (perf_hw_regs(regs))
+ if (perf_hw_regs(regs)) {
+ if (perf_callchain_store(entry, regs->ip))
+ return;
unwind_start(&state, current, regs, NULL);
- else
+ } else {
unwind_start(&state, current, NULL, (void *)regs->sp);
+ }
for (; !unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d6c945cc5d07..e228e564b15e 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1325,8 +1325,6 @@ static void uncore_pci_sub_driver_init(void)
continue;
pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
- if (!pmu)
- continue;
if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
continue;
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 93156ac4ffe0..b08c95872eed 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -56,6 +56,11 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
return &arch_ftrace_regs(fregs)->regs;
}
+#define arch_ftrace_partial_regs(regs) do { \
+ regs->flags &= ~X86_EFLAGS_FIXED; \
+ regs->cs = __KERNEL_CS; \
+} while (0)
+
#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \
(_regs)->ip = arch_ftrace_regs(fregs)->regs.ip; \
(_regs)->sp = arch_ftrace_regs(fregs)->regs.sp; \
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index 7047124490f6..d7c8ef1e354d 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -196,7 +196,7 @@ int amd_detect_prefcore(bool *detected)
break;
}
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
u32 tmp;
int ret;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 2ba9f2d42d8c..5d46709c58d0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1037,7 +1037,14 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
static const struct x86_cpu_id zen5_rdseed_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x08, 0x1, 0x0b008121),
ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x24, 0x0, 0x0b204037),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x44, 0x0, 0x0b404035),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x44, 0x1, 0x0b404108),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x60, 0x0, 0x0b600037),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x68, 0x0, 0x0b608038),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x70, 0x0, 0x0b700037),
{},
};
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index dc82153009da..a881bf4c2011 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -224,6 +224,7 @@ static bool need_sha_check(u32 cur_rev)
case 0xb1010: return cur_rev <= 0xb101046; break;
case 0xb2040: return cur_rev <= 0xb204031; break;
case 0xb4040: return cur_rev <= 0xb404031; break;
+ case 0xb4041: return cur_rev <= 0xb404101; break;
case 0xb6000: return cur_rev <= 0xb600031; break;
case 0xb6080: return cur_rev <= 0xb608031; break;
case 0xb7000: return cur_rev <= 0xb700031; break;
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 367da3638167..823dbdd0eb41 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -354,12 +354,17 @@ SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
+ /* Restore the return_to_handler value eaten by the previous ret instruction. */
+ subq $8, %rsp
+ UNWIND_HINT_FUNC
+
/* Save ftrace_regs for function exit context */
subq $(FRAME_SIZE), %rsp
movq %rax, RAX(%rsp)
movq %rdx, RDX(%rsp)
movq %rbp, RBP(%rsp)
+ movq %rsp, RSP(%rsp)
movq %rsp, %rdi
call ftrace_return_to_handler
@@ -368,7 +373,8 @@ SYM_CODE_START(return_to_handler)
movq RDX(%rsp), %rdx
movq RAX(%rsp), %rax
- addq $(FRAME_SIZE), %rsp
+ addq $(FRAME_SIZE) + 8, %rsp
+
/*
* Jump back to the old return address. This cannot be JMP_NOSPEC rdi
* since IBT would demand that contain ENDBR, which simply isn't so for
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 10c21e4c5406..9d29b2e7e855 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -705,7 +705,11 @@ void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
{
- bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+ struct vcpu_svm *svm = to_svm(vcpu);
+ bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+
+ if (intercept == svm->lbr_msrs_intercepted)
+ return;
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
@@ -714,6 +718,8 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
if (sev_es_guest(vcpu->kvm))
svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
+
+ svm->lbr_msrs_intercepted = intercept;
}
void svm_vcpu_free_msrpm(void *msrpm)
@@ -1221,6 +1227,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
}
svm->x2avic_msrs_intercepted = true;
+ svm->lbr_msrs_intercepted = true;
svm->vmcb01.ptr = page_address(vmcb01_page);
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index c856d8e0f95e..dd78e6402345 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -336,6 +336,7 @@ struct vcpu_svm {
bool guest_state_loaded;
bool x2avic_msrs_intercepted;
+ bool lbr_msrs_intercepted;
/* Guest GIF value, used when vGIF is not enabled */
bool guest_gif;
diff --git a/block/bdev.c b/block/bdev.c
index 810707cca970..638f0cd458ae 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -231,7 +231,7 @@ int sb_set_blocksize(struct super_block *sb, int size)
EXPORT_SYMBOL(sb_set_blocksize);
-int sb_min_blocksize(struct super_block *sb, int size)
+int __must_check sb_min_blocksize(struct super_block *sb, int size)
{
int minsize = bdev_logical_block_size(sb->s_bdev);
if (size < minsize)
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index a6dbf623e557..6d69554c940e 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -152,26 +152,49 @@ ATTRIBUTE_GROUPS(memory_range);
static __init int add_boot_memory_ranges(void)
{
- struct kobject *pkobj, *kobj;
+ struct kobject *pkobj, *kobj, **kobjs;
int ret = -EINVAL;
- char *name;
+ char name[16];
+ int i;
pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+ if (!pkobj)
+ return -ENOMEM;
- for (int i = 0; i < mrrm_mem_entry_num; i++) {
- name = kasprintf(GFP_KERNEL, "range%d", i);
- if (!name) {
- ret = -ENOMEM;
- break;
- }
+ kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+ if (!kobjs) {
+ kobject_put(pkobj);
+ return -ENOMEM;
+ }
+ for (i = 0; i < mrrm_mem_entry_num; i++) {
+ scnprintf(name, sizeof(name), "range%d", i);
kobj = kobject_create_and_add(name, pkobj);
+ if (!kobj) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
ret = sysfs_create_groups(kobj, memory_range_groups);
- if (ret)
- return ret;
+ if (ret) {
+ kobject_put(kobj);
+ goto cleanup;
+ }
+ kobjs[i] = kobj;
}
+ kfree(kobjs);
+ return 0;
+
+cleanup:
+ for (int j = 0; j < i; j++) {
+ if (kobjs[j]) {
+ sysfs_remove_groups(kobjs[j], memory_range_groups);
+ kobject_put(kobjs[j]);
+ }
+ }
+ kfree(kobjs);
+ kobject_put(pkobj);
return ret;
}
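
The rewritten add_boot_memory_ranges() follows a record-then-unwind pattern: every kobject created so far is remembered so a mid-loop failure can tear down exactly what was built. A generic, self-contained sketch of that pattern using malloc/free stand-ins rather than the kobject/sysfs API:

#include <stdio.h>
#include <stdlib.h>

static void *create_range(int i)
{
        return (i == 3) ? NULL : malloc(16);    /* simulate failure at entry 3 */
}

static int add_ranges(int n)
{
        void **objs = calloc(n, sizeof(*objs));
        int i, ret = 0;

        if (!objs)
                return -1;

        for (i = 0; i < n; i++) {
                objs[i] = create_range(i);
                if (!objs[i]) {
                        ret = -1;
                        break;
                }
        }

        if (ret) {
                /* unwind only what was actually created, in order */
                for (int j = 0; j < i; j++)
                        free(objs[j]);
        }
        /* On success the created objects intentionally stay alive (as the
         * registered kobjects do); only the bookkeeping array is dropped.
         */
        free(objs);
        return ret;
}

int main(void)
{
        printf("add_ranges(5) -> %d\n", add_ranges(5));   /* fails, unwinds 0..2 */
        printf("add_ranges(2) -> %d\n", add_ranges(2));   /* succeeds */
        return 0;
}
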
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 3c87953dbd19..305c240a303f 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -182,6 +182,7 @@ bool einj_initialized __ro_after_init;
static void __iomem *einj_param;
static u32 v5param_size;
+static u32 v66param_size;
static bool is_v2;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
@@ -283,6 +284,24 @@ static void check_vendor_extension(u64 paddr,
acpi_os_unmap_iomem(p, sizeof(v));
}
+static u32 einjv2_init(struct einjv2_extension_struct *e)
+{
+ if (e->revision != 1) {
+ pr_info("Unknown v2 extension revision %u\n", e->revision);
+ return 0;
+ }
+ if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
+ pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length);
+ return 0;
+ }
+ if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) {
+ pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length);
+ return 0;
+ }
+
+ return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]);
+}
+
static void __iomem *einj_get_parameter_address(void)
{
int i;
@@ -310,28 +329,21 @@ static void __iomem *einj_get_parameter_address(void)
v5param_size = sizeof(v5param);
p = acpi_os_map_iomem(pa_v5, sizeof(*p));
if (p) {
- int offset, len;
-
memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
check_vendor_extension(pa_v5, &v5param);
- if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
- len = v5param.einjv2_struct.length;
- offset = offsetof(struct einjv2_extension_struct, component_arr);
- max_nr_components = (len - offset) /
- sizeof(v5param.einjv2_struct.component_arr[0]);
- /*
- * The first call to acpi_os_map_iomem above does not include the
- * component array, instead it is used to read and calculate maximum
- * number of components supported by the system. Below, the mapping
- * is expanded to include the component array.
- */
+ if (available_error_type & ACPI65_EINJV2_SUPP) {
+ struct einjv2_extension_struct *e;
+
+ e = &v5param.einjv2_struct;
+ max_nr_components = einjv2_init(e);
+
+ /* remap including einjv2_extension_struct */
acpi_os_unmap_iomem(p, v5param_size);
- offset = offsetof(struct set_error_type_with_address, einjv2_struct);
- v5param_size = offset + struct_size(&v5param.einjv2_struct,
- component_arr, max_nr_components);
- p = acpi_os_map_iomem(pa_v5, v5param_size);
+ v66param_size = v5param_size - sizeof(*e) + e->length;
+ p = acpi_os_map_iomem(pa_v5, v66param_size);
}
+
return p;
}
}
@@ -527,6 +539,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
struct apei_exec_context ctx;
+ u32 param_size = is_v2 ? v66param_size : v5param_size;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
int i, rc;
@@ -539,11 +552,11 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
if (acpi5) {
struct set_error_type_with_address *v5param;
- v5param = kmalloc(v5param_size, GFP_KERNEL);
+ v5param = kmalloc(param_size, GFP_KERNEL);
if (!v5param)
return -ENOMEM;
- memcpy_fromio(v5param, einj_param, v5param_size);
+ memcpy_fromio(v5param, einj_param, param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
switch (vendor_flags) {
@@ -601,7 +614,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
break;
}
}
- memcpy_toio(einj_param, v5param, v5param_size);
+ memcpy_toio(einj_param, v5param, param_size);
kfree(v5param);
} else {
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
@@ -1132,9 +1145,14 @@ static void einj_remove(struct faux_device *fdev)
struct apei_exec_context ctx;
if (einj_param) {
- acpi_size size = (acpi5) ?
- v5param_size :
- sizeof(struct einj_parameter);
+ acpi_size size;
+
+ if (v66param_size)
+ size = v66param_size;
+ else if (acpi5)
+ size = v5param_size;
+ else
+ size = sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
if (vendor_errors.size)
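
The component count derived by einjv2_init() earlier in this file is simply (length - header) / entry size, with anything smaller than the header or not a whole number of entries rejected. A user-space sketch of that arithmetic; the struct layout here is assumed purely for illustration and only the checks mirror the patch:

#include <stdio.h>
#include <stdint.h>

struct einjv2_ext_sketch {
        uint8_t  revision;
        uint8_t  reserved[3];
        uint32_t length;                /* header plus component array, bytes */
        uint64_t component_arr[];       /* entry size assumed to be 8 bytes */
};

static uint32_t einjv2_components_sketch(uint32_t length)
{
        const uint32_t hdr = sizeof(struct einjv2_ext_sketch);
        const uint32_t entry = sizeof(uint64_t);

        /* Reject anything smaller than the header or not a whole number of
         * entries (the real code also rejects lengths above PAGE_SIZE).
         */
        if (length < hdr || (length - hdr) % entry)
                return 0;
        return (length - hdr) / entry;
}

int main(void)
{
        printf("%u\n", einjv2_components_sketch(8 + 4 * 8));   /* 4 components */
        printf("%u\n", einjv2_components_sketch(13));          /* malformed -> 0 */
        return 0;
}
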
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index fd995a1d3d24..8cc8af8fd408 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -430,10 +430,10 @@ static int __init gtdt_platform_timer_init(void)
continue;
pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
- gwdt_count, &atm,
+ mmio_timer_count, &atm,
sizeof(atm));
if (IS_ERR(pdev)) {
- pr_err("Can't register timer %d\n", gwdt_count);
+ pr_err("Can't register timer %d\n", mmio_timer_count);
continue;
}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6c684e54fe01..3bdeeee3414e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -460,7 +460,7 @@ bool acpi_cpc_valid(void)
if (acpi_disabled)
return false;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -1435,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 5a36d57289b4..11e4483685c9 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -874,11 +874,33 @@ static void hmat_register_target_devices(struct memory_target *target)
}
}
-static void hmat_register_target(struct memory_target *target)
+static void hmat_hotplug_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
+ * "specific purpose", is applied to all the memory in a proximity
+ * domain, leading to the node being marked offline / unplugged, or if
+ * a memory-only "hotplug" node is offline.
+ */
+ if (nid == NUMA_NO_NODE || !node_online(nid))
+ return;
+
+ guard(mutex)(&target_lock);
+ if (target->registered)
+ return;
+
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+ target->registered = true;
+}
+
+static void hmat_register_target(struct memory_target *target)
+{
+ /*
* Devices may belong to either an offline or online
* node, so unconditionally add them.
*/
@@ -895,25 +917,7 @@ static void hmat_register_target(struct memory_target *target)
}
mutex_unlock(&target_lock);
- /*
- * Skip offline nodes. This can happen when memory
- * marked EFI_MEMORY_SP, "specific purpose", is applied
- * to all the memory in a proximity domain leading to
- * the node being marked offline / unplugged, or if
- * memory-only "hotplug" node is offline.
- */
- if (nid == NUMA_NO_NODE || !node_online(nid))
- return;
-
- mutex_lock(&target_lock);
- if (!target->registered) {
- hmat_register_target_initiators(target);
- hmat_register_target_cache(target);
- hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
- hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
- target->registered = true;
- }
- mutex_unlock(&target_lock);
+ hmat_hotplug_target(target);
}
static void hmat_register_targets(void)
@@ -939,7 +943,7 @@ static int hmat_callback(struct notifier_block *self,
if (!target)
return NOTIFY_OK;
- hmat_register_target(target);
+ hmat_hotplug_target(target);
return NOTIFY_OK;
}
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 53816dfab645..aa87ee1583a4 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -237,7 +237,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_generic_affinity *p =
(struct acpi_srat_generic_affinity *)header;
- if (p->device_handle_type == 0) {
+ if (p->device_handle_type == 1) {
/*
* For pci devices this may be the only place they
* are assigned a proximity domain
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5d824435b26b..7644de24d2fa 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -166,7 +166,8 @@ static int __acpi_processor_start(struct acpi_device *device)
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
- acpi_processor_power_init(pr);
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ acpi_processor_power_init(pr);
acpi_pss_perf_init(pr);
@@ -262,8 +263,6 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
- acpi_processor_register_idle_driver();
-
result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"acpi/cpu-drv:online",
acpi_soft_cpu_online, NULL);
@@ -302,7 +301,6 @@ static void __exit acpi_processor_driver_exit(void)
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
- acpi_processor_unregister_idle_driver();
driver_unregister(&acpi_processor_driver);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 22b051b94a86..4166090db642 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -51,7 +51,7 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-static struct cpuidle_driver acpi_idle_driver = {
+struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
};
@@ -1357,102 +1357,79 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
return 0;
}
-void acpi_processor_register_idle_driver(void)
-{
- struct acpi_processor *pr;
- int ret = -ENODEV;
- int cpu;
-
- /*
- * Acpi idle driver is used by all possible CPUs.
- * Install the idle handler by the processor power info of one in them.
- * Note that we use previously set idle handler will be used on
- * platforms that only support C1.
- */
- for_each_cpu(cpu, (struct cpumask *)cpu_possible_mask) {
- pr = per_cpu(processors, cpu);
- if (!pr)
- continue;
-
- ret = acpi_processor_get_power_info(pr);
- if (!ret) {
- pr->flags.power_setup_done = 1;
- acpi_processor_setup_cpuidle_states(pr);
- break;
- }
- }
-
- if (ret) {
- pr_debug("No ACPI power information from any CPUs.\n");
- return;
- }
+static int acpi_processor_registered;
- ret = cpuidle_register_driver(&acpi_idle_driver);
- if (ret) {
- pr_debug("register %s failed.\n", acpi_idle_driver.name);
- return;
- }
- pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
-}
-
-void acpi_processor_unregister_idle_driver(void)
-{
- cpuidle_unregister_driver(&acpi_idle_driver);
-}
-
-void acpi_processor_power_init(struct acpi_processor *pr)
+int acpi_processor_power_init(struct acpi_processor *pr)
{
+ int retval;
struct cpuidle_device *dev;
- /*
- * The code below only works if the current cpuidle driver is the ACPI
- * idle driver.
- */
- if (cpuidle_get_driver() != &acpi_idle_driver)
- return;
-
if (disabled_by_idle_boot_param())
- return;
+ return 0;
acpi_processor_cstate_first_run_checks();
if (!acpi_processor_get_power_info(pr))
pr->flags.power_setup_done = 1;
- if (!pr->flags.power)
- return;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return;
+ /*
+ * Install the idle handler if processor power management is supported.
+ * Note that the previously set idle handler will be used on
+ * platforms that only support C1.
+ */
+ if (pr->flags.power) {
+ /* Register acpi_idle_driver if not already registered */
+ if (!acpi_processor_registered) {
+ acpi_processor_setup_cpuidle_states(pr);
+ retval = cpuidle_register_driver(&acpi_idle_driver);
+ if (retval)
+ return retval;
+ pr_debug("%s registered with cpuidle\n",
+ acpi_idle_driver.name);
+ }
- per_cpu(acpi_cpuidle_device, pr->id) = dev;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ per_cpu(acpi_cpuidle_device, pr->id) = dev;
- acpi_processor_setup_cpuidle_dev(pr, dev);
+ acpi_processor_setup_cpuidle_dev(pr, dev);
- /*
- * Register a cpuidle device for this CPU. The cpuidle driver using
- * this device is expected to be registered.
- */
- if (cpuidle_register_device(dev)) {
- per_cpu(acpi_cpuidle_device, pr->id) = NULL;
- kfree(dev);
+ /* Register the per-CPU cpuidle_device. The cpuidle driver
+ * must already be registered before registering the device.
+ */
+ retval = cpuidle_register_device(dev);
+ if (retval) {
+ if (acpi_processor_registered == 0)
+ cpuidle_unregister_driver(&acpi_idle_driver);
+
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
+ return retval;
+ }
+ acpi_processor_registered++;
}
+ return 0;
}
-void acpi_processor_power_exit(struct acpi_processor *pr)
+int acpi_processor_power_exit(struct acpi_processor *pr)
{
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (disabled_by_idle_boot_param())
- return;
+ return 0;
if (pr->flags.power) {
cpuidle_unregister_device(dev);
+ acpi_processor_registered--;
+ if (acpi_processor_registered == 0)
+ cpuidle_unregister_driver(&acpi_idle_driver);
+
kfree(dev);
}
pr->flags.power_setup_done = 0;
+ return 0;
}
MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
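
The net effect of the processor_idle.c changes above is a simple registration count: the first CPU with usable power states registers the cpuidle driver, every later CPU only adds its own device, and the last exit drops the driver again. A compact stand-alone sketch of that counting scheme, using stand-in names rather than the cpuidle API:

#include <stdio.h>

static int registered;

static int power_init_sketch(int cpu, int has_power_states)
{
        if (!has_power_states)
                return 0;
        if (!registered)
                printf("cpu%d: registering idle driver\n", cpu);
        printf("cpu%d: registering idle device\n", cpu);
        registered++;
        return 0;
}

static void power_exit_sketch(int cpu)
{
        printf("cpu%d: unregistering idle device\n", cpu);
        if (--registered == 0)
                printf("cpu%d: unregistering idle driver\n", cpu);
}

int main(void)
{
        for (int cpu = 0; cpu < 2; cpu++)
                power_init_sketch(cpu, 1);
        for (int cpu = 1; cpu >= 0; cpu--)
                power_exit_sketch(cpu);
        return 0;
}
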
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
index d05397a50ca6..81e8432b5904 100644
--- a/drivers/android/binder_netlink.c
+++ b/drivers/android/binder_netlink.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/binder.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
index 882c7a6b537e..57399942a5e3 100644
--- a/drivers/android/binder_netlink.h
+++ b/drivers/android/binder_netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/binder.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_BINDER_GEN_H
#define _LINUX_BINDER_GEN_H
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2a210719c4ce..f48fb63d7e85 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3006,6 +3006,16 @@ int ata_dev_configure(struct ata_device *dev)
}
dev->n_sectors = ata_id_n_sectors(id);
+ if (ata_id_is_locked(id)) {
+ /*
+ * If Security locked, set capacity to zero to prevent
+ * any I/O, e.g. partition scanning, as any I/O to a
+ * locked drive will result in user visible errors.
+ */
+ ata_dev_info(dev,
+ "Security locked, setting capacity to zero\n");
+ dev->n_sectors = 0;
+ }
/* get current R/W Multiple count setting */
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b43a3196e2be..434774e71fe6 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -992,6 +992,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
return;
}
+ if (ata_id_is_locked(dev->id)) {
+ /* Security locked */
+ /* LOGICAL UNIT ACCESS NOT AUTHORIZED */
+ ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
+ return;
+ }
+
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"Missing result TF: reporting aborted command\n");
@@ -4894,8 +4901,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
if (do_resume) {
ret = scsi_resume_device(sdev);
- if (ret == -EWOULDBLOCK)
+ if (ret == -EWOULDBLOCK) {
+ scsi_device_put(sdev);
goto unlock_scan;
+ }
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 4fea1149e003..f62e38571440 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1374,7 +1374,9 @@ fore200e_open(struct atm_vcc *vcc)
vcc->dev_data = NULL;
+ mutex_lock(&fore200e->rate_mtx);
fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
+ mutex_unlock(&fore200e->rate_mtx);
kfree(fore200e_vcc);
return -EINVAL;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e83503bdc1fd..1de1cd72b616 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -888,12 +888,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
+ if (dev->power.syscore)
+ goto Skip;
+
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -926,11 +929,11 @@ Run:
Skip:
dev->power.is_late_suspended = false;
+ pm_runtime_enable(dev);
Out:
TRACE_RESUME(error);
- pm_runtime_enable(dev);
complete_all(&dev->power.completion);
if (error) {
@@ -1615,12 +1618,6 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- /*
- * Disable runtime PM for the device without checking if there is a
- * pending resume request for it.
- */
- __pm_runtime_disable(dev, false);
-
dpm_wait_for_subordinate(dev, async);
if (READ_ONCE(async_error))
@@ -1631,9 +1628,18 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
goto Complete;
}
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Complete;
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
+ __pm_runtime_disable(dev, false);
+
+ if (dev->power.syscore)
+ goto Skip;
+
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1664,6 +1670,7 @@ Run:
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
+ pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a722446ec73d..fa683bb7f0b4 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2711,9 +2711,21 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
- struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ struct btmtk_data *btmtk_data;
int err;
+ if (!data->hdev)
+ return;
+
+ btmtk_data = hci_get_priv(data->hdev);
+ if (!btmtk_data)
+ return;
+
+ if (!btmtk_data->isopkt_intf) {
+ bt_dev_err(data->hdev, "Can't claim NULL iso interface");
+ return;
+ }
+
/*
* The function usb_driver_claim_interface() is documented to need
* locks held if it's not called from a probe routine. The code here
@@ -2735,17 +2747,30 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
- struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct btmtk_data *btmtk_data;
+
+ if (!hdev)
+ return;
+
+ btmtk_data = hci_get_priv(hdev);
+ if (!btmtk_data)
+ return;
if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
- dev_kfree_skb_irq(btmtk_data->isopkt_skb);
- btmtk_data->isopkt_skb = NULL;
- usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
- usb_driver_release_interface(&btusb_driver,
- btmtk_data->isopkt_intf);
+ if (btmtk_data->isopkt_skb) {
+ dev_kfree_skb_irq(btmtk_data->isopkt_skb);
+ btmtk_data->isopkt_skb = NULL;
+ }
+
+ if (btmtk_data->isopkt_intf) {
+ usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
+ usb_driver_release_interface(&btusb_driver,
+ btmtk_data->isopkt_intf);
+ btmtk_data->isopkt_intf = NULL;
+ }
}
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
index 70ce0ca0cb7d..0339c4af0fe5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
@@ -121,11 +121,11 @@ static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
&r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
- &r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
+ &r_apb0_clk.common.hw, 0x1dc, BIT(0), CLK_IS_CRITICAL);
static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
&r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
- &r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
+ &r_apb0_clk.common.hw, 0x22c, BIT(0), CLK_IS_CRITICAL);
static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
&r_ahb_clk.common,
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
index acb532f8361b..20dad06b37ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -300,7 +300,7 @@ static struct ccu_nm pll_audio0_4x_clk = {
.m = _SUNXI_CCU_DIV(16, 6),
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
0x178, BIT(31)),
- .min_rate = 180000000U,
+ .min_rate = 90000000U,
.max_rate = 3000000000U,
.common = {
.reg = 0x078,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 38897bb14a2c..492a10f1bdbf 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -603,9 +603,6 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- if (!cpu_feature_enabled(X86_FEATURE_IDA))
- return true;
-
rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -2106,7 +2103,8 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -2271,7 +2269,8 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
val |= (u64)1 << 32;
return val;
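
The intel_pstate hunks above compose the same performance-control value in both the Atom and core paths: the target P-state sits in bits 15:8 and the "turbo disengage" bit 32 is now only requested when the CPU actually has IDA. A small sketch of that composition under those assumptions (plain C, not the driver's MSR plumbing):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t perf_ctl_value(int pstate, bool no_turbo,
                               bool turbo_disabled, bool has_ida)
{
        uint64_t val = (uint64_t)pstate << 8;

        /* Mirror the added condition: only set bit 32 when IDA exists. */
        if (no_turbo && !turbo_disabled && has_ida)
                val |= (uint64_t)1 << 32;
        return val;
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)perf_ctl_value(28, true, false, true));
        printf("0x%llx\n", (unsigned long long)perf_ctl_value(28, true, false, false));
        return 0;
}
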
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index a5b96adf2d1e..3b391a146635 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -3871,10 +3871,12 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
pdev = container_of(dev, struct pci_dev, dev);
if (pci_physfn(pdev) != qm->pdev) {
pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ put_device(dev);
return -EINVAL;
}
*fun_index = pdev->devfn;
+ put_device(dev);
return 0;
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index 215a1a8ba7e9..07a74f702c3a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -24,7 +24,8 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vstr[0] = '\0';
@@ -32,7 +33,8 @@ static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index b06fee1978ba..41b64d871c5a 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -3702,6 +3702,7 @@ static int cxl_region_debugfs_poison_inject(void *data, u64 offset)
if (validate_region_offset(cxlr, offset))
return -EINVAL;
+ offset -= cxlr->params.cache_size;
rc = region_offset_to_dpa_result(cxlr, offset, &result);
if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
dev_dbg(&cxlr->dev,
@@ -3734,6 +3735,7 @@ static int cxl_region_debugfs_poison_clear(void *data, u64 offset)
if (validate_region_offset(cxlr, offset))
return -EINVAL;
+ offset -= cxlr->params.cache_size;
rc = region_offset_to_dpa_result(cxlr, offset, &result);
if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
dev_dbg(&cxlr->dev,
diff --git a/drivers/dibs/dibs_main.c b/drivers/dibs/dibs_main.c
index dac14d843af7..b8c16586706c 100644
--- a/drivers/dibs/dibs_main.c
+++ b/drivers/dibs/dibs_main.c
@@ -6,8 +6,7 @@
*
* Copyright IBM Corp. 2025
*/
-#define KMSG_COMPONENT "dibs"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "dibs: " fmt
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 3c6d570babf8..36d11ff195df 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index 3da10cfe9a6e..7419679b6977 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_DPLL_GEN_H
#define _LINUX_DPLL_GEN_H
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
index 84e22aae57e5..bd324c7fe710 100644
--- a/drivers/dpll/zl3073x/Makefile
+++ b/drivers/dpll/zl3073x/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZL3073X) += zl3073x.o
-zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o prop.o
+zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o \
+ out.o prop.o ref.o synth.o
obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
zl3073x_i2c-objs := i2c.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
index e42e527813cf..383e2397dd03 100644
--- a/drivers/dpll/zl3073x/core.c
+++ b/drivers/dpll/zl3073x/core.c
@@ -129,47 +129,6 @@ const struct regmap_config zl3073x_regmap_config = {
};
EXPORT_SYMBOL_NS_GPL(zl3073x_regmap_config, "ZL3073X");
-/**
- * zl3073x_ref_freq_factorize - factorize given frequency
- * @freq: input frequency
- * @base: base frequency
- * @mult: multiplier
- *
- * Checks if the given frequency can be factorized using one of the
- * supported base frequencies. If so the base frequency and multiplier
- * are stored into appropriate parameters if they are not NULL.
- *
- * Return: 0 on success, -EINVAL if the frequency cannot be factorized
- */
-int
-zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
-{
- static const u16 base_freqs[] = {
- 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
- 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
- 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
- 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
- 32000, 40000, 50000, 62500,
- };
- u32 div;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
- div = freq / base_freqs[i];
-
- if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
- if (base)
- *base = base_freqs[i];
- if (mult)
- *mult = div;
-
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
static bool
zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
{
@@ -593,190 +552,6 @@ int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
return rc;
}
-/**
- * zl3073x_ref_state_fetch - get input reference state
- * @zldev: pointer to zl3073x_dev structure
- * @index: input reference index to fetch state for
- *
- * Function fetches information for the given input reference that are
- * invariant and stores them for later use.
- *
- * Return: 0 on success, <0 on error
- */
-static int
-zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
-{
- struct zl3073x_ref *input = &zldev->ref[index];
- u8 ref_config;
- int rc;
-
- /* If the input is differential then the configuration for N-pin
- * reference is ignored and P-pin config is used for both.
- */
- if (zl3073x_is_n_pin(index) &&
- zl3073x_ref_is_diff(zldev, index - 1)) {
- input->enabled = zl3073x_ref_is_enabled(zldev, index - 1);
- input->diff = true;
-
- return 0;
- }
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(index));
- if (rc)
- return rc;
-
- /* Read ref_config register */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref_config);
- if (rc)
- return rc;
-
- input->enabled = FIELD_GET(ZL_REF_CONFIG_ENABLE, ref_config);
- input->diff = FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref_config);
-
- dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
- str_enabled_disabled(input->enabled),
- input->diff ? "differential" : "single-ended");
-
- return rc;
-}
-
-/**
- * zl3073x_out_state_fetch - get output state
- * @zldev: pointer to zl3073x_dev structure
- * @index: output index to fetch state for
- *
- * Function fetches information for the given output (not output pin)
- * that are invariant and stores them for later use.
- *
- * Return: 0 on success, <0 on error
- */
-static int
-zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
-{
- struct zl3073x_out *out = &zldev->out[index];
- u8 output_ctrl, output_mode;
- int rc;
-
- /* Read output configuration */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &output_ctrl);
- if (rc)
- return rc;
-
- /* Store info about output enablement and synthesizer the output
- * is connected to.
- */
- out->enabled = FIELD_GET(ZL_OUTPUT_CTRL_EN, output_ctrl);
- out->synth = FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, output_ctrl);
-
- dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
- str_enabled_disabled(out->enabled), out->synth);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(index));
- if (rc)
- return rc;
-
- /* Read output_mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
- if (rc)
- return rc;
-
- /* Extract and store output signal format */
- out->signal_format = FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT,
- output_mode);
-
- dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
- out->signal_format);
-
- return rc;
-}
-
-/**
- * zl3073x_synth_state_fetch - get synth state
- * @zldev: pointer to zl3073x_dev structure
- * @index: synth index to fetch state for
- *
- * Function fetches information for the given synthesizer that are
- * invariant and stores them for later use.
- *
- * Return: 0 on success, <0 on error
- */
-static int
-zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
-{
- struct zl3073x_synth *synth = &zldev->synth[index];
- u16 base, m, n;
- u8 synth_ctrl;
- u32 mult;
- int rc;
-
- /* Read synth control register */
- rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth_ctrl);
- if (rc)
- return rc;
-
- /* Store info about synth enablement and DPLL channel the synth is
- * driven by.
- */
- synth->enabled = FIELD_GET(ZL_SYNTH_CTRL_EN, synth_ctrl);
- synth->dpll = FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth_ctrl);
-
- dev_dbg(zldev->dev, "SYNTH%u is %s and driven by DPLL%u\n", index,
- str_enabled_disabled(synth->enabled), synth->dpll);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read synth configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
- ZL_REG_SYNTH_MB_MASK, BIT(index));
- if (rc)
- return rc;
-
- /* The output frequency is determined by the following formula:
- * base * multiplier * numerator / denominator
- *
- * Read registers with these values
- */
- rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &base);
- if (rc)
- return rc;
-
- rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &mult);
- if (rc)
- return rc;
-
- rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &m);
- if (rc)
- return rc;
-
- rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &n);
- if (rc)
- return rc;
-
- /* Check denominator for zero to avoid div by 0 */
- if (!n) {
- dev_err(zldev->dev,
- "Zero divisor for SYNTH%u retrieved from device\n",
- index);
- return -EINVAL;
- }
-
- /* Compute and store synth frequency */
- zldev->synth[index].freq = div_u64(mul_u32_u32(base * m, mult), n);
-
- dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
- zldev->synth[index].freq);
-
- return rc;
-}
-
static int
zl3073x_dev_state_fetch(struct zl3073x_dev *zldev)
{
@@ -816,6 +591,21 @@ zl3073x_dev_state_fetch(struct zl3073x_dev *zldev)
return rc;
}
+static void
+zl3073x_dev_ref_status_update(struct zl3073x_dev *zldev)
+{
+ int i, rc;
+
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(i),
+ &zldev->ref[i].mon_status);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to get REF%u status: %pe\n", i,
+ ERR_PTR(rc));
+ }
+}
+
/**
* zl3073x_ref_phase_offsets_update - update reference phase offsets
* @zldev: pointer to zl3073x_dev structure
@@ -935,6 +725,9 @@ zl3073x_dev_periodic_work(struct kthread_work *work)
struct zl3073x_dpll *zldpll;
int rc;
+ /* Update input references status */
+ zl3073x_dev_ref_status_update(zldev);
+
/* Update DPLL-to-connected-ref phase offsets registers */
rc = zl3073x_ref_phase_offsets_update(zldev, -1);
if (rc)
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
index 1dca4ddcf235..09bca2d0926d 100644
--- a/drivers/dpll/zl3073x/core.h
+++ b/drivers/dpll/zl3073x/core.h
@@ -9,7 +9,10 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "out.h"
+#include "ref.h"
#include "regs.h"
+#include "synth.h"
struct device;
struct regmap;
@@ -28,42 +31,6 @@ struct zl3073x_dpll;
ZL3073X_NUM_OUTPUT_PINS)
/**
- * struct zl3073x_ref - input reference invariant info
- * @enabled: input reference is enabled or disabled
- * @diff: true if input reference is differential
- * @ffo: current fractional frequency offset
- */
-struct zl3073x_ref {
- bool enabled;
- bool diff;
- s64 ffo;
-};
-
-/**
- * struct zl3073x_out - output invariant info
- * @enabled: out is enabled or disabled
- * @synth: synthesizer the out is connected to
- * @signal_format: out signal format
- */
-struct zl3073x_out {
- bool enabled;
- u8 synth;
- u8 signal_format;
-};
-
-/**
- * struct zl3073x_synth - synthesizer invariant info
- * @freq: synthesizer frequency
- * @dpll: ID of DPLL the synthesizer is driven by
- * @enabled: synth is enabled or disabled
- */
-struct zl3073x_synth {
- u32 freq;
- u8 dpll;
- bool enabled;
-};
-
-/**
* struct zl3073x_dev - zl3073x device
* @dev: pointer to device
* @regmap: regmap to access device registers
@@ -175,7 +142,6 @@ int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
* Misc operations
*****************/
-int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel);
static inline bool
@@ -217,172 +183,141 @@ zl3073x_output_pin_out_get(u8 id)
}
/**
- * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * zl3073x_dev_ref_freq_get - get input reference frequency
* @zldev: pointer to zl3073x device
* @index: input reference index
*
- * Return: the latest measured fractional frequency offset
+ * Return: frequency of given input reference
*/
-static inline s64
-zl3073x_ref_ffo_get(struct zl3073x_dev *zldev, u8 index)
+static inline u32
+zl3073x_dev_ref_freq_get(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->ref[index].ffo;
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_freq_get(ref);
}
/**
- * zl3073x_ref_is_diff - check if the given input reference is differential
+ * zl3073x_dev_ref_is_diff - check if the given input reference is differential
* @zldev: pointer to zl3073x device
* @index: input reference index
*
* Return: true if reference is differential, false if reference is single-ended
*/
static inline bool
-zl3073x_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->ref[index].diff;
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_is_diff(ref);
}
-/**
- * zl3073x_ref_is_enabled - check if the given input reference is enabled
+/**
+ * zl3073x_dev_ref_is_status_ok - check the given input reference status
* @zldev: pointer to zl3073x device
* @index: input reference index
*
- * Return: true if input refernce is enabled, false otherwise
+ * Return: true if the reference monitor status is OK, false otherwise
*/
static inline bool
-zl3073x_ref_is_enabled(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_ref_is_status_ok(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->ref[index].enabled;
-}
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
-/**
- * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
- * @zldev: pointer to zl3073x device
- * @index: synth index
- *
- * Return: ID of DPLL the given synthetizer is driven by
- */
-static inline u8
-zl3073x_synth_dpll_get(struct zl3073x_dev *zldev, u8 index)
-{
- return zldev->synth[index].dpll;
+ return zl3073x_ref_is_status_ok(ref);
}
/**
- * zl3073x_synth_freq_get - get synth current freq
+ * zl3073x_dev_synth_freq_get - get synth current freq
* @zldev: pointer to zl3073x device
* @index: synth index
*
 * Return: frequency of given synthesizer
*/
static inline u32
-zl3073x_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->synth[index].freq;
-}
+ const struct zl3073x_synth *synth;
-/**
- * zl3073x_synth_is_enabled - check if the given synth is enabled
- * @zldev: pointer to zl3073x device
- * @index: synth index
- *
- * Return: true if synth is enabled, false otherwise
- */
-static inline bool
-zl3073x_synth_is_enabled(struct zl3073x_dev *zldev, u8 index)
-{
- return zldev->synth[index].enabled;
+ synth = zl3073x_synth_state_get(zldev, index);
+ return zl3073x_synth_freq_get(synth);
}
/**
- * zl3073x_out_synth_get - get synth connected to given output
+ * zl3073x_dev_out_synth_get - get synth connected to given output
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: index of synth connected to given output.
*/
static inline u8
-zl3073x_out_synth_get(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_out_synth_get(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->out[index].synth;
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+
+ return zl3073x_out_synth_get(out);
}
/**
- * zl3073x_out_is_enabled - check if the given output is enabled
+ * zl3073x_dev_out_is_enabled - check if the given output is enabled
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: true if the output is enabled, false otherwise
*/
static inline bool
-zl3073x_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
{
- u8 synth;
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+ const struct zl3073x_synth *synth;
+ u8 synth_id;
/* Output is enabled only if associated synth is enabled */
- synth = zl3073x_out_synth_get(zldev, index);
- if (zl3073x_synth_is_enabled(zldev, synth))
- return zldev->out[index].enabled;
+ synth_id = zl3073x_out_synth_get(out);
+ synth = zl3073x_synth_state_get(zldev, synth_id);
- return false;
+ return zl3073x_synth_is_enabled(synth) && zl3073x_out_is_enabled(out);
}
/**
- * zl3073x_out_signal_format_get - get output signal format
- * @zldev: pointer to zl3073x device
- * @index: output index
- *
- * Return: signal format of given output
- */
-static inline u8
-zl3073x_out_signal_format_get(struct zl3073x_dev *zldev, u8 index)
-{
- return zldev->out[index].signal_format;
-}
-
-/**
- * zl3073x_out_dpll_get - get DPLL ID the output is driven by
+ * zl3073x_dev_out_dpll_get - get DPLL ID the output is driven by
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: ID of DPLL the given output is driven by
*/
static inline
-u8 zl3073x_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
+u8 zl3073x_dev_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
{
- u8 synth;
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+ const struct zl3073x_synth *synth;
+ u8 synth_id;
/* Get synthesizer connected to given output */
- synth = zl3073x_out_synth_get(zldev, index);
+ synth_id = zl3073x_out_synth_get(out);
+ synth = zl3073x_synth_state_get(zldev, synth_id);
/* Return DPLL that drives the synth */
- return zl3073x_synth_dpll_get(zldev, synth);
+ return zl3073x_synth_dpll_get(synth);
}
/**
- * zl3073x_out_is_diff - check if the given output is differential
+ * zl3073x_dev_out_is_diff - check if the given output is differential
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: true if output is differential, false if output is single-ended
*/
static inline bool
-zl3073x_out_is_diff(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_out_is_diff(struct zl3073x_dev *zldev, u8 index)
{
- switch (zl3073x_out_signal_format_get(zldev, index)) {
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
- return true;
- default:
- break;
- }
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
- return false;
+ return zl3073x_out_is_diff(out);
}
/**
- * zl3073x_output_pin_is_enabled - check if the given output pin is enabled
+ * zl3073x_dev_output_pin_is_enabled - check if the given output pin is enabled
* @zldev: pointer to zl3073x device
* @id: output pin id
*
@@ -392,16 +327,21 @@ zl3073x_out_is_diff(struct zl3073x_dev *zldev, u8 index)
* Return: true if output pin is enabled, false if output pin is disabled
*/
static inline bool
-zl3073x_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
+zl3073x_dev_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
{
- u8 output = zl3073x_output_pin_out_get(id);
+ u8 out_id = zl3073x_output_pin_out_get(id);
+ const struct zl3073x_out *out;
+
+ out = zl3073x_out_state_get(zldev, out_id);
- /* Check if the whole output is enabled */
- if (!zl3073x_out_is_enabled(zldev, output))
+ /* Check if the output is enabled - use the _dev_ helper, which
+ * also checks that the attached synth is enabled.
+ */
+ if (!zl3073x_dev_out_is_enabled(zldev, out_id))
return false;
/* Check signal format */
- switch (zl3073x_out_signal_format_get(zldev, output)) {
+ switch (zl3073x_out_signal_format_get(out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED:
/* Both output pins are disabled by signal format */
return false;
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index d90150671d37..9879d85d29af 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -100,60 +100,6 @@ zl3073x_dpll_pin_direction_get(const struct dpll_pin *dpll_pin, void *pin_priv,
return 0;
}
-/**
- * zl3073x_dpll_input_ref_frequency_get - get input reference frequency
- * @zldpll: pointer to zl3073x_dpll
- * @ref_id: reference id
- * @frequency: pointer to variable to store frequency
- *
- * Reads frequency of given input reference.
- *
- * Return: 0 on success, <0 on error
- */
-static int
-zl3073x_dpll_input_ref_frequency_get(struct zl3073x_dpll *zldpll, u8 ref_id,
- u32 *frequency)
-{
- struct zl3073x_dev *zldev = zldpll->dev;
- u16 base, mult, num, denom;
- int rc;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref_id));
- if (rc)
- return rc;
-
- /* Read registers to compute resulting frequency */
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &base);
- if (rc)
- return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &mult);
- if (rc)
- return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &num);
- if (rc)
- return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &denom);
- if (rc)
- return rc;
-
- /* Sanity check that HW has not returned zero denominator */
- if (!denom) {
- dev_err(zldev->dev,
- "Zero divisor for ref %u frequency got from device\n",
- ref_id);
- return -EINVAL;
- }
-
- /* Compute the frequency */
- *frequency = mul_u64_u32_div(base * mult, num, denom);
-
- return rc;
-}
-
static int
zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
void *pin_priv,
@@ -165,39 +111,15 @@ zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 ref, ref_sync_ctrl, sync_mode;
- u32 esync_div, ref_freq;
- int rc;
-
- /* Get reference frequency */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, pin->id, &ref_freq);
- if (rc)
- return rc;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
+ const struct zl3073x_ref *ref;
+ u8 ref_id;
- /* Get ref sync mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
- if (rc)
- return rc;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
- /* Get esync divisor */
- rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &esync_div);
- if (rc)
- return rc;
-
- sync_mode = FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref_sync_ctrl);
-
- switch (sync_mode) {
+ switch (FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref->sync_ctrl)) {
case ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75:
- esync->freq = (esync_div == ZL_REF_ESYNC_DIV_1HZ) ? 1 : 0;
+ esync->freq = ref->esync_n_div == ZL_REF_ESYNC_DIV_1HZ ? 1 : 0;
esync->pulse = 25;
break;
default:
@@ -209,7 +131,7 @@ zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
/* If the pin supports esync control expose its range but only
* if the current reference frequency is > 1 Hz.
*/
- if (pin->esync_control && ref_freq > 1) {
+ if (pin->esync_control && zl3073x_ref_freq_get(ref) > 1) {
esync->range = esync_freq_ranges;
esync->range_num = ARRAY_SIZE(esync_freq_ranges);
} else {
@@ -217,7 +139,7 @@ zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
esync->range_num = 0;
}
- return rc;
+ return 0;
}
static int
@@ -230,22 +152,11 @@ zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 ref, ref_sync_ctrl, sync_mode;
- int rc;
+ struct zl3073x_ref ref;
+ u8 ref_id, sync_mode;
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration into mailbox */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
-
- /* Get ref sync mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
- if (rc)
- return rc;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
/* Use freq == 0 to disable esync */
if (!freq)
@@ -253,25 +164,16 @@ zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
else
sync_mode = ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75;
- ref_sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
- ref_sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
-
- /* Update ref sync control register */
- rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL, ref_sync_ctrl);
- if (rc)
- return rc;
+ ref.sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
+ ref.sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
if (freq) {
- /* 1 Hz is only supported frequnecy currently */
- rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
- ZL_REF_ESYNC_DIV_1HZ);
- if (rc)
- return rc;
+ /* 1 Hz is the only supported frequency for now */
+ ref.esync_n_div = ZL_REF_ESYNC_DIV_1HZ;
}
- /* Commit reference configuration */
- return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
- ZL_REG_REF_MB_MASK, BIT(ref));
+ /* Update reference configuration */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
static int
@@ -295,17 +197,12 @@ zl3073x_dpll_input_pin_frequency_get(const struct dpll_pin *dpll_pin,
{
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dpll_pin *pin = pin_priv;
- u32 ref_freq;
- u8 ref;
- int rc;
+ u8 ref_id;
- /* Read and return ref frequency */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref, &ref_freq);
- if (!rc)
- *frequency = ref_freq;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ *frequency = zl3073x_dev_ref_freq_get(zldpll->dev, ref_id);
- return rc;
+ return 0;
}
static int
@@ -318,39 +215,18 @@ zl3073x_dpll_input_pin_frequency_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u16 base, mult;
- u8 ref;
- int rc;
+ struct zl3073x_ref ref;
+ u8 ref_id;
- /* Get base frequency and multiplier for the requested frequency */
- rc = zl3073x_ref_freq_factorize(frequency, &base, &mult);
- if (rc)
- return rc;
+ /* Get reference state */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
- guard(mutex)(&zldev->multiop_lock);
+ /* Update frequency */
+ zl3073x_ref_freq_set(&ref, frequency);
- /* Load reference configuration */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
-
- /* Update base frequency, multiplier, numerator & denominator */
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE, base);
- if (rc)
- return rc;
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT, mult);
- if (rc)
- return rc;
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M, 1);
- if (rc)
- return rc;
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N, 1);
- if (rc)
- return rc;
-
- /* Commit reference configuration */
- return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
- ZL_REG_REF_MB_MASK, BIT(ref));
+ /* Commit reference state */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
/**
@@ -497,19 +373,10 @@ zl3073x_dpll_connected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
if (rc)
return rc;
- if (ZL3073X_DPLL_REF_IS_VALID(*ref)) {
- u8 ref_status;
-
- /* Read the reference monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(*ref),
- &ref_status);
- if (rc)
- return rc;
-
- /* If the monitor indicates an error nothing is connected */
- if (ref_status != ZL_REF_MON_STATUS_OK)
- *ref = ZL3073X_DPLL_REF_NONE;
- }
+ /* If the monitor indicates an error, nothing is connected */
+ if (ZL3073X_DPLL_REF_IS_VALID(*ref) &&
+ !zl3073x_dev_ref_is_status_ok(zldev, *ref))
+ *ref = ZL3073X_DPLL_REF_NONE;
return 0;
}
@@ -524,34 +391,25 @@ zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 conn_ref, ref, ref_status;
+ const struct zl3073x_ref *ref;
+ u8 conn_id, ref_id;
s64 ref_phase;
int rc;
/* Get currently connected reference */
- rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_ref);
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_id);
if (rc)
return rc;
/* Report phase offset only for currently connected pin if the phase
- * monitor feature is disabled.
+ * monitor feature is disabled and only if the input pin signal is
+ * present.
*/
- ref = zl3073x_input_pin_ref_get(pin->id);
- if (!zldpll->phase_monitor && ref != conn_ref) {
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+ if ((!zldpll->phase_monitor && ref_id != conn_id) ||
+ !zl3073x_ref_is_status_ok(ref)) {
*phase_offset = 0;
-
- return 0;
- }
-
- /* Get this pin monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &ref_status);
- if (rc)
- return rc;
-
- /* Report phase offset only if the input pin signal is present */
- if (ref_status != ZL_REF_MON_STATUS_OK) {
- *phase_offset = 0;
-
return 0;
}
@@ -561,20 +419,12 @@ zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
* the phase offset is modded to the period of the signal
* the dpll is locked to.
*/
- if (ZL3073X_DPLL_REF_IS_VALID(conn_ref) && conn_ref != ref) {
+ if (ZL3073X_DPLL_REF_IS_VALID(conn_id) && conn_id != ref_id) {
u32 conn_freq, ref_freq;
- /* Get frequency of connected ref */
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, conn_ref,
- &conn_freq);
- if (rc)
- return rc;
-
- /* Get frequency of given ref */
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref,
- &ref_freq);
- if (rc)
- return rc;
+ /* Get frequency of connected and given ref */
+ conn_freq = zl3073x_dev_ref_freq_get(zldev, conn_id);
+ ref_freq = zl3073x_ref_freq_get(ref);
if (conn_freq > ref_freq) {
s64 conn_period, div_factor;
@@ -601,33 +451,23 @@ zl3073x_dpll_input_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_ref *ref;
s64 phase_comp;
- u8 ref;
- int rc;
-
- guard(mutex)(&zldev->multiop_lock);
+ u8 ref_id;
/* Read reference configuration */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
-
- /* Read current phase offset compensation */
- rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, &phase_comp);
- if (rc)
- return rc;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
/* Perform sign extension for 48bit signed value */
- phase_comp = sign_extend64(phase_comp, 47);
+ phase_comp = sign_extend64(ref->phase_comp, 47);
/* Reverse two's complement negation applied during set and convert
* to 32bit signed int
*/
*phase_adjust = (s32)-phase_comp;
- return rc;
+ return 0;
}
static int
@@ -641,32 +481,20 @@ zl3073x_dpll_input_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- s64 phase_comp;
- u8 ref;
- int rc;
+ struct zl3073x_ref ref;
+ u8 ref_id;
+
+ /* Read reference configuration */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
/* The value in the register is stored as two's complement negation
* of requested value.
*/
- phase_comp = -phase_adjust;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
-
- /* Write the requested value into the compensation register */
- rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, phase_comp);
- if (rc)
- return rc;
+ ref.phase_comp = -phase_adjust;
- /* Commit reference configuration */
- return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
- ZL_REG_REF_MB_MASK, BIT(ref));
+ /* Update reference configuration */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
/**
@@ -777,7 +605,7 @@ zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
{
struct zl3073x_dpll *zldpll = pin->dpll;
struct zl3073x_dev *zldev = zldpll->dev;
- u8 ref, ref_conn, status;
+ u8 ref, ref_conn;
int rc;
ref = zl3073x_input_pin_ref_get(pin->id);
@@ -797,20 +625,9 @@ zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
* pin as selectable.
*/
if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
- pin->selectable) {
- /* Read reference monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
- &status);
- if (rc)
- return rc;
-
- /* If the monitor indicates errors report the reference
- * as disconnected
- */
- if (status == ZL_REF_MON_STATUS_OK) {
- *state = DPLL_PIN_STATE_SELECTABLE;
- return 0;
- }
+ zl3073x_dev_ref_is_status_ok(zldev, ref) && pin->selectable) {
+ *state = DPLL_PIN_STATE_SELECTABLE;
+ return 0;
}
/* Otherwise report the pin as disconnected */
@@ -953,21 +770,19 @@ zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- struct device *dev = zldev->dev;
- u32 esync_period, esync_width;
- u8 clock_type, synth;
- u8 out, output_mode;
- u32 output_div;
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u8 clock_type, out_id;
u32 synth_freq;
- int rc;
- out = zl3073x_output_pin_out_get(pin->id);
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
/* If N-division is enabled, esync is not supported. The register used
* for N-division is also used for the esync divider so both cannot
* be used.
*/
- switch (zl3073x_out_signal_format_get(zldev, out)) {
+ switch (zl3073x_out_signal_format_get(out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
return -EOPNOTSUPP;
@@ -975,38 +790,11 @@ zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
break;
}
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Read output mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
- if (rc)
- return rc;
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ synth_freq = zl3073x_synth_freq_get(synth);
- /* Read output divisor */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
- if (rc)
- return rc;
-
- /* Check output divisor for zero */
- if (!output_div) {
- dev_err(dev, "Zero divisor for OUTPUT%u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Get synth attached to output pin */
- synth = zl3073x_out_synth_get(zldev, out);
-
- /* Get synth frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, output_mode);
+ clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, out->mode);
if (clock_type != ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC) {
/* No need to read esync data if it is not enabled */
esync->freq = 0;
@@ -1015,38 +803,21 @@ zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
goto finish;
}
- /* Read esync period */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &esync_period);
- if (rc)
- return rc;
-
- /* Check esync divisor for zero */
- if (!esync_period) {
- dev_err(dev, "Zero esync divisor for OUTPUT%u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Get esync pulse width in units of half synth cycles */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, &esync_width);
- if (rc)
- return rc;
-
/* Compute esync frequency */
- esync->freq = synth_freq / output_div / esync_period;
+ esync->freq = synth_freq / out->div / out->esync_n_period;
/* By comparing the esync_pulse_width to the half of the pulse width
* the esync pulse percentage can be determined.
* Note that half pulse width is in units of half synth cycles, which
* is why it reduces down to be output_div.
*/
- esync->pulse = (50 * esync_width) / output_div;
+ esync->pulse = (50 * out->esync_n_width) / out->div;
finish:
/* Set supported esync ranges if the pin supports esync control and
* if the output frequency is > 1 Hz.
*/
- if (pin->esync_control && (synth_freq / output_div) > 1) {
+ if (pin->esync_control && (synth_freq / out->div) > 1) {
esync->range = esync_freq_ranges;
esync->range_num = ARRAY_SIZE(esync_freq_ranges);
} else {
@@ -1064,21 +835,22 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
void *dpll_priv, u64 freq,
struct netlink_ext_ack *extack)
{
- u32 esync_period, esync_width, output_div;
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 clock_type, out, output_mode, synth;
+ const struct zl3073x_synth *synth;
+ struct zl3073x_out out;
+ u8 clock_type, out_id;
u32 synth_freq;
- int rc;
- out = zl3073x_output_pin_out_get(pin->id);
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
/* If N-division is enabled, esync is not supported. The register used
* for N-division is also used for the esync divider so both cannot
* be used.
*/
- switch (zl3073x_out_signal_format_get(zldev, out)) {
+ switch (zl3073x_out_signal_format_get(&out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
return -EOPNOTSUPP;
@@ -1086,19 +858,6 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
break;
}
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Read output mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
- if (rc)
- return rc;
-
/* Select clock type */
if (freq)
clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC;
@@ -1106,38 +865,19 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL;
/* Update clock type in output mode */
- output_mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
- output_mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
- rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, output_mode);
- if (rc)
- return rc;
+ out.mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
+ out.mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
/* If esync is being disabled just write mailbox and finish */
if (!freq)
goto write_mailbox;
- /* Get synth attached to output pin */
- synth = zl3073x_out_synth_get(zldev, out);
-
- /* Get synth frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
- if (rc)
- return rc;
-
- /* Check output divisor for zero */
- if (!output_div) {
- dev_err(zldev->dev,
- "Zero divisor for OUTPUT%u got from device\n", out);
- return -EINVAL;
- }
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(&out));
+ synth_freq = zl3073x_synth_freq_get(synth);
/* Compute and update esync period */
- esync_period = synth_freq / (u32)freq / output_div;
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, esync_period);
- if (rc)
- return rc;
+ out.esync_n_period = synth_freq / (u32)freq / out.div;
/* Half of the period in units of 1/2 synth cycle can be represented by
* the output_div. To get the supported esync pulse width of 25% of the
@@ -1145,15 +885,11 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
* assumes that output_div is even, otherwise some resolution will be
* lost.
*/
- esync_width = output_div / 2;
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, esync_width);
- if (rc)
- return rc;
+ out.esync_n_width = out.div / 2;
write_mailbox:
/* Commit output configuration */
- return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
static int
@@ -1166,83 +902,46 @@ zl3073x_dpll_output_pin_frequency_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- struct device *dev = zldev->dev;
- u8 out, signal_format, synth;
- u32 output_div, synth_freq;
- int rc;
-
- out = zl3073x_output_pin_out_get(pin->id);
- synth = zl3073x_out_synth_get(zldev, out);
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u32 synth_freq;
+ u8 out_id;
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
- if (rc)
- return rc;
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
- /* Check output divisor for zero */
- if (!output_div) {
- dev_err(dev, "Zero divisor for output %u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Read used signal format for the given output */
- signal_format = zl3073x_out_signal_format_get(zldev, out);
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ synth_freq = zl3073x_synth_freq_get(synth);
- switch (signal_format) {
+ switch (zl3073x_out_signal_format_get(out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
		/* In case of divided format we have to distinguish between
* given output pin type.
+ *
+ * For the P-pin the resulting frequency is computed as a simple
+ * division of the synth frequency by the output divisor.
+ *
+ * For the N-pin we additionally divide by the value stored in the
+ * esync_n_period output mailbox register, which acts as the N-pin
+ * divisor in these modes.
+ */
- if (zl3073x_dpll_is_p_pin(pin)) {
- /* For P-pin the resulting frequency is computed as
- * simple division of synth frequency and output
- * divisor.
- */
- *frequency = synth_freq / output_div;
- } else {
- /* For N-pin we have to divide additionally by
- * divisor stored in esync_period output mailbox
- * register that is used as N-pin divisor for these
- * modes.
- */
- u32 ndiv;
-
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
- &ndiv);
- if (rc)
- return rc;
+ *frequency = synth_freq / out->div;
- /* Check N-pin divisor for zero */
- if (!ndiv) {
- dev_err(dev,
- "Zero N-pin divisor for output %u got from device\n",
- out);
- return -EINVAL;
- }
+ if (!zl3073x_dpll_is_p_pin(pin))
+ *frequency = (u32)*frequency / out->esync_n_period;
- /* Compute final divisor for N-pin */
- *frequency = synth_freq / output_div / ndiv;
- }
break;
default:
/* In other modes the resulting frequency is computed as
* division of synth frequency and output divisor.
*/
- *frequency = synth_freq / output_div;
+ *frequency = synth_freq / out->div;
break;
}
- return rc;
+ return 0;
}
static int
@@ -1255,28 +954,21 @@ zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- struct device *dev = zldev->dev;
- u32 output_n_freq, output_p_freq;
- u8 out, signal_format, synth;
- u32 cur_div, new_div, ndiv;
- u32 synth_freq;
- int rc;
+ const struct zl3073x_synth *synth;
+ u8 out_id, signal_format;
+ u32 new_div, synth_freq;
+ struct zl3073x_out out;
- out = zl3073x_output_pin_out_get(pin->id);
- synth = zl3073x_out_synth_get(zldev, out);
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
+
+ /* Get attached synth frequency and compute new divisor */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(&out));
+ synth_freq = zl3073x_synth_freq_get(synth);
new_div = synth_freq / (u32)frequency;
/* Get used signal format for the given output */
- signal_format = zl3073x_out_signal_format_get(zldev, out);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Load output configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
+ signal_format = zl3073x_out_signal_format_get(&out);
/* Check signal format */
if (signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV &&
@@ -1284,99 +976,50 @@ zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
/* For non N-divided signal formats the frequency is computed
* as division of synth frequency and output divisor.
*/
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
- if (rc)
- return rc;
+ out.div = new_div;
/* For 50/50 duty cycle the divisor is equal to width */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
- if (rc)
- return rc;
+ out.width = new_div;
/* Commit output configuration */
- return zl3073x_mb_op(zldev,
- ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- }
-
- /* For N-divided signal format get current divisor */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &cur_div);
- if (rc)
- return rc;
-
- /* Check output divisor for zero */
- if (!cur_div) {
- dev_err(dev, "Zero divisor for output %u got from device\n",
- out);
- return -EINVAL;
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
- /* Get N-pin divisor (shares the same register with esync */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &ndiv);
- if (rc)
- return rc;
-
- /* Check N-pin divisor for zero */
- if (!ndiv) {
- dev_err(dev,
- "Zero N-pin divisor for output %u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Compute current output frequency for P-pin */
- output_p_freq = synth_freq / cur_div;
-
- /* Compute current N-pin frequency */
- output_n_freq = output_p_freq / ndiv;
-
if (zl3073x_dpll_is_p_pin(pin)) {
/* We are going to change output frequency for P-pin but
* if the requested frequency is less than current N-pin
* frequency then indicate a failure as we are not able
* to compute N-pin divisor to keep its frequency unchanged.
+ *
+ * Update the N-pin divisor so its frequency stays unchanged.
*/
- if (frequency <= output_n_freq)
+ out.esync_n_period = (out.esync_n_period * out.div) / new_div;
+ if (!out.esync_n_period)
return -EINVAL;
/* Update the output divisor */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
- if (rc)
- return rc;
+ out.div = new_div;
/* For 50/50 duty cycle the divisor is equal to width */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
- if (rc)
- return rc;
-
- /* Compute new divisor for N-pin */
- ndiv = (u32)frequency / output_n_freq;
+ out.width = out.div;
} else {
/* We are going to change frequency of N-pin but if
* the requested freq is greater or equal than freq of P-pin
* in the output pair we cannot compute divisor for the N-pin.
* In this case indicate a failure.
+ *
+ * Compute the new N-pin divisor.
*/
- if (output_p_freq <= frequency)
+ out.esync_n_period = div64_u64(synth_freq, frequency * out.div);
+ if (!out.esync_n_period)
return -EINVAL;
-
- /* Compute new divisor for N-pin */
- ndiv = output_p_freq / (u32)frequency;
}
- /* Update divisor for the N-pin */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, ndiv);
- if (rc)
- return rc;
-
/* For 50/50 duty cycle the divisor is equal to width */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, ndiv);
- if (rc)
- return rc;
+ out.esync_n_width = out.esync_n_period;
/* Commit output configuration */
- return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
static int
@@ -1390,30 +1033,18 @@ zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- s32 phase_comp;
- u8 out;
- int rc;
-
- guard(mutex)(&zldev->multiop_lock);
+ const struct zl3073x_out *out;
+ u8 out_id;
- /* Read output configuration */
- out = zl3073x_output_pin_out_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Read current output phase compensation */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, &phase_comp);
- if (rc)
- return rc;
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
/* Convert value to ps and reverse two's complement negation applied
* during 'set'
*/
- *phase_adjust = -phase_comp * pin->phase_gran;
+ *phase_adjust = -out->phase_comp * pin->phase_gran;
- return rc;
+ return 0;
}
static int
@@ -1427,31 +1058,19 @@ zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 out;
- int rc;
+ struct zl3073x_out out;
+ u8 out_id;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
/* The value in the register is stored as two's complement negation
* of requested value and expressed in half synth clock cycles.
*/
- phase_adjust = -phase_adjust / pin->phase_gran;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration */
- out = zl3073x_output_pin_out_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Write the requested value into the compensation register */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, phase_adjust);
- if (rc)
- return rc;
+ out.phase_comp = -phase_adjust / pin->phase_gran;
/* Update output configuration from mailbox */
- return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
static int
@@ -1848,33 +1467,32 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
const char *name;
if (dir == DPLL_PIN_DIRECTION_INPUT) {
- u8 ref = zl3073x_input_pin_ref_get(index);
-
- name = "REF";
+ u8 ref_id = zl3073x_input_pin_ref_get(index);
+ const struct zl3073x_ref *ref;
/* Skip the pin if the DPLL is running in NCO mode */
if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_NCO)
return false;
- is_diff = zl3073x_ref_is_diff(zldev, ref);
- is_enabled = zl3073x_ref_is_enabled(zldev, ref);
+ name = "REF";
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+ is_diff = zl3073x_ref_is_diff(ref);
+ is_enabled = zl3073x_ref_is_enabled(ref);
} else {
/* Output P&N pair shares single HW output */
u8 out = zl3073x_output_pin_out_get(index);
- name = "OUT";
-
/* Skip the pin if it is connected to different DPLL channel */
- if (zl3073x_out_dpll_get(zldev, out) != zldpll->id) {
+ if (zl3073x_dev_out_dpll_get(zldev, out) != zldpll->id) {
dev_dbg(zldev->dev,
- "%s%u is driven by different DPLL\n", name,
- out);
+ "OUT%u is driven by different DPLL\n", out);
return false;
}
- is_diff = zl3073x_out_is_diff(zldev, out);
- is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
+ name = "OUT";
+ is_diff = zl3073x_dev_out_is_diff(zldev, out);
+ is_enabled = zl3073x_dev_output_pin_is_enabled(zldev, index);
}
/* Skip N-pin if the corresponding input/output is differential */
@@ -2031,42 +1649,26 @@ zl3073x_dpll_pin_phase_offset_check(struct zl3073x_dpll_pin *pin)
struct zl3073x_dev *zldev = zldpll->dev;
unsigned int reg;
s64 phase_offset;
- u8 ref;
+ u8 ref_id;
int rc;
- ref = zl3073x_input_pin_ref_get(pin->id);
+ /* No phase offset if the ref monitor reports signal errors */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ if (!zl3073x_dev_ref_is_status_ok(zldev, ref_id))
+ return false;
/* Select register to read phase offset value depending on pin and
* phase monitor state:
* 1) For connected pin use dpll_phase_err_data register
* 2) For other pins use appropriate ref_phase register if the phase
- * monitor feature is enabled and reference monitor does not
- * report signal errors for given input pin
+ * monitor feature is enabled.
*/
- if (pin->pin_state == DPLL_PIN_STATE_CONNECTED) {
+ if (pin->pin_state == DPLL_PIN_STATE_CONNECTED)
reg = ZL_REG_DPLL_PHASE_ERR_DATA(zldpll->id);
- } else if (zldpll->phase_monitor) {
- u8 status;
-
- /* Get reference monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
- &status);
- if (rc) {
- dev_err(zldev->dev,
- "Failed to read %s refmon status: %pe\n",
- pin->label, ERR_PTR(rc));
-
- return false;
- }
-
- if (status != ZL_REF_MON_STATUS_OK)
- return false;
-
- reg = ZL_REG_REF_PHASE(ref);
- } else {
- /* The pin is not connected or phase monitor disabled */
+ else if (zldpll->phase_monitor)
+ reg = ZL_REG_REF_PHASE(ref_id);
+ else
return false;
- }
/* Read measured phase offset value */
rc = zl3073x_read_u48(zldev, reg, &phase_offset);
@@ -2105,32 +1707,22 @@ zl3073x_dpll_pin_ffo_check(struct zl3073x_dpll_pin *pin)
{
struct zl3073x_dpll *zldpll = pin->dpll;
struct zl3073x_dev *zldev = zldpll->dev;
- u8 ref, status;
- s64 ffo;
- int rc;
+ const struct zl3073x_ref *ref;
+ u8 ref_id;
/* Get reference monitor status */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &status);
- if (rc) {
- dev_err(zldev->dev, "Failed to read %s refmon status: %pe\n",
- pin->label, ERR_PTR(rc));
-
- return false;
- }
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
/* Do not report ffo changes if the reference monitor report errors */
- if (status != ZL_REF_MON_STATUS_OK)
+ if (!zl3073x_ref_is_status_ok(ref))
return false;
- /* Get the latest measured ref's ffo */
- ffo = zl3073x_ref_ffo_get(zldev, ref);
-
/* Compare with previous value */
- if (pin->freq_offset != ffo) {
+ if (pin->freq_offset != ref->ffo) {
dev_dbg(zldev->dev, "%s freq offset changed: %lld -> %lld\n",
- pin->label, pin->freq_offset, ffo);
- pin->freq_offset = ffo;
+ pin->label, pin->freq_offset, ref->ffo);
+ pin->freq_offset = ref->ffo;
return true;
}
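
The dpll.c setters now share one cached read-modify-write pattern rather than driving the reference mailbox directly. An illustrative wrapper (the function name is made up; the helpers and field are the driver's own, as used in the hunks above):

static int example_ref_phase_adjust_set(struct zl3073x_dev *zldev, u8 ref_id,
					s32 phase_adjust)
{
	struct zl3073x_ref ref;

	/* Copy the cached reference state */
	ref = *zl3073x_ref_state_get(zldev, ref_id);

	/* Modify the local copy (two's complement negation as in the driver) */
	ref.phase_comp = -phase_adjust;

	/* Commit - only registers that changed are written via the mailbox */
	return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
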
diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c
index def37fe8d9b0..55b638247f4b 100644
--- a/drivers/dpll/zl3073x/fw.c
+++ b/drivers/dpll/zl3073x/fw.c
@@ -352,12 +352,12 @@ struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
}
/**
- * zl3073x_flash_bundle_flash - Flash all components
+ * zl3073x_fw_component_flash - Flash all components
* @zldev: zl3073x device structure
- * @components: pointer to components array
+ * @comp: pointer to components array
* @extack: netlink extack pointer to report errors
*
- * Returns 0 in case of success or negative number otherwise.
+ * Return: 0 on success or a negative error code otherwise.
*/
static int
zl3073x_fw_component_flash(struct zl3073x_dev *zldev,
diff --git a/drivers/dpll/zl3073x/out.c b/drivers/dpll/zl3073x/out.c
new file mode 100644
index 000000000000..86829a0c1c02
--- /dev/null
+++ b/drivers/dpll/zl3073x/out.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "out.h"
+
+/**
+ * zl3073x_out_state_fetch - fetch output state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to fetch state for
+ *
+ * Function fetches the state of the given output from hardware and stores it
+ * for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_out *out = &zldev->out[index];
+ int rc;
+
+ /* Read output control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &out->ctrl);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
+ str_enabled_disabled(zl3073x_out_is_enabled(out)),
+ zl3073x_out_synth_get(out));
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &out->mode);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
+ zl3073x_out_signal_format_get(out));
+
+ /* Read output divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &out->div);
+ if (rc)
+ return rc;
+
+ if (!out->div) {
+ dev_err(zldev->dev, "Zero divisor for OUT%u got from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ dev_dbg(zldev->dev, "OUT%u divisor: %u\n", index, out->div);
+
+ /* Read output width */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_WIDTH, &out->width);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ &out->esync_n_period);
+ if (rc)
+ return rc;
+
+ if (!out->esync_n_period) {
+ dev_err(zldev->dev,
+ "Zero esync divisor for OUT%u got from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH,
+ &out->esync_n_width);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP,
+ &out->phase_comp);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/**
+ * zl3073x_out_state_get - get current output state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to get state for
+ *
+ * Return: pointer to given output state
+ */
+const struct zl3073x_out *zl3073x_out_state_get(struct zl3073x_dev *zldev,
+ u8 index)
+{
+ return &zldev->out[index];
+}
+
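+/**
+ * zl3073x_out_state_set - commit new output state to hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to set state for
+ * @out: output state to be committed
+ *
+ * Function writes the fields that differ from the cached state into the
+ * output mailbox, commits the mailbox and on success updates the cached
+ * state.
+ *
+ * Return: 0 on success, <0 on error
+ */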
+int zl3073x_out_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_out *out)
+{
+ struct zl3073x_out *dout = &zldev->out[index];
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Update mailbox with changed values */
+ if (dout->div != out->div)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, out->div);
+ if (!rc && dout->width != out->width)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, out->width);
+ if (!rc && dout->esync_n_period != out->esync_n_period)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ out->esync_n_period);
+ if (!rc && dout->esync_n_width != out->esync_n_width)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH,
+ out->esync_n_width);
+ if (!rc && dout->mode != out->mode)
+ rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, out->mode);
+ if (!rc && dout->phase_comp != out->phase_comp)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP,
+ out->phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* After successful commit store new state */
+ dout->div = out->div;
+ dout->width = out->width;
+ dout->esync_n_period = out->esync_n_period;
+ dout->esync_n_width = out->esync_n_width;
+ dout->mode = out->mode;
+ dout->phase_comp = out->phase_comp;
+
+ return 0;
+}
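
Since zl3073x_out_state_set() only writes fields that differ from the cache, flipping a single field costs one register write plus the mailbox read/commit pair. An illustrative caller (wrapper name invented; it mirrors the esync-disable path in dpll.c above):

/* Illustrative only: disable esync on one output via the cached state */
static int example_out_esync_disable(struct zl3073x_dev *zldev, u8 out_id)
{
	struct zl3073x_out out = *zl3073x_out_state_get(zldev, out_id);

	out.mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
	out.mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE,
			       ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL);

	/* Only ZL_REG_OUTPUT_MODE changed, so only it is written and committed */
	return zl3073x_out_state_set(zldev, out_id, &out);
}
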
diff --git a/drivers/dpll/zl3073x/out.h b/drivers/dpll/zl3073x/out.h
new file mode 100644
index 000000000000..e8ea7a0e0f07
--- /dev/null
+++ b/drivers/dpll/zl3073x/out.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_OUT_H
+#define _ZL3073X_OUT_H
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_out - output state
+ * @div: output divisor
+ * @width: output pulse width
+ * @esync_n_period: embedded sync or N-pin period (for N-div formats)
+ * @esync_n_width: embedded sync or N-pin pulse width
+ * @phase_comp: phase compensation
+ * @ctrl: output control
+ * @mode: output mode
+ */
+struct zl3073x_out {
+ u32 div;
+ u32 width;
+ u32 esync_n_period;
+ u32 esync_n_width;
+ s32 phase_comp;
+ u8 ctrl;
+ u8 mode;
+};
+
+int zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index);
+const struct zl3073x_out *zl3073x_out_state_get(struct zl3073x_dev *zldev,
+ u8 index);
+
+int zl3073x_out_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_out *out);
+
+/**
+ * zl3073x_out_signal_format_get - get output signal format
+ * @out: pointer to out state
+ *
+ * Return: signal format of given output
+ */
+static inline u8 zl3073x_out_signal_format_get(const struct zl3073x_out *out)
+{
+ return FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT, out->mode);
+}
+
+/**
+ * zl3073x_out_is_diff - check if the given output is differential
+ * @out: pointer to out state
+ *
+ * Return: true if output is differential, false if output is single-ended
+ */
+static inline bool zl3073x_out_is_diff(const struct zl3073x_out *out)
+{
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_out_is_enabled - check if the given output is enabled
+ * @out: pointer to out state
+ *
+ * Return: true if output is enabled, false if output is disabled
+ */
+static inline bool zl3073x_out_is_enabled(const struct zl3073x_out *out)
+{
+ return !!FIELD_GET(ZL_OUTPUT_CTRL_EN, out->ctrl);
+}
+
+/**
+ * zl3073x_out_synth_get - get synth connected to given output
+ * @out: pointer to out state
+ *
+ * Return: index of synth connected to given output.
+ */
+static inline u8 zl3073x_out_synth_get(const struct zl3073x_out *out)
+{
+ return FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, out->ctrl);
+}
+
+#endif /* _ZL3073X_OUT_H */
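
As a worked example of how dpll.c derives output frequencies from the cached out/synth state defined above (all numbers purely illustrative): with the attached synth at 156.25 MHz and out->div = 15625 the P-pin runs at 10 kHz, and for an N-divided signal format with esync_n_period = 10 the N-pin runs at 1 kHz. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int synth_freq = 156250000;	/* illustrative synth frequency */
	unsigned int div = 15625;		/* out->div */
	unsigned int esync_n_period = 10;	/* N-pin divisor (N-div formats) */

	printf("P-pin: %u Hz\n", synth_freq / div);			/* 10000 */
	printf("N-pin: %u Hz\n", synth_freq / div / esync_n_period);	/* 1000 */
	return 0;
}
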
diff --git a/drivers/dpll/zl3073x/prop.c b/drivers/dpll/zl3073x/prop.c
index 9e1fca5cdaf1..4ed153087570 100644
--- a/drivers/dpll/zl3073x/prop.c
+++ b/drivers/dpll/zl3073x/prop.c
@@ -46,10 +46,10 @@ zl3073x_pin_check_freq(struct zl3073x_dev *zldev, enum dpll_pin_direction dir,
/* Get output pin synthesizer */
out = zl3073x_output_pin_out_get(id);
- synth = zl3073x_out_synth_get(zldev, out);
+ synth = zl3073x_dev_out_synth_get(zldev, out);
/* Get synth frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
+ synth_freq = zl3073x_dev_synth_freq_get(zldev, synth);
/* Check the frequency divides synth frequency */
if (synth_freq % (u32)freq)
@@ -93,13 +93,13 @@ zl3073x_prop_pin_package_label_set(struct zl3073x_dev *zldev,
prefix = "REF";
ref = zl3073x_input_pin_ref_get(id);
- is_diff = zl3073x_ref_is_diff(zldev, ref);
+ is_diff = zl3073x_dev_ref_is_diff(zldev, ref);
} else {
u8 out;
prefix = "OUT";
out = zl3073x_output_pin_out_get(id);
- is_diff = zl3073x_out_is_diff(zldev, out);
+ is_diff = zl3073x_dev_out_is_diff(zldev, out);
}
if (!is_diff)
@@ -217,8 +217,8 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
* the synth frequency count.
*/
out = zl3073x_output_pin_out_get(index);
- synth = zl3073x_out_synth_get(zldev, out);
- f = 2 * zl3073x_synth_freq_get(zldev, synth);
+ synth = zl3073x_dev_out_synth_get(zldev, out);
+ f = 2 * zl3073x_dev_synth_freq_get(zldev, synth);
props->dpll_props.phase_gran = f ? div_u64(PSEC_PER_SEC, f) : 1;
}
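
A quick standalone check of the phase-granularity arithmetic above, using an illustrative 625 MHz synth: f = 2 * 625 MHz = 1.25e9 half synth cycles per second, so phase_gran = 1e12 / 1.25e9 = 800 ps.

#include <stdio.h>

#define PSEC_PER_SEC 1000000000000ULL	/* same value as the kernel constant */

int main(void)
{
	unsigned long long synth_freq = 625000000ULL;	/* illustrative */
	unsigned long long f = 2 * synth_freq;		/* half synth cycles per second */

	printf("phase_gran = %llu ps\n", f ? PSEC_PER_SEC / f : 1);	/* 800 */
	return 0;
}
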
diff --git a/drivers/dpll/zl3073x/ref.c b/drivers/dpll/zl3073x/ref.c
new file mode 100644
index 000000000000..aa2de13effa8
--- /dev/null
+++ b/drivers/dpll/zl3073x/ref.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "ref.h"
+
+/**
+ * zl3073x_ref_freq_factorize - factorize given frequency
+ * @freq: input frequency
+ * @base: base frequency
+ * @mult: multiplier
+ *
+ * Checks if the given frequency can be factorized using one of the
+ * supported base frequencies. If so, the base frequency and multiplier
+ * are stored in the corresponding output parameters if they are not NULL.
+ *
+ * Return: 0 on success, -EINVAL if the frequency cannot be factorized
+ */
+int
+zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
+{
+ static const u16 base_freqs[] = {
+ 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
+ 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
+ 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
+ 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
+ 32000, 40000, 50000, 62500,
+ };
+ u32 div;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
+ div = freq / base_freqs[i];
+
+ if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
+ if (base)
+ *base = base_freqs[i];
+ if (mult)
+ *mult = div;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * zl3073x_ref_state_fetch - fetch input reference state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to fetch state for
+ *
+ * Function fetches the state of the given input reference from hardware and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_ref *ref = &zldev->ref[index];
+ int rc;
+
+ /* For differential type inputs the N-pin reference shares
+ * part of the configuration with the P-pin counterpart.
+ */
+ if (zl3073x_is_n_pin(index) && zl3073x_ref_is_diff(ref - 1)) {
+ struct zl3073x_ref *p_ref = ref - 1; /* P-pin counterpart */
+
+ /* Copy the shared items from the P-pin */
+ ref->config = p_ref->config;
+ ref->esync_n_div = p_ref->esync_n_div;
+ ref->freq_base = p_ref->freq_base;
+ ref->freq_mult = p_ref->freq_mult;
+ ref->freq_ratio_m = p_ref->freq_ratio_m;
+ ref->freq_ratio_n = p_ref->freq_ratio_n;
+ ref->phase_comp = p_ref->phase_comp;
+ ref->sync_ctrl = p_ref->sync_ctrl;
+
+ return 0; /* Finish - no non-shared items for now */
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read ref_config register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref->config);
+ if (rc)
+ return rc;
+
+ /* Read frequency related registers */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &ref->freq_base);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &ref->freq_mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &ref->freq_ratio_m);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &ref->freq_ratio_n);
+ if (rc)
+ return rc;
+
+ /* Read eSync and N-div related registers */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &ref->esync_n_div);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref->sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Read phase compensation register */
+ rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP,
+ &ref->phase_comp);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
+ str_enabled_disabled(zl3073x_ref_is_enabled(ref)),
+ zl3073x_ref_is_diff(ref) ? "differential" : "single-ended");
+
+ return rc;
+}
+
+/**
+ * zl3073x_ref_state_get - get current input reference state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to get state for
+ *
+ * Return: pointer to given input reference state
+ */
+const struct zl3073x_ref *
+zl3073x_ref_state_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return &zldev->ref[index];
+}
+
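+/**
+ * zl3073x_ref_state_set - commit new input reference state to hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to set state for
+ * @ref: input reference state to be committed
+ *
+ * Function writes the fields that differ from the cached state into the
+ * reference mailbox, commits the mailbox and on success updates the cached
+ * state.
+ *
+ * Return: 0 on success, <0 on error
+ */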
+int zl3073x_ref_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_ref *ref)
+{
+ struct zl3073x_ref *dref = &zldev->ref[index];
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Update mailbox with changed values */
+ if (dref->freq_base != ref->freq_base)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE,
+ ref->freq_base);
+ if (!rc && dref->freq_mult != ref->freq_mult)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT,
+ ref->freq_mult);
+ if (!rc && dref->freq_ratio_m != ref->freq_ratio_m)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M,
+ ref->freq_ratio_m);
+ if (!rc && dref->freq_ratio_n != ref->freq_ratio_n)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N,
+ ref->freq_ratio_n);
+ if (!rc && dref->esync_n_div != ref->esync_n_div)
+ rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
+ ref->esync_n_div);
+ if (!rc && dref->sync_ctrl != ref->sync_ctrl)
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL,
+ ref->sync_ctrl);
+ if (!rc && dref->phase_comp != ref->phase_comp)
+ rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP,
+ ref->phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* After successful commit store new state */
+ dref->freq_base = ref->freq_base;
+ dref->freq_mult = ref->freq_mult;
+ dref->freq_ratio_m = ref->freq_ratio_m;
+ dref->freq_ratio_n = ref->freq_ratio_n;
+ dref->esync_n_div = ref->esync_n_div;
+ dref->sync_ctrl = ref->sync_ctrl;
+ dref->phase_comp = ref->phase_comp;
+
+ return 0;
+}
diff --git a/drivers/dpll/zl3073x/ref.h b/drivers/dpll/zl3073x/ref.h
new file mode 100644
index 000000000000..efc7f59cd9f9
--- /dev/null
+++ b/drivers/dpll/zl3073x/ref.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_REF_H
+#define _ZL3073X_REF_H
+
+#include <linux/bitfield.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_ref - input reference state
+ * @ffo: current fractional frequency offset
+ * @phase_comp: phase compensation
+ * @esync_n_div: divisor for embedded sync or n-divided signal formats
+ * @freq_base: frequency base
+ * @freq_mult: frequency multiplier
+ * @freq_ratio_m: FEC mode multiplier
+ * @freq_ratio_n: FEC mode divisor
+ * @config: reference config
+ * @sync_ctrl: reference sync control
+ * @mon_status: reference monitor status
+ */
+struct zl3073x_ref {
+ s64 ffo;
+ u64 phase_comp;
+ u32 esync_n_div;
+ u16 freq_base;
+ u16 freq_mult;
+ u16 freq_ratio_m;
+ u16 freq_ratio_n;
+ u8 config;
+ u8 sync_ctrl;
+ u8 mon_status;
+};
+
+int zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index);
+
+const struct zl3073x_ref *zl3073x_ref_state_get(struct zl3073x_dev *zldev,
+ u8 index);
+
+int zl3073x_ref_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_ref *ref);
+
+int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
+
+/**
+ * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * @ref: pointer to ref state
+ *
+ * Return: the latest measured fractional frequency offset
+ */
+static inline s64
+zl3073x_ref_ffo_get(const struct zl3073x_ref *ref)
+{
+ return ref->ffo;
+}
+
+/**
+ * zl3073x_ref_freq_get - get given input reference frequency
+ * @ref: pointer to ref state
+ *
+ * Return: frequency of the given input reference
+ */
+static inline u32
+zl3073x_ref_freq_get(const struct zl3073x_ref *ref)
+{
+ return mul_u64_u32_div(ref->freq_base * ref->freq_mult,
+ ref->freq_ratio_m, ref->freq_ratio_n);
+}
+
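+/*
+ * Illustrative example (hypothetical register values, not taken from a
+ * datasheet): with freq_base = 6250, freq_mult = 4000, freq_ratio_m = 1 and
+ * freq_ratio_n = 1, zl3073x_ref_freq_get() returns 6250 * 4000 * 1 / 1 =
+ * 25000000, i.e. 25 MHz assuming the device reports frequencies in Hz.
+ */
+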
+/**
+ * zl3073x_ref_freq_set - set given input reference frequency
+ * @ref: pointer to ref state
+ * @freq: frequency to be set
+ *
+ * Return: 0 on success, <0 when frequency cannot be factorized
+ */
+static inline int
+zl3073x_ref_freq_set(struct zl3073x_ref *ref, u32 freq)
+{
+ u16 base, mult;
+ int rc;
+
+ rc = zl3073x_ref_freq_factorize(freq, &base, &mult);
+ if (rc)
+ return rc;
+
+ ref->freq_base = base;
+ ref->freq_mult = mult;
+
+ return 0;
+}
+
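+/*
+ * Caller sketch with a hypothetical 10 MHz request, assuming frequencies are
+ * expressed in Hz: zl3073x_ref_freq_set(ref, 10000000) relies on
+ * zl3073x_ref_freq_factorize() to split the value into two u16 factors,
+ * e.g. base = 10000 and mult = 1000 (the exact split is up to the helper).
+ */
+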
+/**
+ * zl3073x_ref_is_diff - check if the given input reference is differential
+ * @ref: pointer to ref state
+ *
+ * Return: true if reference is differential, false if reference is single-ended
+ */
+static inline bool
+zl3073x_ref_is_diff(const struct zl3073x_ref *ref)
+{
+ return !!FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref->config);
+}
+
+/**
+ * zl3073x_ref_is_enabled - check if the given input reference is enabled
+ * @ref: pointer to ref state
+ *
+ * Return: true if input reference is enabled, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_enabled(const struct zl3073x_ref *ref)
+{
+ return !!FIELD_GET(ZL_REF_CONFIG_ENABLE, ref->config);
+}
+
+/**
+ * zl3073x_ref_is_status_ok - check the given input reference status
+ * @ref: pointer to ref state
+ *
+ * Return: true if the status is ok, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_status_ok(const struct zl3073x_ref *ref)
+{
+ return ref->mon_status == ZL_REF_MON_STATUS_OK;
+}
+
+#endif /* _ZL3073X_REF_H */
diff --git a/drivers/dpll/zl3073x/synth.c b/drivers/dpll/zl3073x/synth.c
new file mode 100644
index 000000000000..da839572dab2
--- /dev/null
+++ b/drivers/dpll/zl3073x/synth.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "synth.h"
+
+/**
+ * zl3073x_synth_state_fetch - fetch synth state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to fetch state for
+ *
+ * Function fetches state of the given synthesizer from the hardware and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_synth *synth = &zldev->synth[index];
+ int rc;
+
+ /* Read synth control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth->ctrl);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read synth configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
+ ZL_REG_SYNTH_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* The output frequency is determined by the following formula:
+ * base * multiplier * numerator / denominator
+ *
+ * Read registers with these values
+ */
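+ /*
+ * Worked example with hypothetical register values (for illustration
+ * only): base = 15625, multiplier = 1600, numerator = 1 and
+ * denominator = 1 give 15625 * 1600 * 1 / 1 = 25000000 Hz (25 MHz).
+ */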
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &synth->freq_base);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &synth->freq_mult);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &synth->freq_m);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &synth->freq_n);
+ if (rc)
+ return rc;
+
+ /* Check denominator for zero to avoid div by 0 */
+ if (!synth->freq_n) {
+ dev_err(zldev->dev,
+ "Zero divisor for SYNTH%u retrieved from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
+ zl3073x_synth_freq_get(synth));
+
+ return rc;
+}
+
+/**
+ * zl3073x_synth_state_get - get current synth state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to get state for
+ *
+ * Return: pointer to given synth state
+ */
+const struct zl3073x_synth *zl3073x_synth_state_get(struct zl3073x_dev *zldev,
+ u8 index)
+{
+ return &zldev->synth[index];
+}
diff --git a/drivers/dpll/zl3073x/synth.h b/drivers/dpll/zl3073x/synth.h
new file mode 100644
index 000000000000..6c55eb8a888c
--- /dev/null
+++ b/drivers/dpll/zl3073x/synth.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_SYNTH_H
+#define _ZL3073X_SYNTH_H
+
+#include <linux/bitfield.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_synth - synthesizer state
+ * @freq_mult: frequency multiplier
+ * @freq_base: frequency base
+ * @freq_m: frequency numerator
+ * @freq_n: frequency denominator
+ * @ctrl: synth control
+ */
+struct zl3073x_synth {
+ u32 freq_mult;
+ u16 freq_base;
+ u16 freq_m;
+ u16 freq_n;
+ u8 ctrl;
+};
+
+int zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 synth_id);
+
+const struct zl3073x_synth *zl3073x_synth_state_get(struct zl3073x_dev *zldev,
+ u8 synth_id);
+
+int zl3073x_synth_state_set(struct zl3073x_dev *zldev, u8 synth_id,
+ const struct zl3073x_synth *synth);
+
+/**
+ * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
+ * @synth: pointer to synth state
+ *
+ * Return: ID of the DPLL the given synthesizer is driven by
+ */
+static inline u8 zl3073x_synth_dpll_get(const struct zl3073x_synth *synth)
+{
+ return FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth->ctrl);
+}
+
+/**
+ * zl3073x_synth_freq_get - get current synth frequency
+ * @synth: pointer to synth state
+ *
+ * Return: frequency of the given synthesizer
+ */
+static inline u32 zl3073x_synth_freq_get(const struct zl3073x_synth *synth)
+{
+ return mul_u64_u32_div(synth->freq_base * synth->freq_m,
+ synth->freq_mult, synth->freq_n);
+}
+
+/**
+ * zl3073x_synth_is_enabled - check if the given synth is enabled
+ * @synth: pointer to synth state
+ *
+ * Return: true if synth is enabled, false otherwise
+ */
+static inline bool zl3073x_synth_is_enabled(const struct zl3073x_synth *synth)
+{
+ return FIELD_GET(ZL_SYNTH_CTRL_EN, synth->ctrl);
+}
+
+#endif /* _ZL3073X_SYNTH_H */
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 103b2c2eba2a..0c5b94e64ea1 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1184,10 +1184,22 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
if (ret)
return ret;
- /* Verify OCRAM has been initialized */
+ /*
+ * Verify that OCRAM has been initialized.
+ * During a warm reset, OCRAM contents are retained, but the control
+ * and status registers are reset to their default values. Therefore,
+ * ECC must be explicitly re-enabled in the control register.
+ * It is an error if INITCOMPLETEA is clear while ECC_EN is already set.
+ */
if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
- (base + ALTR_A10_ECC_INITSTAT_OFST)))
- return -ENODEV;
+ (base + ALTR_A10_ECC_INITSTAT_OFST))) {
+ if (!ecc_test_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST)))
+ ecc_set_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST));
+ else
+ return -ENODEV;
+ }
/* Enable IRQ on Single Bit Error */
writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
@@ -1357,7 +1369,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
@@ -1447,7 +1459,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_USB */
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
index 1ded4c3f0213..1a1092793092 100644
--- a/drivers/edac/versalnet_edac.c
+++ b/drivers/edac/versalnet_edac.c
@@ -605,21 +605,23 @@ static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
length = result[MSG_ERR_LENGTH];
offset = result[MSG_ERR_OFFSET];
+ /*
+ * The data can come in two stretches. Construct the regs from two
+ * messages. The offset indicates where in the regs buffer this chunk
+ * of data is to be placed.
+ */
+ for (i = 0 ; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+
if (result[TOTAL_ERR_LENGTH] > length) {
if (!mc_priv->part_len)
mc_priv->part_len = length;
else
mc_priv->part_len += length;
- /*
- * The data can come in 2 stretches. Construct the regs from 2
- * messages the offset indicates the offset from which the data is to
- * be taken
- */
- for (i = 0 ; i < length; i++) {
- k = offset + i;
- j = ERROR_DATA + i;
- mc_priv->regs[k] = result[j];
- }
+
if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
return 0;
mc_priv->part_len = 0;
@@ -705,7 +707,7 @@ static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
/* Convert to bytes */
length = result[TOTAL_ERR_LENGTH] * 4;
log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
- sec_sev, (void *)&result[ERROR_DATA], length);
+ sec_sev, (void *)&mc_priv->regs, length);
return 0;
}
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index e5e0174a0335..66e1106db5e7 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -577,6 +577,8 @@ void fw_card_initialize(struct fw_card *card,
INIT_LIST_HEAD(&card->transactions.list);
spin_lock_init(&card->transactions.lock);
+ spin_lock_init(&card->topology_map.lock);
+
card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 2f73bcd5696f..ed3ae8cdb0cd 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -441,12 +441,13 @@ static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_nod
const u32 *self_ids, int self_id_count)
{
__be32 *map = buffer;
+ u32 next_generation = be32_to_cpu(buffer[1]) + 1;
int node_count = (root_node_id & 0x3f) + 1;
memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(buffer[1]) + 1);
+ *map++ = cpu_to_be32(next_generation);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 175836467f21..d8d93059ac04 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2548,10 +2548,17 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
struct lineinfo_changed_ctx *ctx;
struct gpio_desc *desc = data;
+ struct file *fp;
if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
return NOTIFY_DONE;
+ /* Keep the file descriptor alive for the duration of the notification. */
+ fp = get_file_active(&cdev->fp);
+ if (!fp)
+ /* Chardev file descriptor was or is being released. */
+ return NOTIFY_DONE;
+
/*
* If this is called from atomic context (for instance: with a spinlock
* taken by the atomic notifier chain), any sleeping calls must be done
@@ -2575,8 +2582,6 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
/* Keep the GPIO device alive until we emit the event. */
ctx->gdev = gpio_device_get(desc->gdev);
ctx->cdev = cdev;
- /* Keep the file descriptor alive too. */
- get_file(ctx->cdev->fp);
INIT_WORK(&ctx->work, lineinfo_changed_func);
queue_work(ctx->gdev->line_state_wq, &ctx->work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f5d5c45ddc0d..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
- goto cleanup_entity;
+ goto error_free_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2819aceaab74..076bbc09f30c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3414,10 +3414,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD, it's handled specially */
+ /* skip CG for VCE/UVD/VPE, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8561ad7f6180..ed3bef1edfe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -82,6 +82,18 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ /*
+ * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+ * Such buffers cannot be safely accessed over P2P due to device-local
+ * compression metadata. Fall back to the system-memory path instead.
+ * This applies when:
+ * - the device supports GFX12 (GC 12.x or newer), and
+ * - the BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+ bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ attach->peer2peer = false;
+
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 9cddbf50442a..37270c4dab8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -280,6 +280,8 @@ int isp_kernel_buffer_alloc(struct device *dev, u64 size,
if (ret)
return ret;
+ /* Ensure *bo is NULL so a new BO will be created */
+ *bo = NULL;
ret = amdgpu_bo_create_kernel(adev,
size,
ISP_MC_ADDR_ALIGN,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index aa9ee5dffa45..9d568c16beb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1372,7 +1372,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching == ttm_cached)
+ if (ttm && ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 761bad98da3e..4d0096d0baa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -151,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
{
struct amdgpu_userq_fence *userq_fence, *tmp;
struct dma_fence *fence;
+ unsigned long flags;
u64 rptr;
int i;
if (!fence_drv)
return;
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
rptr = amdgpu_userq_fence_read(fence_drv);
- spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
@@ -174,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
list_del(&userq_fence->link);
dma_fence_put(fence);
}
- spin_unlock(&fence_drv->fence_list_lock);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c1a801203949..b1aaef962ad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2078,7 +2078,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = before->bo_va->base.bo;
amdgpu_vm_it_insert(before, &vm->va);
- if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (before->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2093,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = after->bo_va->base.bo;
amdgpu_vm_it_insert(after, &vm->va);
- if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (after->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d61eb9f187c6..f2be16e700c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -5872,9 +5872,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (vmid)
+ if (vmid && !ring->adev->gfx.rs64_enable)
gfx_v11_0_ring_emit_de_meta(ring,
- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
}
amdgpu_ring_write(ring, header);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index baf097d2e1ac..ab0bf880d3d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -878,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index eacf4e93ba2f..cb7123ec1a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- if (amdgpu_dpm_reset_vcn_is_supported(adev))
+ if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 714350cabf2f..8bd457dea4cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 12):
- if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
+ if ((adev->psp.sos.fw_version >= 0x00450025) &&
+ amdgpu_dpm_reset_vcn_is_supported(adev) &&
+ !amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index a65c67cf56ff..f1e7583650c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
}
- if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
- pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
properties->ctx_save_restore_area_size,
topo_dev->node_props.cwsr_size);
err = -EINVAL;
goto out_err_unreserve;
}
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
if (!topo_dev)
return -EINVAL;
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d72411c3379..74a1d3e1d52b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3687,6 +3687,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
+ update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
+
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 91c0188a29b2..2a7a491a62e0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3859,6 +3859,97 @@ void amdgpu_dm_update_connector_after_detect(
update_subconnector_property(aconnector);
}
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+ if (!sink1 || !sink2)
+ return false;
+
+ if (sink1->sink_signal != sink2->sink_signal)
+ return false;
+
+ if (sink1->dc_edid.length != sink2->dc_edid.length)
+ return false;
+
+ if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+ sink1->dc_edid.length) != 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (HPD low for longer than hdmi_hpd_debounce_delay_ms) and a
+ * spontaneous RX HPD toggle (shorter than hdmi_hpd_debounce_delay_ms).
+ *
+ * If the toggle is shorter than this delay, the driver compares the old and
+ * new sink capabilities and permits a hotplug event only if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+ struct amdgpu_dm_connector *aconnector =
+ container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+ hdmi_hpd_debounce_work);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
+ bool fake_reconnect = false;
+ bool reallow_idle = false;
+ bool ret = false;
+ guard(mutex)(&aconnector->hpd_lock);
+
+ /* Re-detect the display */
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reallow_idle = true;
+ }
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
+
+ if (ret) {
+ /* Apply workaround delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ /* Compare sinks to determine if this was a spontaneous HPD toggle */
+ if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+ /*
+ * Sinks match - this was a spontaneous HDMI HPD toggle.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+ fake_reconnect = true;
+ }
+
+ /* Update connector state */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ /* Only notify OS if sink actually changed */
+ if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+ /* Release the cached sink reference */
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (reallow_idle && dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, true);
+ }
+}
+
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
@@ -3868,6 +3959,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
+ bool debounce_required = false;
if (adev->dm.disable_hpd_irq)
return;
@@ -3890,6 +3982,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
+ /*
+ * Check for HDMI disconnect with debounce enabled.
+ */
+ debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+ dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+ new_connection_type == dc_connection_none &&
+ aconnector->dc_link->local_sink != NULL);
+
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3899,7 +3999,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else if (debounce_required) {
+ /*
+ * HDMI disconnect detected - schedule delayed work instead of
+ * processing immediately. This allows spurious HPD toggles to be
+ * distinguished from physical unplugs.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+ aconnector->hdmi_hpd_debounce_delay_ms);
+
+ /* Cache the current sink for later comparison */
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_retain(aconnector->hdmi_prev_sink);
+
+ /* Schedule delayed detection. */
+ if (mod_delayed_work(system_wq,
+ &aconnector->hdmi_hpd_debounce_work,
+ msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+ drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
} else {
+ /*
+ * If hdmi_hpd_debounce_work is already scheduled, exit early and
+ * let it handle the detection.
+ */
+ if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+ return;
+
scoped_guard(mutex, &adev->dm.dc_lock) {
dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -7388,6 +7515,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+ /* Cancel and flush any pending HDMI HPD debounce work */
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -8549,6 +8683,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
+ aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+
/*
* configure support HPD hot plug connector_>polled default value is 0
* which means HPD hot plug not supported
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index db75e991ac7b..8ca738957598 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -819,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+ /* HDMI HPD debounce support */
+ unsigned int hdmi_hpd_debounce_delay_ms;
+ struct delayed_work hdmi_hpd_debounce_work;
+ struct dc_sink *hdmi_prev_sink;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5e92eaa67aa3..dbd1da4d85d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+ uint64_t effective_kbps = (uint64_t)kbps;
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+ effective_kbps *= 1006;
+ effective_kbps = div_u64(effective_kbps, 1000);
+ }
- return fec_overhead_multiplier_x1000;
+ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
- u64 peak_kbps = kbps;
+ uint64_t pbn_effective = (uint64_t)pbn;
+
+ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+ pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+ else
+ pbn_effective *= 1000;
- peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}
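+
+/*
+ * Round-trip sanity example with made-up numbers: kbps_to_pbn(540000, false)
+ * yields 540000 * 64 / (54 * 8 * 1000) = 80 PBN, and pbn_to_kbps(80, false)
+ * converts back to 80 * 1000 * 8 * 54 / 64 = 540000 kbps. The is_peak_pbn
+ * and with_margin variants additionally apply the ~0.6% overhead factor.
+ */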
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
@@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;
for (i = 0; i < count; i++) {
@@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1763,18 +1762,6 @@ clean_exit:
return ret;
}
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
- uint64_t kbps = (uint64_t)pbn;
-
- kbps *= (1000000 / PEAK_FACTOR_X1000);
- kbps *= 8;
- kbps *= 54;
- kbps /= 64;
-
- return (uint32_t)kbps;
-}
-
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
@@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) {
- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
} else {
/* For topology LCT 1 case - only one mstb*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b11383fba35f..1eb04772f5da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
+ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+ new_clocks->ref_dtbclk_khz = 0;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
- if (actual_dtbclk) {
+ if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index de6d62401362..c899c09ea31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -1411,7 +1411,7 @@ static void dccg35_set_dtbclk_dto(
__func__, params->otg_inst, params->pixclk_khz,
params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
- } else {
+ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 9477c9f9e196..59c42db5382e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
+
+ /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+ if (!power_on) {
+ struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+
+ if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+ dpp5->funcs->dpp_force_disable_cursor(dpp5);
+ }
+
break;
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index b12c11bd6a14..eb262ce42e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1691,7 +1691,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t read_dpcd_retry_cnt = 20;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -1734,12 +1734,13 @@ static bool retrieve_link_cap(struct dc_link *link)
}
dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ /*
+ * Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
status = core_link_read_dpcd(
link,
DP_DPCD_REV,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ce421bcddcb0..1aae46d703ba 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -1260,6 +1260,17 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_v_total_for_static_ramp(
core_freesync, stream, in_out_vrr);
}
+
+ /*
+ * If VRR is inactive, set vtotal min and max to nominal vtotal
+ */
+ if (in_out_vrr->state == VRR_STATE_INACTIVE) {
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(stream,
+ in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min;
+ return;
+ }
}
unsigned long long mod_freesync_calc_nominal_field_rate(
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index 72480db1f00d..515aceac22b1 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -13,8 +13,8 @@
static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
MODULE_PARM_DESC(active,
- "Choose which drm client to start, default is"
- CONFIG_DRM_CLIENT_DEFAULT "]");
+ "Choose which drm client to start, default is "
+ CONFIG_DRM_CLIENT_DEFAULT);
/**
* drm_client_setup() - Setup in-kernel DRM clients
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 38f82391bfda..a30493ed9715 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
formats_size = sizeof(__u32) * plane->format_count;
if (WARN_ON(!formats_size)) {
/* 0 formats are never expected */
- return 0;
+ return ERR_PTR(-EINVAL);
}
modifiers_size =
@@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return NULL;
+ return blob;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 801235a5bc0a..a2d2cecf7121 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -39,14 +39,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- /* PTL doesn't have a PHY connected to PORT B; as such,
- * there will never be a case where PTL uses PHY B.
- * WCL uses PORT A and B with the C10 PHY.
- * Reusing the condition for WCL and extending it for PORT B
- * should not cause any issues for PTL.
- */
- if (display->platform.pantherlake && phy < PHY_C)
- return true;
+ if (display->platform.pantherlake) {
+ if (display->platform.pantherlake_wildcatlake)
+ return phy <= PHY_B;
+ else
+ return phy == PHY_A;
+ }
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index a002bc6ce7b0..f3f1f25b0f38 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
PLATFORM_GROUP(dgfx),
};
+static const u16 wcl_ids[] = {
+ INTEL_WCL_IDS(ID),
+ 0
+};
+
static const struct platform_desc ptl_desc = {
PLATFORM(pantherlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(pantherlake, wildcatlake),
+ .pciidlist = wcl_ids,
+ },
+ {},
+ }
};
__diag_pop();
@@ -1482,6 +1494,7 @@ static const struct {
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};
static const struct {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index f329f1beafef..a910642d589c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -101,7 +101,9 @@ struct pci_dev;
/* Display ver 14.1 (based on GMD ID) */ \
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
- func(pantherlake)
+ func(pantherlake) \
+ func(pantherlake_wildcatlake)
+
#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 4a4cace1f879..e1455fd7277f 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
+MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
+
#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
MODULE_FIRMWARE(XE3LPD_DMC_PATH);
@@ -183,9 +186,10 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
{
const char *fw_path = NULL;
u32 max_fw_size = 0;
-
- if (DISPLAY_VERx100(display) == 3002 ||
- DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3002) {
+ fw_path = XE3LPD_3002_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 10eb93a34cf2..4619237f1346 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -585,6 +585,10 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
int ret;
+ /* TODO: Enable Panel Replay on MST once it's properly implemented. */
+ if (intel_dp->mst_detect == DRM_DP_MST)
+ return;
+
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
&intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
if (ret < 0)
@@ -888,7 +892,8 @@ static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
u32 current_dc_state = intel_display_power_get_current_dc_state(display);
- struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
current_dc_state != DC_STATE_EN_UPTO_DC6) ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
index cac6d64ab67d..4e8b3f1c7e25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
nvkm_memory_unref(&fw->inst);
nvkm_falcon_fw_dtor_sigs(fw);
nvkm_firmware_dtor(&fw->fw);
+ kfree(fw->boot);
+ fw->boot = NULL;
}
static const struct nvkm_firmware_func
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 156c7a0b62a2..3f43686f0195 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -288,6 +288,23 @@ panthor_gem_create_with_handle(struct drm_file *file,
panthor_gem_debugfs_set_usage_flags(bo, 0);
+ /* If this is a write-combine mapping, we query the sgt to force a CPU
+ * cache flush (dma_map_sgtable() is called when the sgt is created).
+ * This ensures the zeroing is visible to any uncached mapping created
+ * by vmap/mmap.
+ * FIXME: Ideally this should be done when pages are allocated, not at
+ * BO creation time.
+ */
+ if (shmem->map_wc) {
+ struct sg_table *sgt;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto out_put_gem;
+ }
+ }
+
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
@@ -296,6 +313,7 @@ panthor_gem_create_with_handle(struct drm_file *file,
if (!ret)
*size = bo->base.base.size;
+out_put_gem:
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b5b54e876d4..167d6f122b8e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
return true;
- if (down_read_trylock(&rdev->exclusive_lock)) {
- radeon_fence_process(rdev, ring);
- up_read(&rdev->exclusive_lock);
-
- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
- return true;
- }
return false;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 59d5c1ba145a..6c84bd69b11f 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -3148,6 +3148,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
dc->client.parent = &parent->client;
dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+ put_device(companion);
}
return 0;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index b5089b772267..ddfb2858acbf 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -913,15 +913,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;
- /* If the bootloader enabled DSI it needs to be disabled
- * in order for the panel initialization commands to be
- * properly sent.
- */
- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-
- if (value & DSI_POWER_CONTROL_ENABLE)
- tegra_dsi_disable(dsi);
-
err = tegra_dsi_prepare(dsi);
if (err < 0) {
dev_err(dsi->dev, "failed to prepare: %d\n", err);
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index 5adab6b22916..d0b6a1fa6efa 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
if (err)
goto put_channel;
- if (supported)
+ if (supported) {
+ struct pid *pid = get_task_pid(current, PIDTYPE_TGID);
context->memory_context = host1x_memory_context_alloc(
- host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
+ host, client->base.dev, pid);
+ put_pid(pid);
+ }
if (IS_ERR(context->memory_context)) {
if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
index 718832b08d96..c46f17ba7236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -100,8 +100,10 @@ vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
if (vmw->has_mob) {
if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
return VMW_CURSOR_UPDATE_MOB;
+ else
+ return VMW_CURSOR_UPDATE_GB_ONLY;
}
-
+ drm_warn_once(&vmw->drm, "Unknown Cursor Type!\n");
return VMW_CURSOR_UPDATE_NONE;
}
@@ -139,6 +141,7 @@ static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
{
switch (update_type) {
case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_GB_ONLY:
case VMW_CURSOR_UPDATE_NONE:
return 0;
case VMW_CURSOR_UPDATE_MOB:
@@ -623,6 +626,7 @@ int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
if (!surface || vps->cursor.legacy.id == surface->snooper.id)
vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
break;
+ case VMW_CURSOR_UPDATE_GB_ONLY:
case VMW_CURSOR_UPDATE_MOB: {
bo = vmw_user_object_buffer(&vps->uo);
if (bo) {
@@ -737,6 +741,7 @@ void
vmw_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vmw_bo *bo;
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_state =
@@ -762,6 +767,15 @@ vmw_cursor_plane_atomic_update(struct drm_plane *plane,
case VMW_CURSOR_UPDATE_MOB:
vmw_cursor_update_mob(dev_priv, vps);
break;
+ case VMW_CURSOR_UPDATE_GB_ONLY:
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo)
+ vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual,
+ vps->base.crtc_w,
+ vps->base.crtc_h,
+ vps->base.hotspot_x,
+ vps->base.hotspot_y);
+ break;
case VMW_CURSOR_UPDATE_NONE:
/* do nothing */
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
index 40694925a70e..0c2cc0699b0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -33,6 +33,7 @@ static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
enum vmw_cursor_update_type {
VMW_CURSOR_UPDATE_NONE = 0,
VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_GB_ONLY,
VMW_CURSOR_UPDATE_MOB,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d539f25b5fbe..3057f8baa7d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3668,6 +3668,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id = header->id;
+ if (header->size > SVGA_CMD_MAX_DATASIZE) {
+ VMW_DEBUG_USER("SVGA3D command: %d is too big.\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -E2BIG;
+ }
*size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 7de20e56082c..fd4e76486f2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -32,22 +32,22 @@ enum vmw_bo_dirty_method {
/**
* struct vmw_bo_dirty - Dirty information for buffer objects
+ * @ref_count: Reference count for this structure. Must be the first member,
+ * since the release callback frees the whole structure through the
+ * &ref_count pointer.
* @start: First currently dirty bit
* @end: Last currently dirty bit + 1
* @method: The currently used dirty method
* @change_count: Number of consecutive method change triggers
- * @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
struct vmw_bo_dirty {
+ struct kref ref_count;
unsigned long start;
unsigned long end;
enum vmw_bo_dirty_method method;
unsigned int change_count;
- unsigned int ref_count;
unsigned long bitmap_size;
unsigned long bitmap[];
};
@@ -221,7 +221,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
int ret;
if (dirty) {
- dirty->ref_count++;
+ kref_get(&dirty->ref_count);
return 0;
}
@@ -235,7 +235,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
- dirty->ref_count = 1;
+ kref_init(&dirty->ref_count);
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else {
@@ -274,10 +274,8 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- if (dirty && --dirty->ref_count == 0) {
- kvfree(dirty);
+ if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
vbo->dirty = NULL;
- }
}
/**
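The vmwgfx_page_dirty.c hunk above swaps an open-coded ref_count for a struct kref and releases it via kref_put(&dirty->ref_count, (void *)kvfree), which is only safe because ref_count is now the first member of struct vmw_bo_dirty. For comparison, a minimal sketch of the conventional kref release pattern that avoids the cast (names are illustrative, not taken from the driver):

#include <linux/kref.h>
#include <linux/slab.h>

struct dirty_tracker {				/* hypothetical container */
	struct kref ref_count;
	unsigned long bitmap[];
};

static void dirty_tracker_release(struct kref *kref)
{
	struct dirty_tracker *d = container_of(kref, struct dirty_tracker, ref_count);

	kvfree(d);				/* handles kmalloc or vmalloc backing */
}

static void dirty_tracker_put(struct dirty_tracker *d)
{
	/* kref_put() returns nonzero when the last reference was dropped */
	kref_put(&d->ref_count, dirty_tracker_release);
}

With container_of() in the release callback, ref_count no longer has to sit at offset zero.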
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 7219f6b884b6..4b288eb3f5b0 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -13,7 +13,6 @@ config DRM_XE
select TMPFS
select DRM_BUDDY
select DRM_CLIENT_SELECTION
- select DRM_EXEC
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 51f2a03847f9..f680c8b8f258 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -168,6 +168,7 @@
#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define FAST_CLEAR_VALIGN_FIX REG_BIT(13)
#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 0e502feaca81..6bb278167aaf 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt,
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
- KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+ KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n");
}
for (i = 0; i < info->num_mocs_regs; i++) {
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 870edaf69388..06976cc77918 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -847,22 +847,6 @@ static int xe_irq_msix_init(struct xe_device *xe)
return 0;
}
-static irqreturn_t guc2host_irq_handler(int irq, void *arg)
-{
- struct xe_device *xe = arg;
- struct xe_tile *tile;
- u8 id;
-
- if (!atomic_read(&xe->irq.enabled))
- return IRQ_NONE;
-
- for_each_tile(tile, xe, id)
- xe_guc_irq_handler(&tile->primary_gt->uc.guc,
- GUC_INTR_GUC2HOST);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
unsigned int tile_id, gt_id;
@@ -979,7 +963,7 @@ int xe_irq_msix_request_irqs(struct xe_device *xe)
u16 msix;
msix = GUC2HOST_MSIX;
- err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
DRIVER_NAME "-guc2host", false, &msix);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 9a6df79fc5b6..89cc6d32f041 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -375,6 +375,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ccb09ef4ec9e..cdd1dc540a59 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3369,8 +3369,10 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
op == DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, prefetch_region &&
op != DRM_XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
- !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
+ XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+ /* Guard against undefined shift in BIT(prefetch_region) */
+ (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
+ !(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_UNMAP) ||
XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
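The added bounds check in vm_bind_ioctl_check_args() avoids the undefined behaviour of BIT(prefetch_region) when the shift count is at least as wide as the mask. A stand-alone sketch of the same guard (the helper name is made up for illustration):

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical helper: true if 'region' selects a bit that is set in 'mask'. */
static bool region_in_mask(u32 region, u32 mask)
{
	if (region >= BITS_PER_TYPE(mask))
		return false;		/* BIT(region) would be an undefined shift */

	return BIT(region) & mask;
}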
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index cd03891654a1..3cf30718b200 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -679,6 +679,8 @@ static const struct xe_rtp_entry_sr engine_was[] = {
},
{ XE_RTP_NAME("14023061436"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute), OR,
+ GRAPHICS_VERSION_RANGE(3003, 3005),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
},
@@ -916,6 +918,15 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
+ { XE_RTP_NAME("14024681466"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX))
+ },
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 0a9b44ce4904..b0bab2a1ddcc 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -194,6 +194,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
if (rc)
goto cleanup;
+ mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ amd_sfh_wait_for_response(privdata, cl_data->sensor_idx[i], DISABLE_SENSOR);
writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
mp2_ops->start(privdata, info);
status = amd_sfh_wait_for_response
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 61404d7a43ee..57da4f86a9fa 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -355,6 +355,7 @@ static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "SONiX USB DEVICE" },
+ { "SONiX AK870 PRO" },
{ "Keychron" },
{ "AONE" },
{ "GANSS" },
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index fee134a7eba3..5e9a5b8f7f16 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -553,9 +553,8 @@ static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
if (IS_ERR(new_supply)) {
hid_err(drvdata->hid_dev,
- "failed to register battery '%s' (reason: %ld)\n",
- drvdata->battery_desc.name,
- PTR_ERR(new_supply));
+ "failed to register battery '%s' (reason: %pe)\n",
+ drvdata->battery_desc.name, new_supply);
return;
}
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 69771fd35006..981d1b6e9658 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -75,7 +75,8 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 20, 28, 22, 14, 8);
break;
- case USB_DEVICE_ID_ELECOM_M_XT3URBK:
+ case USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB:
+ case USB_DEVICE_ID_ELECOM_M_XT3URBK_018F:
case USB_DEVICE_ID_ELECOM_M_XT3DRBK:
case USB_DEVICE_ID_ELECOM_M_XT4DRBK:
/*
@@ -119,7 +120,8 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
diff --git a/drivers/hid/hid-haptic.c b/drivers/hid/hid-haptic.c
index aa090684c1f2..fc8a9997f815 100644
--- a/drivers/hid/hid-haptic.c
+++ b/drivers/hid/hid-haptic.c
@@ -86,7 +86,7 @@ int hid_haptic_input_configured(struct hid_device *hdev,
if (hi->application == HID_DG_TOUCHPAD) {
if (haptic->auto_trigger_report &&
haptic->manual_trigger_report) {
- __set_bit(INPUT_PROP_HAPTIC_TOUCHPAD, hi->input->propbit);
+ __set_bit(INPUT_PROP_PRESSUREPAD, hi->input->propbit);
return 1;
}
return 0;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0723b4b1c9ec..c4589075a5ed 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -449,7 +449,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_M_XGL20DLBK 0x00e6
-#define USB_DEVICE_ID_ELECOM_M_XT3URBK 0x00fb
+#define USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB 0x00fb
+#define USB_DEVICE_ID_ELECOM_M_XT3URBK_018F 0x018f
#define USB_DEVICE_ID_ELECOM_M_XT3DRBK 0x00fc
#define USB_DEVICE_ID_ELECOM_M_XT4DRBK 0x00fd
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
@@ -718,6 +719,7 @@
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
+#define I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD 0x8987
#define USB_DEVICE_ID_ITE8595 0x8595
#define USB_DEVICE_ID_ITE_MEDION_E1239T 0xce50
@@ -1543,7 +1545,7 @@
#define USB_VENDOR_ID_SIGNOTEC 0x2133
#define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018
-#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a
-#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155
+#define USB_VENDOR_ID_JIELI_SDK_DEFAULT 0x4c4a
+#define USB_DEVICE_ID_JIELI_SDK_4155 0x4155
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e56e7de53279..2bbb645c2ff4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -399,10 +399,11 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
HID_BATTERY_QUIRK_AVOID_QUERY },
/*
- * Elan I2C-HID touchscreens seem to all report a non present battery,
- * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices.
+ * Elan HID touchscreens all seem to report a non-present battery, so
+ * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices.
*/
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 654879814f97..9cc3e029e9f6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -148,6 +148,14 @@ static const __u8 lenovo_tpIIbtkbd_need_fixup_collection[] = {
0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
};
+static const __u8 lenovo_yoga7x_kbd_need_fixup_collection[] = {
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x65, // Logical Maximum (101)
+ 0x05, 0x07, // Usage Page (Keyboard)
+ 0x19, 0x00, // Usage Minimum (0)
+ 0x29, 0xDD, // Usage Maximum (221)
+};
+
static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -177,6 +185,13 @@ static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[260] = 0x01; /* report count (2) = 0x01 */
}
break;
+ case I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD:
+ if (*rsize == 176 &&
+ memcmp(&rdesc[52], lenovo_yoga7x_kbd_need_fixup_collection,
+ sizeof(lenovo_yoga7x_kbd_need_fixup_collection)) == 0) {
+ rdesc[55] = rdesc[61]; // logical maximum = usage maximum
+ }
+ break;
}
return rdesc;
}
@@ -1538,6 +1553,8 @@ static const struct hid_device_id lenovo_devices[] = {
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
+ { HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD) },
{ }
};
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 0f76e241e0af..a7f10c45f62b 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -142,13 +142,13 @@ static void ntrig_report_version(struct hid_device *hdev)
int ret;
char buf[20];
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
- unsigned char *data = kmalloc(8, GFP_KERNEL);
+ unsigned char *data __free(kfree) = kmalloc(8, GFP_KERNEL);
if (!hid_is_usb(hdev))
return;
if (!data)
- goto err_free;
+ return;
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_CLEAR_FEATURE,
@@ -163,9 +163,6 @@ static void ntrig_report_version(struct hid_device *hdev)
hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n",
buf, data[2], data[3], data[4], data[5]);
}
-
-err_free:
- kfree(data);
}
static ssize_t show_phys_width(struct device *dev,
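The hid-ntrig change relies on the scope-based cleanup helpers from linux/cleanup.h: a pointer annotated with __free(kfree) is freed automatically when it goes out of scope, so the err_free label and the explicit kfree() disappear. A minimal sketch of the idiom, assuming an invented parsing function:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int copy_and_check(const u8 *src, size_t len)
{
	u8 *buf __free(kfree) = kmemdup(src, len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (len < 2 || buf[0] != 0xa5)
		return -EINVAL;		/* buf is freed automatically here */

	return 0;			/* ... and here */
}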
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index 63f6eb9030d1..128aa6abd10b 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -1942,6 +1942,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
"Failed to retrieve DualShock4 calibration info: %d\n",
ret);
ret = -EILSEQ;
+ kfree(buf);
goto transfer_failed;
} else {
break;
@@ -1959,6 +1960,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
if (ret) {
hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ kfree(buf);
goto transfer_failed;
}
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index bcd4bccf1a7c..c89a015686c0 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -410,7 +410,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_ELECOM)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
@@ -915,7 +916,6 @@ static const struct hid_device_id hid_ignore_list[] = {
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
{ }
};
@@ -1064,6 +1064,18 @@ bool hid_ignore(struct hid_device *hdev)
strlen(elan_acpi_id[i].id)))
return true;
break;
+ case USB_VENDOR_ID_JIELI_SDK_DEFAULT:
+ /*
+ * Multiple USB devices with identical IDs (mic & touchscreen).
+ * The touch screen requires hid core processing, but the
+ * microphone does not. They can be distinguished by manufacturer
+ * and serial number.
+ */
+ if (hdev->product == USB_DEVICE_ID_JIELI_SDK_4155 &&
+ strncmp(hdev->name, "SmartlinkTechnology", 19) == 0 &&
+ strncmp(hdev->uniq, "20201111000001", 14) == 0)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index ffa14a4621ef..4c4bac6f792b 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -1369,8 +1369,10 @@ static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev,
event_hook->hdev = hdev;
event_hook->size = ARRAY_SIZE(reconnect_event);
event_hook->event = kmemdup(reconnect_event, event_hook->size, GFP_KERNEL);
- if (!event_hook->event)
+ if (!event_hook->event) {
+ kfree(event_hook);
return -ENOMEM;
+ }
list_add_tail(&event_hook->list, &p->event_hooks->list);
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index edd61ef50e16..95377c5f6335 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -806,8 +806,8 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
static int pidff_needs_playback(struct pidff_device *pidff, int effect_id, int n)
{
- return pidff->effect[effect_id].is_infinite ||
- pidff->effect[effect_id].loop_count != n;
+ return !pidff->effect[effect_id].is_infinite ||
+ pidff->effect[effect_id].loop_count != n;
}
/*
diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c
index 321794807e8d..f81c3bc422f4 100644
--- a/drivers/hwmon/gpd-fan.c
+++ b/drivers/hwmon/gpd-fan.c
@@ -12,9 +12,9 @@
* Copyright (c) 2024 Cryolitia PukNgae
*/
-#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/hwmon.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -276,31 +276,6 @@ static int gpd_generic_read_rpm(void)
return (u16)high << 8 | low;
}
-static void gpd_win4_init_ec(void)
-{
- u8 chip_id, chip_ver;
-
- gpd_ecram_read(0x2000, &chip_id);
-
- if (chip_id == 0x55) {
- gpd_ecram_read(0x1060, &chip_ver);
- gpd_ecram_write(0x1060, chip_ver | 0x80);
- }
-}
-
-static int gpd_win4_read_rpm(void)
-{
- int ret;
-
- ret = gpd_generic_read_rpm();
-
- if (ret == 0)
- // Re-init EC when speed is 0
- gpd_win4_init_ec();
-
- return ret;
-}
-
static int gpd_wm2_read_rpm(void)
{
for (u16 pwm_ctr_offset = GPD_PWM_CTR_OFFSET;
@@ -320,11 +295,10 @@ static int gpd_wm2_read_rpm(void)
static int gpd_read_rpm(void)
{
switch (gpd_driver_priv.drvdata->board) {
+ case win4_6800u:
case win_mini:
case duo:
return gpd_generic_read_rpm();
- case win4_6800u:
- return gpd_win4_read_rpm();
case win_max_2:
return gpd_wm2_read_rpm();
}
@@ -607,6 +581,28 @@ static struct hwmon_chip_info gpd_fan_chip_info = {
.info = gpd_fan_hwmon_channel_info
};
+static void gpd_win4_init_ec(void)
+{
+ u8 chip_id, chip_ver;
+
+ gpd_ecram_read(0x2000, &chip_id);
+
+ if (chip_id == 0x55) {
+ gpd_ecram_read(0x1060, &chip_ver);
+ gpd_ecram_write(0x1060, chip_ver | 0x80);
+ }
+}
+
+static void gpd_init_ec(void)
+{
+ // The buggy firmware won't initialize EC properly on boot.
+ // Before its initialization, reading RPM will always return 0,
+ // and writing PWM will have no effect.
+ // Initialize it manually on driver load.
+ if (gpd_driver_priv.drvdata->board == win4_6800u)
+ gpd_win4_init_ec();
+}
+
static int gpd_fan_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -634,6 +630,8 @@ static int gpd_fan_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(hwdev),
"Failed to register hwmon device\n");
+ gpd_init_ec();
+
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fc1e86f6c409..90daa58126f4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -842,7 +842,7 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
break;
case MLX5_VPORT_ACCESS_METHOD_NIC:
- err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
+ err = mlx5_query_nic_vport_node_guid(dev->mdev, 0, false, &tmp);
break;
default:
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index f7209c8ebbcc..1c6b0461dc35 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -261,6 +261,12 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);
+ if (!ckdev->idev) {
+ dev_warn_once(ckdev->dev,
+ "Unexpected key matrix event\n");
+ return NOTIFY_OK;
+ }
+
if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
"Discarded incomplete key matrix event.\n");
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
index d18839f1f4f6..b620cd310cdb 100644
--- a/drivers/input/keyboard/imx_sc_key.c
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -158,7 +158,7 @@ static int imx_sc_key_probe(struct platform_device *pdev)
return error;
}
- error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+ error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, priv);
if (error)
return error;
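The one-character imx_sc_key fix matters because the cookie registered with devm_add_action_or_reset() is handed back verbatim to the action when the device is unbound; passing &priv (the address of a local pointer) instead of priv would give the callback a stale stack address. A hedged sketch of the intended pattern, with invented names:

#include <linux/device.h>
#include <linux/interrupt.h>

struct foo_priv {			/* hypothetical driver state */
	int irq;
};

static void foo_shutdown(void *data)
{
	struct foo_priv *priv = data;	/* the exact pointer registered below */

	disable_irq(priv->irq);
}

static int foo_register_shutdown(struct device *dev, struct foo_priv *priv)
{
	/* runs foo_shutdown(priv) automatically on unbind or on failure */
	return devm_add_action_or_reset(dev, foo_shutdown, priv);
}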
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 8d6b71d59793..eabb4a0b8a0d 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -63,6 +63,9 @@
#define BUTTON_PRESSED 0xb5
#define COMMAND_VERSION 0xa9
+/* 1 Status + 1 Color + 2 X + 2 Y = 6 bytes */
+#define NOTETAKER_PACKET_SIZE 6
+
/* in xy data packet */
#define BATTERY_NO_REPORT 0x40
#define BATTERY_LOW 0x41
@@ -311,6 +314,12 @@ static int pegasus_probe(struct usb_interface *intf,
}
pegasus->data_len = usb_maxpacket(dev, pipe);
+ if (pegasus->data_len < NOTETAKER_PACKET_SIZE) {
+ dev_err(&intf->dev, "packet size is too small (%d)\n",
+ pegasus->data_len);
+ error = -EINVAL;
+ goto err_free_mem;
+ }
pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
&pegasus->data_dma);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 252dcae039f8..f8798d11ec03 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -796,17 +796,6 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
usleep_range(6000, 10000); /* T4: > 5ms */
- /*
- * Put the reset pin back in to input / high-impedance mode to save
- * power. Only do this in the non ACPI case since some ACPI boards
- * don't have a pull-up, so there the reset pin must stay active-high.
- */
- if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
- error = gpiod_direction_input(ts->gpiod_rst);
- if (error)
- goto error;
- }
-
return 0;
error:
@@ -957,14 +946,6 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
return -EINVAL;
}
- /*
- * Normally we put the reset pin in input / high-impedance mode to save
- * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
- * case, leave the pin as is. This results in the pin not being touched
- * at all on x86/ACPI boards, except when needed for error-recover.
- */
- ts->gpiod_rst_flags = GPIOD_ASIS;
-
return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
}
#else
@@ -989,12 +970,6 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
- /*
- * By default we request the reset pin as input, leaving it in
- * high-impedance when not resetting the controller to save power.
- */
- ts->gpiod_rst_flags = GPIOD_IN;
-
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
if (IS_ERR(ts->avdd28))
return dev_err_probe(dev, PTR_ERR(ts->avdd28), "Failed to get AVDD28 regulator\n");
@@ -1019,7 +994,7 @@ retry_get_irq_gpio:
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
- gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
+ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_ASIS);
if (IS_ERR(gpiod))
return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
GOODIX_GPIO_RST_NAME);
@@ -1557,6 +1532,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ "GDIX1002", 0 },
+ { "GDIX1003", 0 },
{ "GDX9110", 0 },
{ }
};
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index 87797cc88b32..0d1e8a8d2cba 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -88,7 +88,6 @@ struct goodix_ts_data {
struct gpio_desc *gpiod_rst;
int gpio_count;
int gpio_int_idx;
- enum gpiod_flags gpiod_rst_flags;
char id[GOODIX_ID_MAX_LEN + 1];
char cfg_name[64];
u16 version;
diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
index 6f1010da221c..21d4a35538f6 100644
--- a/drivers/iommu/iommufd/driver.c
+++ b/drivers/iommu/iommufd/driver.c
@@ -161,8 +161,8 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
vevent = &veventq->lost_events_header;
goto out_set_header;
}
- memcpy(vevent->event_data, event_data, data_len);
vevent->data_len = data_len;
+ memcpy(vevent->event_data, event_data, data_len);
veventq->num_events++;
out_set_header:
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 627f9b78483a..85d0843ed07b 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -614,7 +614,6 @@ struct iommufd_veventq {
struct iommufd_eventq common;
struct iommufd_viommu *viommu;
struct list_head node; /* for iommufd_viommu::veventqs */
- struct iommufd_vevent lost_events_header;
enum iommu_veventq_type type;
unsigned int depth;
@@ -622,6 +621,9 @@ struct iommufd_veventq {
/* Use common.lock for protection */
u32 num_events;
u32 sequence;
+
+ /* Must be last as it ends in a flexible-array member. */
+ struct iommufd_vevent lost_events_header;
};
static inline struct iommufd_veventq *
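The iommufd reordering is needed because lost_events_header embeds struct iommufd_vevent, which ends in a flexible-array member, and such a member is only well defined at the very end of the enclosing object. A simplified illustration (types renamed and trimmed):

struct event {
	unsigned int data_len;
	unsigned char data[];		/* flexible-array member */
};

struct event_queue {
	unsigned int depth;
	unsigned int num_events;
	/*
	 * Must be last: data written through lost_header.data would
	 * otherwise overlap whatever member followed it.
	 */
	struct event lost_header;
};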
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index e5805885394e..70290b35b317 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -166,7 +166,8 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
static const struct irq_domain_ops riscv_intc_domain_ops = {
.map = riscv_intc_domain_map,
.xlate = irq_domain_xlate_onecell,
- .alloc = riscv_intc_domain_alloc
+ .alloc = riscv_intc_domain_alloc,
+ .free = irq_domain_free_irqs_top,
};
static struct fwnode_handle *riscv_intc_hwnode(void)
diff --git a/drivers/md/dm-pcache/Makefile b/drivers/md/dm-pcache/Makefile
index 86776e4acad2..cedfd38854f6 100644
--- a/drivers/md/dm-pcache/Makefile
+++ b/drivers/md/dm-pcache/Makefile
@@ -1,3 +1,3 @@
dm-pcache-y := dm_pcache.o cache_dev.o segment.o backing_dev.o cache.o cache_gc.o cache_writeback.o cache_segment.o cache_key.o cache_req.o
-obj-m += dm-pcache.o
+obj-$(CONFIG_DM_PCACHE) += dm-pcache.o
diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c
index d8e92367d947..698697a7a73c 100644
--- a/drivers/md/dm-pcache/cache.c
+++ b/drivers/md/dm-pcache/cache.c
@@ -181,7 +181,7 @@ static void cache_info_init_default(struct pcache_cache *cache)
{
struct pcache_cache_info *cache_info = &cache->cache_info;
- cache_info->header.seq = 0;
+ memset(cache_info, 0, sizeof(*cache_info));
cache_info->n_segs = cache->cache_dev->seg_num;
cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
}
@@ -411,7 +411,7 @@ void pcache_cache_stop(struct dm_pcache *pcache)
{
struct pcache_cache *cache = &pcache->cache;
- cache_flush(cache);
+ pcache_cache_flush(cache);
cancel_delayed_work_sync(&cache->gc_work);
flush_work(&cache->clean_work);
diff --git a/drivers/md/dm-pcache/cache.h b/drivers/md/dm-pcache/cache.h
index 1136d86958c8..27613b56be54 100644
--- a/drivers/md/dm-pcache/cache.h
+++ b/drivers/md/dm-pcache/cache.h
@@ -339,7 +339,7 @@ void cache_seg_put(struct pcache_cache_segment *cache_seg);
void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);
/* cache request*/
-int cache_flush(struct pcache_cache *cache);
+int pcache_cache_flush(struct pcache_cache *cache);
void miss_read_end_work_fn(struct work_struct *work);
int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);
diff --git a/drivers/md/dm-pcache/cache_req.c b/drivers/md/dm-pcache/cache_req.c
index 27f94c1fa968..7854a30e07b7 100644
--- a/drivers/md/dm-pcache/cache_req.c
+++ b/drivers/md/dm-pcache/cache_req.c
@@ -790,7 +790,7 @@ err:
}
/**
- * cache_flush - Flush all ksets to persist any pending cache data
+ * pcache_cache_flush - Flush all ksets to persist any pending cache data
* @cache: Pointer to the cache structure
*
* This function iterates through all ksets associated with the provided `cache`
@@ -802,7 +802,7 @@ err:
* the respective error code, preventing the flush operation from proceeding to
* subsequent ksets.
*/
-int cache_flush(struct pcache_cache *cache)
+int pcache_cache_flush(struct pcache_cache *cache)
{
struct pcache_cache_kset *kset;
int ret;
@@ -827,7 +827,7 @@ int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *p
struct bio *bio = pcache_req->bio;
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
- return cache_flush(cache);
+ return pcache_cache_flush(cache);
if (bio_data_dir(bio) == READ)
return cache_read(cache, pcache_req);
diff --git a/drivers/md/dm-pcache/pcache_internal.h b/drivers/md/dm-pcache/pcache_internal.h
index d427e534727c..b7a3319d2bd3 100644
--- a/drivers/md/dm-pcache/pcache_internal.h
+++ b/drivers/md/dm-pcache/pcache_internal.h
@@ -99,7 +99,7 @@ static inline void __must_check *pcache_meta_find_latest(struct pcache_meta_head
/* Update latest if a more recent sequence is found */
if (!latest || pcache_meta_seq_after(meta->seq, seq_latest)) {
seq_latest = meta->seq;
- latest = (void *)header + (i * meta_max_size);
+ latest = meta_addr;
}
}
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index d382a390d39a..72047b47a7a0 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -320,11 +320,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
- if (unlikely(!fio->bufs[n])) {
- DMERR("failed to allocate FEC buffer");
- return -ENOMEM;
- }
+ fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOIO);
}
/* try to allocate the maximum number of buffers */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f5e5e59b232b..6c83ab940af7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2005,7 +2005,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
* linear target or multiple linear targets pointing to the same
* device), we can send the flush with data directly to it.
*/
- if (map->flush_bypasses_map) {
+ if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
struct list_head *devices = dm_table_get_devices(map);
if (devices->next == devices->prev)
goto send_preflush_with_data;
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index cfa61dd88557..3c2949c16fde 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -1015,7 +1015,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = TEGRA210_MC_SESRD,
+ .id = TEGRA210_MC_SESWR,
.name = "seswr",
.swgroup = TEGRA_SWGROUP_SE,
.regs = {
@@ -1079,7 +1079,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = TEGRA210_MC_ETRR,
+ .id = TEGRA210_MC_ETRW,
.name = "etrw",
.swgroup = TEGRA_SWGROUP_ETR,
.regs = {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2c963cb6724b..10d0ef58ef49 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -950,7 +950,7 @@ config MMC_USHC
config MMC_WMT
tristate "Wondermedia SD/MMC Host Controller support"
depends on ARCH_VT8500 || COMPILE_TEST
- default y
+ default ARCH_VT8500
help
This selects support for the SD/MMC Host Controller on
Wondermedia WM8505/WM8650 based SoCs.
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 82dd906bb002..681354942e97 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -42,7 +42,7 @@ struct dw_mci_rockchip_priv_data {
*/
static int rockchip_mmc_get_internal_phase(struct dw_mci *host, bool sample)
{
- unsigned long rate = clk_get_rate(host->ciu_clk);
+ unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
u32 raw_value;
u16 degrees;
u32 delay_num = 0;
@@ -85,7 +85,7 @@ static int rockchip_mmc_get_phase(struct dw_mci *host, bool sample)
static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int degrees)
{
- unsigned long rate = clk_get_rate(host->ciu_clk);
+ unsigned long rate = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
u8 nineties, remainder;
u8 delay_num;
u32 raw_value;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 26d03352af63..b5ea058ed467 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -652,10 +652,9 @@ static int pxamci_probe(struct platform_device *pdev)
host->clkrt = CLKRT_OFF;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk)) {
- host->clk = NULL;
- return PTR_ERR(host->clk);
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(dev, PTR_ERR(host->clk),
+ "Failed to acquire clock\n");
host->clkrate = clk_get_rate(host->clk);
@@ -703,46 +702,37 @@ static int pxamci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(host->dma_chan_rx)) {
- host->dma_chan_rx = NULL;
+ host->dma_chan_rx = devm_dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx))
return dev_err_probe(dev, PTR_ERR(host->dma_chan_rx),
"unable to request rx dma channel\n");
- }
- host->dma_chan_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(host->dma_chan_tx)) {
- dev_err(dev, "unable to request tx dma channel\n");
- ret = PTR_ERR(host->dma_chan_tx);
- host->dma_chan_tx = NULL;
- goto out;
- }
+
+ host->dma_chan_tx = devm_dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx))
+ return dev_err_probe(dev, PTR_ERR(host->dma_chan_tx),
+ "unable to request tx dma channel\n");
if (host->pdata) {
host->detect_delay_ms = host->pdata->detect_delay_ms;
host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
- if (IS_ERR(host->power)) {
- ret = PTR_ERR(host->power);
- dev_err(dev, "Failed requesting gpio_power\n");
- goto out;
- }
+ if (IS_ERR(host->power))
+ return dev_err_probe(dev, PTR_ERR(host->power),
+ "Failed requesting gpio_power\n");
/* FIXME: should we pass detection delay to debounce? */
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
- if (ret && ret != -ENOENT) {
- dev_err(dev, "Failed requesting gpio_cd\n");
- goto out;
- }
+ if (ret && ret != -ENOENT)
+ return dev_err_probe(dev, ret, "Failed requesting gpio_cd\n");
if (!host->pdata->gpio_card_ro_invert)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
- if (ret && ret != -ENOENT) {
- dev_err(dev, "Failed requesting gpio_ro\n");
- goto out;
- }
+ if (ret && ret != -ENOENT)
+ return dev_err_probe(dev, ret, "Failed requesting gpio_ro\n");
+
if (!ret)
host->use_ro_gpio = true;
@@ -759,16 +749,8 @@ static int pxamci_probe(struct platform_device *pdev)
if (ret) {
if (host->pdata && host->pdata->exit)
host->pdata->exit(dev, mmc);
- goto out;
}
- return 0;
-
-out:
- if (host->dma_chan_rx)
- dma_release_channel(host->dma_chan_rx);
- if (host->dma_chan_tx)
- dma_release_channel(host->dma_chan_tx);
return ret;
}
@@ -791,8 +773,6 @@ static void pxamci_remove(struct platform_device *pdev)
dmaengine_terminate_all(host->dma_chan_rx);
dmaengine_terminate_all(host->dma_chan_tx);
- dma_release_channel(host->dma_chan_rx);
- dma_release_channel(host->dma_chan_tx);
}
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index eebd45389956..5b61401a7f3d 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -94,7 +94,7 @@
#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x4
#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
#define DLL_STRBIN_DELAY_NUM_OFFSET 16
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 8dc4f5c493fc..335c702633ff 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -599,6 +599,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
int ret = 0;
+ u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -618,7 +619,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
- if (req.start + req.len > mtd->size)
+ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
@@ -698,6 +699,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
size_t datbuf_len, oobbuf_len;
size_t orig_len, orig_ooblen;
int ret = 0;
+ u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -724,7 +726,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
- if (req.start + req.len > mtd->size) {
+ if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) {
ret = -EINVAL;
goto out;
}
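Both mtdchar ioctl paths now validate the request window with check_add_overflow(), so a req.start near U64_MAX can no longer wrap the addition and slip past the size check. A condensed sketch of the pattern:

#include <linux/overflow.h>
#include <linux/types.h>

/* Reject a [start, start + len) window that wraps or exceeds 'size'. */
static bool range_ok(u64 start, u64 len, u64 size)
{
	u64 end;

	if (check_add_overflow(start, len, &end))
		return false;		/* start + len wrapped around */

	return end <= size;
}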
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4a17271076bc..1e57c8de8578 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -63,7 +63,7 @@ config MTD_NAND_ECC_MEDIATEK
config MTD_NAND_ECC_REALTEK
tristate "Realtek RTL93xx hardware ECC engine"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
depends on MACH_REALTEK_RTL || COMPILE_TEST
select MTD_NAND_ECC
help
diff --git a/drivers/mtd/nand/ecc-realtek.c b/drivers/mtd/nand/ecc-realtek.c
index 7d718934c909..0046da37ea3e 100644
--- a/drivers/mtd/nand/ecc-realtek.c
+++ b/drivers/mtd/nand/ecc-realtek.c
@@ -380,7 +380,7 @@ static void rtl_ecc_cleanup_ctx(struct nand_device *nand)
nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
}
-static struct nand_ecc_engine_ops rtl_ecc_engine_ops = {
+static const struct nand_ecc_engine_ops rtl_ecc_engine_ops = {
.init_ctx = rtl_ecc_init_ctx,
.cleanup_ctx = rtl_ecc_cleanup_ctx,
.prepare_io_req = rtl_ecc_prepare_io_req,
@@ -418,8 +418,8 @@ static int rtl_ecc_probe(struct platform_device *pdev)
rtlc->buf = dma_alloc_noncoherent(dev, RTL_ECC_DMA_SIZE, &rtlc->buf_dma,
DMA_BIDIRECTIONAL, GFP_KERNEL);
- if (IS_ERR(rtlc->buf))
- return PTR_ERR(rtlc->buf);
+ if (!rtlc->buf)
+ return -ENOMEM;
rtlc->dev = dev;
rtlc->engine.dev = dev;
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index f37a6138e461..6d6aa709a21f 100644
--- a/drivers/mtd/nand/onenand/onenand_samsung.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -906,7 +906,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, r->start,
s5pc110_onenand_irq,
IRQF_SHARED, "onenand",
- &onenand);
+ onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
return err;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 6667eea95597..32ed38b89394 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2871,7 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
- struct dma_device *dma_dev = cdns_ctrl->dmac->device;
+ struct dma_device *dma_dev;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2915,6 +2915,7 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
}
}
+ dma_dev = cdns_ctrl->dmac->device;
cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
cdns_ctrl->io.size,
DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/mtd/nand/spi/fmsh.c b/drivers/mtd/nand/spi/fmsh.c
index 8b2097bfc771..c2b9a8c113cb 100644
--- a/drivers/mtd/nand/spi/fmsh.c
+++ b/drivers/mtd/nand/spi/fmsh.c
@@ -58,7 +58,7 @@ static const struct spinand_info fmsh_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
+ 0,
SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)),
};
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 49717b7b82a2..1a8de2bf8655 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -76,6 +76,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
AD_LINK_SPEED_800000MBPS,
+ AD_LINK_SPEED_1600000MBPS,
};
/* compare MAC addresses */
@@ -300,6 +301,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
* %AD_LINK_SPEED_800000MBPS
+ * %AD_LINK_SPEED_1600000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -379,6 +381,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_800000MBPS;
break;
+ case SPEED_1600000:
+ speed = AD_LINK_SPEED_1600000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
@@ -822,6 +828,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_800000MBPS:
bandwidth = nports * 800000;
break;
+ case AD_LINK_SPEED_1600000MBPS:
+ bandwidth = nports * 1600000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d43d56694667..e15e320db476 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -124,6 +124,23 @@ config CAN_CAN327
If this driver is built as a module, it will be called can327.
+config CAN_DUMMY
+ tristate "Dummy CAN"
+ help
+ A dummy CAN module supporting Classical CAN, CAN FD and CAN XL. It
+ exposes bittiming values which can be configured through the netlink
+ interface.
+
+ The module will simply echo any frame sent to it. If debug messages
+ are activated, it prints all the CAN bittiming information in the
+ kernel log. Aside from that it does nothing.
+
+ This is convenient for testing the CAN netlink interface. Most users
+ will never need this. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dummy-can.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 56138d8ddfd2..d7bc10a6b8ea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
+obj-$(CONFIG_CAN_DUMMY) += dummy_can.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 0b93900b1dfa..8f82418230ce 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -151,3 +152,65 @@ int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
return -EINVAL;
}
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack)
+{
+ const struct can_priv *priv = netdev_priv(dev);
+ u32 xl_bit_time_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 nom_bit_time_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ u32 pwms_ns = can_tqmin_to_ns(pwm->pwms, priv->clock.freq);
+ u32 pwml_ns = can_tqmin_to_ns(pwm->pwml, priv->clock.freq);
+
+ if (pwms_ns + pwml_ns > CAN_PWM_NS_MAX) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "The PWM symbol duration: %u ns may not exceed %u ns",
+ pwms_ns + pwml_ns, CAN_PWM_NS_MAX);
+ return -EINVAL;
+ }
+
+ if (pwms_ns < CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u ns shall be at least %u ns",
+ pwms_ns, CAN_PWM_DECODE_NS);
+ return -EINVAL;
+ }
+
+ if (pwm->pwms >= pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin shall be smaller than PWML: %u tqmin",
+ pwm->pwms, pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (pwml_ns - pwms_ns < 2 * CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "At least %u ns shall separate PWMS: %u ns from PMWL: %u ns",
+ 2 * CAN_PWM_DECODE_NS, pwms_ns, pwml_ns);
+ return -EINVAL;
+ }
+
+ if (xl_bit_time_tqmin % (pwm->pwms + pwm->pwml) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWM duration: %u tqmin does not divide XL's bit time: %u tqmin",
+ pwm->pwms + pwm->pwml, xl_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ if (pwm->pwmo >= pwm->pwms + pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin can not be greater than PWMS + PWML: %u tqmin",
+ pwm->pwmo, pwm->pwms + pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (nom_bit_time_tqmin % (pwm->pwms + pwm->pwml) != pwm->pwmo) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not assemble nominal bit time: %u tqmin out of PWMS + PMWL and PWMO",
+ nom_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
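The last two checks in can_validate_pwm_bittiming() encode a divisibility rule: the XL data-phase bit time must be a whole number of PWM symbols (PWMS + PWML), and PWMO must equal the remainder left when the nominal bit time is cut into such symbols. A stand-alone sketch of that rule, with an invented helper name and all values in minimum time quanta (tqmin):

#include <linux/types.h>

static bool pwm_layout_ok(u32 xl_bit_time, u32 nom_bit_time,
			  u32 pwms, u32 pwml, u32 pwmo)
{
	u32 symbol = pwms + pwml;

	if (!symbol || xl_bit_time % symbol)
		return false;	/* XL bit time must be whole PWM symbols */

	return nom_bit_time % symbol == pwmo;
}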
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index 394d6974f481..cc4022241553 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/units.h>
@@ -9,6 +10,33 @@
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+/* CiA recommended sample points for Non Return to Zero encoding. */
+static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ return 750;
+
+ if (bt->bitrate > 500 * KILO /* BPS */)
+ return 800;
+
+ return 875;
+}
+
+/* Sample points for Pulse-Width Modulation encoding. */
+static int can_calc_sample_point_pwm(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 15 * MEGA /* BPS */)
+ return 625;
+
+ if (bt->bitrate > 9 * MEGA /* BPS */)
+ return 600;
+
+ if (bt->bitrate > 4 * MEGA /* BPS */)
+ return 560;
+
+ return 520;
+}
+
/* Bit-timing calculation derived from:
*
* Code based on LinCAN sources and H8S2638 project
@@ -23,7 +51,7 @@
*/
static int
can_update_sample_point(const struct can_bittiming_const *btc,
- const unsigned int sample_point_nominal, const unsigned int tseg,
+ const unsigned int sample_point_reference, const unsigned int tseg,
unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
unsigned int *sample_point_error_ptr)
{
@@ -34,7 +62,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
for (i = 0; i <= 1; i++) {
tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ (sample_point_reference * (tseg + CAN_SYNC_SEG)) /
1000 - i;
tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
tseg1 = tseg - tseg2;
@@ -45,9 +73,9 @@ can_update_sample_point(const struct can_bittiming_const *btc,
sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
(tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
+ sample_point_error = abs(sample_point_reference - sample_point);
- if (sample_point <= sample_point_nominal &&
+ if (sample_point <= sample_point_reference &&
sample_point_error < best_sample_point_error) {
best_sample_point = sample_point;
best_sample_point_error = sample_point_error;
@@ -67,28 +95,24 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int bitrate_error; /* diff between calculated and reference value */
unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int sample_point_error; /* diff between calculated and reference value */
unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int sample_point_reference; /* reference sample point */
unsigned int best_tseg = 0; /* current best value for tseg */
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
int err;
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * KILO /* BPS */)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * KILO /* BPS */)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
+ if (bt->sample_point)
+ sample_point_reference = bt->sample_point;
+ else if (btc == priv->xl.data_bittiming_const &&
+ (priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ sample_point_reference = can_calc_sample_point_pwm(bt);
+ else
+ sample_point_reference = can_calc_sample_point_nrz(bt);
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
@@ -114,7 +138,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
if (bitrate_error < best_bitrate_error)
best_sample_point_error = UINT_MAX;
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ can_update_sample_point(btc, sample_point_reference, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
if (sample_point_error >= best_sample_point_error)
continue;
@@ -129,23 +153,26 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
}
if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
+ /* Error in one-hundredth of a percent */
+ v64 = (u64)best_bitrate_error * 10000;
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
+ /* print at least 0.01% if the error is smaller */
+ bitrate_error = max(bitrate_error, 1U);
if (bitrate_error > CAN_CALC_MAX_ERROR) {
NL_SET_ERR_MSG_FMT(extack,
- "bitrate error: %u.%u%% too high",
- bitrate_error / 10, bitrate_error % 10);
+ "bitrate error: %u.%02u%% too high",
+ bitrate_error / 100,
+ bitrate_error % 100);
return -EINVAL;
}
NL_SET_ERR_MSG_FMT(extack,
- "bitrate error: %u.%u%%",
- bitrate_error / 10, bitrate_error % 10);
+ "bitrate error: %u.%02u%%",
+ bitrate_error / 100, bitrate_error % 100);
}
/* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ bt->sample_point = can_update_sample_point(btc, sample_point_reference,
best_tseg, &tseg1, &tseg2,
NULL);
@@ -198,3 +225,38 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
*ctrlmode |= tdc_auto;
}
}
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct can_pwm *pwm = &priv->xl.pwm;
+ u32 xl_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 xl_ns = can_tqmin_to_ns(xl_tqmin, priv->clock.freq);
+ u32 nom_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ int pwm_per_bit_max = xl_tqmin / (pwm_const->pwms_min + pwm_const->pwml_min);
+ int pwm_per_bit;
+ u32 pwm_tqmin;
+
+ /* For a data bitrate of 5 Mbit/s or greater, xl_ns < CAN_PWM_NS_MAX,
+ * giving us a pwm_per_bit of 1, so the loop breaks immediately
+ */
+ for (pwm_per_bit = DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX);
+ pwm_per_bit <= pwm_per_bit_max; pwm_per_bit++)
+ if (xl_tqmin % pwm_per_bit == 0)
+ break;
+
+ if (pwm_per_bit > pwm_per_bit_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not divide the XL data phase's bit time: %u tqmin into multiple PWM symbols",
+ xl_tqmin);
+ return -EINVAL;
+ }
+
+ pwm_tqmin = xl_tqmin / pwm_per_bit;
+ pwm->pwms = DIV_ROUND_UP_POW2(pwm_tqmin, 4);
+ pwm->pwml = pwm_tqmin - pwm->pwms;
+ pwm->pwmo = nom_tqmin % pwm_tqmin;
+
+ return 0;
+}
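The reporting change in can_calc_bittiming() switches the bitrate error from tenths to hundredths of a percent and never prints less than 0.01% for a non-zero error. Pulled out as a hedged helper for clarity (not part of the patch), the arithmetic is:

#include <linux/math64.h>
#include <linux/minmax.h>

/* Bitrate error in hundredths of a percent, clamped to at least 0.01%. */
static u32 bitrate_error_centipercent(u32 best_bitrate_error, u32 bitrate)
{
	u64 v64 = (u64)best_bitrate_error * 10000;

	do_div(v64, bitrate);

	return max_t(u32, (u32)v64, 1);
}

The caller then formats the result as "%u.%02u%%" using value / 100 and value % 100.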
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 80e1ab18de87..091f30e94c61 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -92,29 +92,39 @@ const char *can_get_ctrlmode_str(u32 ctrlmode)
{
switch (ctrlmode & ~(ctrlmode - 1)) {
case 0:
- return "none";
+ return "(none)";
case CAN_CTRLMODE_LOOPBACK:
- return "loopback";
+ return "LOOPBACK";
case CAN_CTRLMODE_LISTENONLY:
- return "listen-only";
+ return "LISTEN-ONLY";
case CAN_CTRLMODE_3_SAMPLES:
- return "triple-sampling";
+ return "TRIPLE-SAMPLING";
case CAN_CTRLMODE_ONE_SHOT:
- return "one-shot";
+ return "ONE-SHOT";
case CAN_CTRLMODE_BERR_REPORTING:
- return "berr-reporting";
+ return "BERR-REPORTING";
case CAN_CTRLMODE_FD:
- return "fd";
+ return "FD";
case CAN_CTRLMODE_PRESUME_ACK:
- return "presume-ack";
+ return "PRESUME-ACK";
case CAN_CTRLMODE_FD_NON_ISO:
- return "fd-non-iso";
+ return "FD-NON-ISO";
case CAN_CTRLMODE_CC_LEN8_DLC:
- return "cc-len8-dlc";
+ return "CC-LEN8-DLC";
case CAN_CTRLMODE_TDC_AUTO:
- return "fd-tdc-auto";
+ return "TDC-AUTO";
case CAN_CTRLMODE_TDC_MANUAL:
- return "fd-tdc-manual";
+ return "TDC-MANUAL";
+ case CAN_CTRLMODE_RESTRICTED:
+ return "RESTRICTED";
+ case CAN_CTRLMODE_XL:
+ return "XL";
+ case CAN_CTRLMODE_XL_TDC_AUTO:
+ return "XL-TDC-AUTO";
+ case CAN_CTRLMODE_XL_TDC_MANUAL:
+ return "XL-TDC-MANUAL";
+ case CAN_CTRLMODE_XL_TMS:
+ return "TMS";
default:
return "<unknown>";
}
@@ -348,7 +358,13 @@ void can_set_default_mtu(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->ctrlmode & CAN_CTRLMODE_XL) {
+ if (can_is_canxl_dev_mtu(dev->mtu))
+ return;
+ dev->mtu = CANXL_MTU;
+ dev->min_mtu = CANXL_MIN_MTU;
+ dev->max_mtu = CANXL_MAX_MTU;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_FD) {
dev->mtu = CANFD_MTU;
dev->min_mtu = CANFD_MTU;
dev->max_mtu = CANFD_MTU;
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 6f83b87d54fc..d6b0e686fb11 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -2,7 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -22,6 +22,10 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_XL_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_XL_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_PWM] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
@@ -36,6 +40,18 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
+static const struct nla_policy can_pwm_policy[IFLA_CAN_PWM_MAX + 1] = {
+ [IFLA_CAN_PWM_PWMS_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO] = { .type = NLA_U32 },
+};
+
static int can_validate_bittiming(struct nlattr *data[],
struct netlink_ext_ack *extack,
int ifla_can_bittiming)
@@ -70,7 +86,7 @@ static int can_validate_tdc(struct nlattr *data_tdc,
return -EOPNOTSUPP;
}
- /* If one of the CAN_CTRLMODE_TDC_* flag is set then TDC
+ /* If one of the CAN_CTRLMODE_{,XL}_TDC_* flags is set then TDC
* must be set and vice-versa
*/
if ((tdc_auto || tdc_manual) && !data_tdc) {
@@ -82,8 +98,8 @@ static int can_validate_tdc(struct nlattr *data_tdc,
return -EOPNOTSUPP;
}
- /* If providing TDC parameters, at least TDCO is needed. TDCV
- * is needed if and only if CAN_CTRLMODE_TDC_MANUAL is set
+ /* If providing TDC parameters, at least TDCO is needed. TDCV is
+ * needed if and only if CAN_CTRLMODE_{,XL_}TDC_MANUAL is set
*/
if (data_tdc) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
@@ -116,6 +132,40 @@ static int can_validate_tdc(struct nlattr *data_tdc,
return 0;
}
+static int can_validate_pwm(struct nlattr *data[],
+ struct netlink_ext_ack *extack, u32 flags)
+{
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ int err;
+
+ if (!data[IFLA_CAN_XL_PWM])
+ return 0;
+
+ if (!(flags & CAN_CTRLMODE_XL_TMS)) {
+ NL_SET_ERR_MSG(extack, "PWM requires TMS");
+ return -EOPNOTSUPP;
+ }
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, data[IFLA_CAN_XL_PWM],
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb_pwm[IFLA_CAN_PWM_PWMS] != !tb_pwm[IFLA_CAN_PWM_PWML]) {
+ NL_SET_ERR_MSG(extack,
+ "Provide either both PWMS and PWML, or none for automatic calculation");
+ return -EOPNOTSUPP;
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO] &&
+ (!tb_pwm[IFLA_CAN_PWM_PWMS] || !tb_pwm[IFLA_CAN_PWM_PWML])) {
+ NL_SET_ERR_MSG(extack, "PWMO requires both PWMS and PWML");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int can_validate_databittiming(struct nlattr *data[],
struct netlink_ext_ack *extack,
int ifla_can_data_bittiming, u32 flags)
@@ -126,10 +176,10 @@ static int can_validate_databittiming(struct nlattr *data[],
bool is_on;
int err;
- /* Make sure that valid CAN FD configurations always consist of
+ /* Make sure that valid CAN FD/XL configurations always consist of
* - nominal/arbitration bittiming
* - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
+ * - control mode with CAN_CTRLMODE_{FD,XL} set
* - TDC parameters are coherent (details in can_validate_tdc())
*/
@@ -139,7 +189,10 @@ static int can_validate_databittiming(struct nlattr *data[],
is_on = flags & CAN_CTRLMODE_FD;
type = "FD";
} else {
- return -EOPNOTSUPP; /* Place holder for CAN XL */
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_XL_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_XL;
+ type = "XL";
}
if (is_on) {
@@ -175,6 +228,32 @@ static int can_validate_databittiming(struct nlattr *data[],
return 0;
}
+static int can_validate_xl_flags(struct netlink_ext_ack *extack,
+ u32 masked_flags, u32 mask)
+{
+ if (masked_flags & CAN_CTRLMODE_XL) {
+ if (masked_flags & CAN_CTRLMODE_XL_TMS) {
+ const u32 tms_conflicts_mask = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_XL_TDC_MASK;
+ u32 tms_conflicts = masked_flags & tms_conflicts_mask;
+
+ if (tms_conflicts) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "TMS and %s are mutually exclusive",
+ can_get_ctrlmode_str(tms_conflicts));
+ return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (mask & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack, "TMS requires CAN XL");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
@@ -188,6 +267,17 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
flags = cm->flags & cm->mask;
+
+ if ((flags & CAN_CTRLMODE_LISTENONLY) &&
+ (flags & CAN_CTRLMODE_RESTRICTED)) {
+ NL_SET_ERR_MSG(extack,
+ "LISTEN-ONLY and RESTRICTED modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ err = can_validate_xl_flags(extack, flags, cm->mask);
+ if (err)
+ return err;
}
err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
@@ -199,6 +289,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (err)
return err;
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_XL_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_pwm(data, extack, flags);
+ if (err)
+ return err;
+
return 0;
}
@@ -208,7 +307,7 @@ static int can_ctrlmode_changelink(struct net_device *dev,
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode *cm;
- u32 ctrlstatic, maskedflags, notsupp, ctrlstatic_missing;
+ u32 ctrlstatic, maskedflags, deactivated, notsupp, ctrlstatic_missing;
if (!data[IFLA_CAN_CTRLMODE])
return 0;
@@ -220,6 +319,7 @@ static int can_ctrlmode_changelink(struct net_device *dev,
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
ctrlstatic = can_get_static_ctrlmode(priv);
maskedflags = cm->flags & cm->mask;
+ deactivated = ~cm->flags & cm->mask;
notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
@@ -241,21 +341,40 @@ static int can_ctrlmode_changelink(struct net_device *dev,
return -EOPNOTSUPP;
}
+ /* If FD was active and is not turned off, check for XL conflicts */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD & ~deactivated) {
+ if (maskedflags & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack,
+ "TMS can not be activated while CAN FD is on");
+ return -EOPNOTSUPP;
+ }
+ }
+
/* If a top dependency flag is provided, reset all its dependencies */
if (cm->mask & CAN_CTRLMODE_FD)
priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ if (cm->mask & CAN_CTRLMODE_XL)
+ priv->ctrlmode &= ~(CAN_CTRLMODE_XL_TDC_MASK |
+ CAN_CTRLMODE_XL_TMS);
/* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= maskedflags;
- /* Wipe potential leftovers from previous CAN FD config */
+ /* Wipe potential leftovers from previous CAN FD/XL config */
if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
memset(&priv->fd.data_bittiming, 0,
sizeof(priv->fd.data_bittiming));
priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL)) {
+ memset(&priv->xl.data_bittiming, 0,
+ sizeof(priv->xl.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_XL_TDC_MASK;
+ memset(&priv->xl.tdc, 0, sizeof(priv->xl.tdc));
+ memset(&priv->xl.pwm, 0, sizeof(priv->xl.pwm));
+ }
can_set_default_mtu(dev);
@@ -330,7 +449,10 @@ static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
dbt_params = &priv->fd;
tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
} else {
- return -EOPNOTSUPP; /* Place holder for CAN XL */
+ data_bittiming = data[IFLA_CAN_XL_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ dbt_params = &priv->xl;
+ tdc_mask = CAN_CTRLMODE_XL_TDC_MASK;
}
if (!data_bittiming)
@@ -366,7 +488,8 @@ static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- need_tdc_calc = !(cm->mask & tdc_mask);
+ if (fd || !(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ need_tdc_calc = !(cm->mask & tdc_mask);
}
if (data_tdc) {
/* TDC parameters are provided: use them */
@@ -381,7 +504,7 @@ static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
*/
can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
- } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ } /* else: both CAN_CTRLMODE_{,XL_}TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
*/
@@ -397,6 +520,76 @@ static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
return 0;
}
+static int can_pwm_changelink(struct net_device *dev,
+ const struct nlattr *pwm_nla,
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ struct can_pwm pwm = { 0 };
+ int err;
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ return 0;
+
+ if (!pwm_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support PWM");
+ return -EOPNOTSUPP;
+ }
+
+ if (!pwm_nla)
+ return can_calc_pwm(dev, extack);
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, pwm_nla,
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMS]) {
+ pwm.pwms = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMS]);
+ if (pwm.pwms < pwm_const->pwms_min ||
+ pwm.pwms > pwm_const->pwms_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin is out of range: %u...%u",
+ pwm.pwms, pwm_const->pwms_min,
+ pwm_const->pwms_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWML]) {
+ pwm.pwml = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWML]);
+ if (pwm.pwml < pwm_const->pwml_min ||
+ pwm.pwml > pwm_const->pwml_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWML: %u tqmin is out of range: %u...%u",
+ pwm.pwml, pwm_const->pwml_min,
+ pwm_const->pwml_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO]) {
+ pwm.pwmo = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMO]);
+ if (pwm.pwmo < pwm_const->pwmo_min ||
+ pwm.pwmo > pwm_const->pwmo_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin is out of range: %u...%u",
+ pwm.pwmo, pwm_const->pwmo_min,
+ pwm_const->pwmo_max);
+ return -EINVAL;
+ }
+ }
+
+ err = can_validate_pwm_bittiming(dev, &pwm, extack);
+ if (err)
+ return err;
+
+ priv->xl.pwm = pwm;
+ return 0;
+}
+
static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -486,6 +679,14 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ /* CAN XL */
+ err = can_dbt_changelink(dev, data, false, extack);
+ if (err)
+ return err;
+ err = can_pwm_changelink(dev, data[IFLA_CAN_XL_PWM], extack);
+ if (err)
+ return err;
+
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
@@ -553,14 +754,14 @@ static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_para
{
size_t size = 0;
- if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_{,XL_}DATA_BITTIMING */
size += nla_total_size(sizeof(dbt_params->data_bittiming));
- if (dbt_params->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_{,XL_}DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
- if (dbt_params->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_{,XL_}DATA_BITRATE_CONST */
size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
dbt_params->data_bitrate_const_cnt);
- size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_TDC */
+ size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_{,XL_}TDC */
return size;
}
@@ -571,6 +772,30 @@ static size_t can_ctrlmode_ext_get_size(void)
nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
}
+static size_t can_pwm_get_size(const struct can_pwm_const *pwm_const,
+ bool pwm_on)
+{
+ size_t size;
+
+ if (!pwm_const || !pwm_on)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_XL_PWM */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MAX */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO */
+
+ return size;
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -600,6 +825,11 @@ static size_t can_get_size(const struct net_device *dev)
size += can_data_bittiming_get_size(&priv->fd,
priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+ size += can_data_bittiming_get_size(&priv->xl,
+ priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+ size += can_pwm_get_size(priv->xl.pwm_const, /* IFLA_CAN_XL_PWM */
+ priv->ctrlmode & CAN_CTRLMODE_XL_TMS);
+
return size;
}
@@ -644,7 +874,9 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
tdc_is_enabled = can_fd_tdc_is_enabled(priv);
tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
} else {
- return -EOPNOTSUPP; /* Place holder for CAN XL */
+ dbt_params = &priv->xl;
+ tdc_is_enabled = can_xl_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MANUAL;
}
tdc_const = dbt_params->tdc_const;
tdc = &dbt_params->tdc;
@@ -695,6 +927,42 @@ err_cancel:
return -EMSGSIZE;
}
+static int can_pwm_fill_info(struct sk_buff *skb, const struct can_priv *priv)
+{
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ const struct can_pwm *pwm = &priv->xl.pwm;
+ struct nlattr *nest;
+
+ if (!pwm_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_XL_PWM);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MIN, pwm_const->pwms_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MAX, pwm_const->pwms_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MIN, pwm_const->pwml_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MAX, pwm_const->pwml_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MIN, pwm_const->pwmo_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MAX, pwm_const->pwmo_max))
+ goto err_cancel;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
const struct can_priv *priv)
{
@@ -766,9 +1034,22 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
- can_ctrlmode_ext_fill_info(skb, priv)
- )
+ can_ctrlmode_ext_fill_info(skb, priv) ||
+
+ can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING,
+ &priv->xl.data_bittiming) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST,
+ priv->xl.data_bittiming_const) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST,
+ priv->xl.data_bitrate_const,
+ priv->xl.data_bitrate_const_cnt) ||
+
+ can_tdc_fill_info(skb, dev, IFLA_CAN_XL_TDC) ||
+
+ can_pwm_fill_info(skb, priv)
+ )
return -EMSGSIZE;
return 0;
diff --git a/drivers/net/can/dummy_can.c b/drivers/net/can/dummy_can.c
new file mode 100644
index 000000000000..41953655e3d3
--- /dev/null
+++ b/drivers/net/can/dummy_can.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org> */
+
+#include <linux/array_size.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/string_choices.h>
+#include <linux/units.h>
+
+#include <linux/can.h>
+#include <linux/can/bittiming.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+struct dummy_can {
+ struct can_priv can;
+ struct net_device *dev;
+};
+
+static struct dummy_can *dummy_can;
+
+static const struct can_bittiming_const dummy_can_bittiming_const = {
+ .name = "dummy_can CC",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_bittiming_const dummy_can_fd_databittiming_const = {
+ .name = "dummy_can FD",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_fd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_bittiming_const dummy_can_xl_databittiming_const = {
+ .name = "dummy_can XL",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_xl_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_pwm_const dummy_can_pwm_const = {
+ .pwms_min = 1,
+ .pwms_max = 8,
+ .pwml_min = 2,
+ .pwml_max = 24,
+ .pwmo_min = 0,
+ .pwmo_max = 16,
+};
+
+static void dummy_can_print_bittiming(struct net_device *dev,
+ struct can_bittiming *bt)
+{
+ netdev_dbg(dev, "\tbitrate: %u\n", bt->bitrate);
+ netdev_dbg(dev, "\tsample_point: %u\n", bt->sample_point);
+ netdev_dbg(dev, "\ttq: %u\n", bt->tq);
+ netdev_dbg(dev, "\tprop_seg: %u\n", bt->prop_seg);
+ netdev_dbg(dev, "\tphase_seg1: %u\n", bt->phase_seg1);
+ netdev_dbg(dev, "\tphase_seg2: %u\n", bt->phase_seg2);
+ netdev_dbg(dev, "\tsjw: %u\n", bt->sjw);
+ netdev_dbg(dev, "\tbrp: %u\n", bt->brp);
+}
+
+static void dummy_can_print_tdc(struct net_device *dev, struct can_tdc *tdc)
+{
+ netdev_dbg(dev, "\t\ttdcv: %u\n", tdc->tdcv);
+ netdev_dbg(dev, "\t\ttdco: %u\n", tdc->tdco);
+ netdev_dbg(dev, "\t\ttdcf: %u\n", tdc->tdcf);
+}
+
+static void dummy_can_print_pwm(struct net_device *dev, struct can_pwm *pwm,
+ struct can_bittiming *dbt)
+{
+ netdev_dbg(dev, "\t\tpwms: %u\n", pwm->pwms);
+ netdev_dbg(dev, "\t\tpwml: %u\n", pwm->pwml);
+ netdev_dbg(dev, "\t\tpwmo: %u\n", pwm->pwmo);
+}
+
+static void dummy_can_print_ctrlmode(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+ unsigned long supported = can_priv->ctrlmode_supported;
+ u32 enabled = can_priv->ctrlmode;
+
+ netdev_dbg(dev, "Control modes:\n");
+ netdev_dbg(dev, "\tsupported: 0x%08x\n", (u32)supported);
+ netdev_dbg(dev, "\tenabled: 0x%08x\n", enabled);
+
+ if (supported) {
+ int idx;
+
+ netdev_dbg(dev, "\tlist:");
+ for_each_set_bit(idx, &supported, BITS_PER_TYPE(u32))
+ netdev_dbg(dev, "\t\t%s: %s\n",
+ can_get_ctrlmode_str(BIT(idx)),
+ enabled & BIT(idx) ? "on" : "off");
+ }
+}
+
+static void dummy_can_print_bittiming_info(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+
+ netdev_dbg(dev, "Clock frequency: %u\n", can_priv->clock.freq);
+ netdev_dbg(dev, "Maximum bitrate: %u\n", can_priv->bitrate_max);
+ netdev_dbg(dev, "MTU: %u\n", dev->mtu);
+ netdev_dbg(dev, "\n");
+
+ dummy_can_print_ctrlmode(dev);
+ netdev_dbg(dev, "\n");
+
+ netdev_dbg(dev, "Classical CAN nominal bittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->bittiming);
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_FD) {
+ netdev_dbg(dev, "CAN FD databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->fd.data_bittiming);
+ if (can_fd_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN FD TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->fd.tdc);
+ }
+ }
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL) {
+ netdev_dbg(dev, "CAN XL databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->xl.data_bittiming);
+ if (can_xl_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN XL TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->xl.tdc);
+ }
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ netdev_dbg(dev, "\tCAN XL PWM:\n");
+ dummy_can_print_pwm(dev, &can_priv->xl.pwm,
+ &can_priv->xl.data_bittiming);
+ }
+ }
+ netdev_dbg(dev, "\n");
+}
+
+static int dummy_can_netdev_open(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int ret;
+
+ dummy_can_print_bittiming_info(dev);
+ netdev_dbg(dev, "error-signalling is %s\n",
+ str_enabled_disabled(!can_dev_in_xl_only_mode(priv)));
+
+ ret = open_candev(dev);
+ if (ret)
+ return ret;
+ netif_start_queue(dev);
+ netdev_dbg(dev, "dummy-can is up\n");
+
+ return 0;
+}
+
+static int dummy_can_netdev_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ close_candev(dev);
+ netdev_dbg(dev, "dummy-can is down\n");
+
+ return 0;
+}
+
+static netdev_tx_t dummy_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ can_put_echo_skb(skb, dev, 0, 0);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += can_get_echo_skb(dev, 0, NULL);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops dummy_can_netdev_ops = {
+ .ndo_open = dummy_can_netdev_open,
+ .ndo_stop = dummy_can_netdev_close,
+ .ndo_start_xmit = dummy_can_start_xmit,
+};
+
+static const struct ethtool_ops dummy_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int __init dummy_can_init(void)
+{
+ struct net_device *dev;
+ struct dummy_can *priv;
+ int ret;
+
+ dev = alloc_candev(sizeof(*priv), 1);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->netdev_ops = &dummy_can_netdev_ops;
+ dev->ethtool_ops = &dummy_can_ethtool_ops;
+ priv = netdev_priv(dev);
+ priv->can.bittiming_const = &dummy_can_bittiming_const;
+ priv->can.bitrate_max = 20 * MEGA /* BPS */;
+ priv->can.clock.freq = 160 * MEGA /* Hz */;
+ priv->can.fd.data_bittiming_const = &dummy_can_fd_databittiming_const;
+ priv->can.fd.tdc_const = &dummy_can_fd_tdc_const;
+ priv->can.xl.data_bittiming_const = &dummy_can_xl_databittiming_const;
+ priv->can.xl.tdc_const = &dummy_can_xl_tdc_const;
+ priv->can.xl.pwm_const = &dummy_can_pwm_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_RESTRICTED | CAN_CTRLMODE_XL |
+ CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TMS;
+ priv->dev = dev;
+
+ ret = register_candev(priv->dev);
+ if (ret) {
+ free_candev(priv->dev);
+ return ret;
+ }
+
+ dummy_can = priv;
+ netdev_dbg(dev, "dummy-can ready\n");
+
+ return 0;
+}
+
+static void __exit dummy_can_exit(void)
+{
+ struct net_device *dev = dummy_can->dev;
+
+ netdev_dbg(dev, "dummy-can bye bye\n");
+ unregister_candev(dev);
+ free_candev(dev);
+}
+
+module_init(dummy_can_init);
+module_exit(dummy_can_exit);
+
+MODULE_DESCRIPTION("A dummy CAN driver, mainly to test the netlink interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincent Mailhol <mailhol@kernel.org>");
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 49ab65274b51..7895e1fdea1c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -468,6 +468,7 @@ struct rcar_canfd_global {
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
+ struct clk *clk_ram; /* RAM clock */
unsigned long channels_mask; /* Enabled channels mask */
bool extclk; /* CANFD or Ext clock */
bool fdmode; /* CAN FD or Classical CAN only mode */
@@ -709,6 +710,11 @@ static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
rcar_canfd_update(val, val, addr);
}
+static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, 0, addr);
+}
+
static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
{
rcar_canfd_update(mask, val, addr);
@@ -755,25 +761,6 @@ static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
}
-static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
-{
- if (gpriv->info->ch_interface_mode) {
- u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
- : RCANFD_GEN4_FDCFG_CLOE;
-
- for_each_set_bit(ch, &gpriv->channels_mask,
- gpriv->info->max_channels)
- rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, val);
- } else {
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- }
-}
-
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
struct device *dev = &gpriv->pdev->dev;
@@ -806,6 +793,16 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
/* Reset Global error flags */
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
+ /* Set the controller into appropriate mode */
+ if (!gpriv->info->ch_interface_mode) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
+
/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
@@ -823,10 +820,23 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
- }
- /* Set the controller into appropriate mode */
- rcar_canfd_set_mode(gpriv);
+ /* Set the controller into appropriate mode */
+ if (gpriv->info->ch_interface_mode) {
+ /* Do not set CLOE and FDOE simultaneously */
+ if (!gpriv->fdmode) {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ } else {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ }
+ }
+ }
return 0;
}
@@ -1569,8 +1579,8 @@ static int rcar_canfd_close(struct net_device *ndev)
netif_stop_queue(ndev);
rcar_canfd_stop(ndev);
napi_disable(&priv->napi);
- clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
+ clk_disable_unprepare(gpriv->can_clk);
phy_power_off(priv->transceiver);
return 0;
}
@@ -1960,22 +1970,120 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
}
}
+static int rcar_canfd_global_init(struct rcar_canfd_global *gpriv)
+{
+ struct device *dev = &gpriv->pdev->dev;
+ u32 rule_entry = 0;
+ u32 ch, sts;
+ int err;
+
+ err = reset_control_reset(gpriv->rstc1);
+ if (err)
+ return err;
+
+ err = reset_control_reset(gpriv->rstc2);
+ if (err)
+ goto fail_reset1;
+
+ /* Enable peripheral clock for register access */
+ err = clk_prepare_enable(gpriv->clkp);
+ if (err) {
+ dev_err(dev, "failed to enable peripheral clock: %pe\n",
+ ERR_PTR(err));
+ goto fail_reset2;
+ }
+
+ /* Enable RAM clock */
+ err = clk_prepare_enable(gpriv->clk_ram);
+ if (err) {
+ dev_err(dev, "failed to enable RAM clock: %pe\n",
+ ERR_PTR(err));
+ goto fail_clk;
+ }
+
+ err = rcar_canfd_reset_controller(gpriv);
+ if (err) {
+ dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
+ goto fail_ram_clk;
+ }
+
+ /* Controller in Global reset & Channel reset mode */
+ rcar_canfd_configure_controller(gpriv);
+
+ /* Configure per channel attributes */
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ /* Configure Channel's Rx fifo */
+ rcar_canfd_configure_rx(gpriv, ch);
+
+ /* Configure Channel's Tx (Common) fifo */
+ rcar_canfd_configure_tx(gpriv, ch);
+
+ /* Configure receive rules */
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
+ }
+
+ /* Configure common interrupts */
+ rcar_canfd_enable_global_interrupts(gpriv);
+
+ /* Start Global operation mode */
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ RCANFD_GCTR_GMDC_GOPM);
+
+ /* Verify mode change */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
+ if (err) {
+ dev_err(dev, "global operational mode failed\n");
+ goto fail_mode;
+ }
+
+ return 0;
+
+fail_mode:
+ rcar_canfd_disable_global_interrupts(gpriv);
+fail_ram_clk:
+ clk_disable_unprepare(gpriv->clk_ram);
+fail_clk:
+ clk_disable_unprepare(gpriv->clkp);
+fail_reset2:
+ reset_control_assert(gpriv->rstc2);
+fail_reset1:
+ reset_control_assert(gpriv->rstc1);
+ return err;
+}
+
+static void rcar_canfd_global_deinit(struct rcar_canfd_global *gpriv, bool full)
+{
+ rcar_canfd_disable_global_interrupts(gpriv);
+
+ if (full) {
+ rcar_canfd_reset_controller(gpriv);
+
+ /* Enter global sleep mode */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ }
+
+ clk_disable_unprepare(gpriv->clk_ram);
+ clk_disable_unprepare(gpriv->clkp);
+ reset_control_assert(gpriv->rstc2);
+ reset_control_assert(gpriv->rstc1);
+}
+
static int rcar_canfd_probe(struct platform_device *pdev)
{
struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, };
const struct rcar_canfd_hw_info *info;
struct device *dev = &pdev->dev;
void __iomem *addr;
- u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
struct device_node *of_child;
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
- u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
- struct clk *clk_ram;
+ u32 ch, fcan_freq;
int i;
info = of_device_get_match_data(dev);
@@ -2065,10 +2173,10 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->extclk = gpriv->info->external_clk;
}
- clk_ram = devm_clk_get_optional_enabled(dev, "ram_clk");
- if (IS_ERR(clk_ram))
- return dev_err_probe(dev, PTR_ERR(clk_ram),
- "cannot get enabled ram clock\n");
+ gpriv->clk_ram = devm_clk_get_optional(dev, "ram_clk");
+ if (IS_ERR(gpriv->clk_ram))
+ return dev_err_probe(dev, PTR_ERR(gpriv->clk_ram),
+ "cannot get ram clock\n");
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
@@ -2117,59 +2225,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
}
- err = reset_control_reset(gpriv->rstc1);
+ err = rcar_canfd_global_init(gpriv);
if (err)
- goto fail_dev;
- err = reset_control_reset(gpriv->rstc2);
- if (err) {
- reset_control_assert(gpriv->rstc1);
- goto fail_dev;
- }
-
- /* Enable peripheral clock for register access */
- err = clk_prepare_enable(gpriv->clkp);
- if (err) {
- dev_err(dev, "failed to enable peripheral clock: %pe\n",
- ERR_PTR(err));
- goto fail_reset;
- }
-
- err = rcar_canfd_reset_controller(gpriv);
- if (err) {
- dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
- goto fail_clk;
- }
-
- /* Controller in Global reset & Channel reset mode */
- rcar_canfd_configure_controller(gpriv);
-
- /* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
- /* Configure Channel's Rx fifo */
- rcar_canfd_configure_rx(gpriv, ch);
-
- /* Configure Channel's Tx (Common) fifo */
- rcar_canfd_configure_tx(gpriv, ch);
-
- /* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
- rule_entry += RCANFD_CHANNEL_NUMRULES;
- }
-
- /* Configure common interrupts */
- rcar_canfd_enable_global_interrupts(gpriv);
-
- /* Start Global operation mode */
- rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
- RCANFD_GCTR_GMDC_GOPM);
-
- /* Verify mode change */
- err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
- !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
- if (err) {
- dev_err(dev, "global operational mode failed\n");
goto fail_mode;
- }
for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq,
@@ -2188,12 +2246,7 @@ fail_channel:
for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
- rcar_canfd_disable_global_interrupts(gpriv);
-fail_clk:
- clk_disable_unprepare(gpriv->clkp);
-fail_reset:
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, false);
fail_dev:
return err;
}
@@ -2203,33 +2256,79 @@ static void rcar_canfd_remove(struct platform_device *pdev)
struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
u32 ch;
- rcar_canfd_reset_controller(gpriv);
- rcar_canfd_disable_global_interrupts(gpriv);
-
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
- /* Enter global sleep mode */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
- clk_disable_unprepare(gpriv->clkp);
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, true);
}
-static int __maybe_unused rcar_canfd_suspend(struct device *dev)
+static int rcar_canfd_suspend(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ netif_device_detach(ndev);
+
+ err = rcar_canfd_close(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_close() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+ }
+
+ /* TODO: skip this if wake-up is enabled (wake-up is not yet supported) */
+ rcar_canfd_global_deinit(gpriv, false);
+
return 0;
}
-static int __maybe_unused rcar_canfd_resume(struct device *dev)
+static int rcar_canfd_resume(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ err = rcar_canfd_global_init(gpriv);
+ if (err) {
+ dev_err(dev, "rcar_canfd_global_init() failed %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ err = rcar_canfd_open(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_open() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ netif_device_attach(ndev);
+ }
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
- rcar_canfd_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
+ rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
@@ -2246,7 +2345,7 @@ static struct platform_driver rcar_canfd_driver = {
.driver = {
.name = RCANFD_DRV_NAME,
.of_match_table = of_match_ptr(rcar_canfd_of_table),
- .pm = &rcar_canfd_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_canfd_pm_ops),
},
.probe = rcar_canfd_probe,
.remove = rcar_canfd_remove,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index acfa49db3907..a8fa0d6516b9 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -548,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
goto out;
- while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
- (n < SJA1000_MAX_IRQ)) {
+ while ((n < SJA1000_MAX_IRQ) &&
+ (isrc = priv->read_reg(priv, SJA1000_IR))) {
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 6fcb301ef611..af52285d5a4e 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -657,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
u8 isrc, status;
int n = 0;
- while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
- (n < SUN4I_CAN_MAX_IRQ)) {
+ while ((n < SUN4I_CAN_MAX_IRQ) &&
+ (isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) {
n++;
status = readl(priv->base + SUN4I_REG_STA_ADDR);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 1321eb5e89ae..e29e85b67fd4 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -261,14 +261,21 @@ struct canfd_quirk {
u8 quirk;
} __packed;
+/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates
+ * a regular RX'ed CAN frame
+ */
+#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff
+
struct gs_host_frame {
- u32 echo_id;
- __le32 can_id;
+ struct_group(header,
+ u32 echo_id;
+ __le32 can_id;
- u8 can_dlc;
- u8 channel;
- u8 flags;
- u8 reserved;
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+ );
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
@@ -568,6 +575,37 @@ gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
return len;
}
+static unsigned int
+gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf,
+ unsigned int *data_length_p)
+{
+ unsigned int minimum_length, data_length = 0;
+
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX)
+ data_length = can_fd_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, canfd_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ } else {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX &&
+ !(hf->can_id & cpu_to_le32(CAN_RTR_FLAG)))
+ data_length = can_cc_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, classic_can_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ }
+
+ *data_length_p = data_length;
+ return minimum_length;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;
@@ -576,6 +614,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
+ unsigned int minimum_length, data_length;
struct gs_tx_context *txc;
struct can_frame *cf;
struct canfd_frame *cfd;
@@ -594,6 +633,15 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
}
+ minimum_length = sizeof(hf->header);
+ if (urb->actual_length < minimum_length) {
+ dev_err_ratelimited(&parent->udev->dev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
+ goto resubmit_urb;
+ }
+
/* device reports out of range channel id */
if (hf->channel >= parent->channel_cnt)
goto device_detach;
@@ -609,20 +657,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!netif_running(netdev))
goto resubmit_urb;
- if (hf->echo_id == -1) { /* normal rx */
+ minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length);
+ if (urb->actual_length < minimum_length) {
+ stats->rx_errors++;
+ stats->rx_length_errors++;
+
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
+ goto resubmit_urb;
+ }
+
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */
if (hf->flags & GS_CAN_FLAG_FD) {
skb = alloc_canfd_skb(netdev, &cfd);
if (!skb)
return;
cfd->can_id = le32_to_cpu(hf->can_id);
- cfd->len = can_fd_dlc2len(hf->can_dlc);
+ cfd->len = data_length;
if (hf->flags & GS_CAN_FLAG_BRS)
cfd->flags |= CANFD_BRS;
if (hf->flags & GS_CAN_FLAG_ESI)
cfd->flags |= CANFD_ESI;
- memcpy(cfd->data, hf->canfd->data, cfd->len);
+ memcpy(cfd->data, hf->canfd->data, data_length);
} else {
skb = alloc_can_skb(netdev, &cf);
if (!skb)
@@ -631,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id = le32_to_cpu(hf->can_id);
can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->classic_can->data, 8);
+ memcpy(cf->data, hf->classic_can->data, data_length);
/* ERROR frames tell us information about the controller */
if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
@@ -687,7 +748,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, parent->udev,
parent->pipe_in,
- hf, dev->parent->hf_size_rx,
+ hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -750,8 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct gs_can *dev = txc->dev;
struct net_device *netdev = dev->netdev;
- if (urb->status)
- netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
+ if (!urb->status)
+ return;
+
+ if (urb->status != -ESHUTDOWN && net_ratelimit())
+ netdev_info(netdev, "failed to xmit URB %u: %pe\n",
+ txc->echo_id, ERR_PTR(urb->status));
+
+ netdev->stats.tx_dropped++;
+ netdev->stats.tx_errors++;
+
+ can_free_echo_skb(netdev, txc->echo_id, NULL);
+ gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
+ netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index c29828a94ad0..1167d38344f1 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -685,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
- pos = round_up(pos,
+ pos = round_up(pos + 1,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
@@ -1732,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
* number of events in case of a heavy rx load on the bus.
*/
if (cmd->len == 0) {
- pos = round_up(pos, le16_to_cpu
+ pos = round_up(pos + 1, le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index bfe21f9f7dcd..cb23bea9c21b 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -376,8 +376,18 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
/* Register both leds */
- led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
- led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register sync_good LED\n");
+ goto out;
+ }
+
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register is_gm LED\n");
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ goto out;
+ }
ret = 0;
diff --git a/drivers/net/dsa/ks8995.c b/drivers/net/dsa/ks8995.c
index 5c4c83e00477..77d8b842693c 100644
--- a/drivers/net/dsa/ks8995.c
+++ b/drivers/net/dsa/ks8995.c
@@ -203,13 +203,13 @@ static const struct spi_device_id ks8995_id[] = {
};
MODULE_DEVICE_TABLE(spi, ks8995_id);
-static const struct of_device_id ks8895_spi_of_match[] = {
+static const struct of_device_id ks8995_spi_of_match[] = {
{ .compatible = "micrel,ks8995" },
{ .compatible = "micrel,ksz8864" },
{ .compatible = "micrel,ksz8795" },
{ },
};
-MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+MODULE_DEVICE_TABLE(of, ks8995_spi_of_match);
static inline u8 get_chip_id(u8 val)
{
@@ -842,7 +842,7 @@ static void ks8995_remove(struct spi_device *spi)
static struct spi_driver ks8995_driver = {
.driver = {
.name = "spi-ks8995",
- .of_match_table = ks8895_spi_of_match,
+ .of_match_table = ks8995_spi_of_match,
},
.probe = ks8995_probe,
.remove = ks8995_remove,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 933ae8dc6337..0c10351fe5eb 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2587,8 +2587,8 @@ static int ksz_irq_phy_setup(struct ksz_device *dev)
irq = irq_find_mapping(dev->ports[port].pirq.domain,
PORT_SRC_PHY_INT);
- if (irq < 0) {
- ret = irq;
+ if (!irq) {
+ ret = -EINVAL;
goto out;
}
ds->user_mii_bus->irq[phy] = irq;
@@ -2952,8 +2952,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
- if (pirq->irq_num < 0)
- return pirq->irq_num;
+ if (!pirq->irq_num)
+ return -EINVAL;
return ksz_irq_common_setup(dev, pirq);
}
@@ -3038,12 +3038,12 @@ static int ksz_setup(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, dev->ds) {
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
- goto out_girq;
+ goto port_release;
if (dev->info->ptp_capable) {
ret = ksz_ptp_irq_setup(ds, dp->index);
if (ret)
- goto out_pirq;
+ goto pirq_release;
}
}
}
@@ -3053,7 +3053,7 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret) {
dev_err(dev->dev, "Failed to register PTP clock: %d\n",
ret);
- goto out_ptpirq;
+ goto port_release;
}
}
@@ -3076,17 +3076,16 @@ static int ksz_setup(struct dsa_switch *ds)
out_ptp_clock_unregister:
if (dev->info->ptp_capable)
ksz_ptp_clock_unregister(ds);
-out_ptpirq:
- if (dev->irq > 0 && dev->info->ptp_capable)
- dsa_switch_for_each_user_port(dp, dev->ds)
- ksz_ptp_irq_free(ds, dp->index);
-out_pirq:
- if (dev->irq > 0)
- dsa_switch_for_each_user_port(dp, dev->ds)
+port_release:
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) {
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
+pirq_release:
ksz_irq_free(&dev->ports[dp->index].pirq);
-out_girq:
- if (dev->irq > 0)
+ }
ksz_irq_free(&dev->girq);
+ }
return ret;
}
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 35fc21b1ee48..997e4a76d0a6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -1093,19 +1093,19 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
static const char * const name[] = {"pdresp-msg", "xdreq-msg",
"sync-msg"};
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_irq *ptpirq = &port->ptpirq;
struct ksz_ptp_irq *ptpmsg_irq;
ptpmsg_irq = &port->ptpmsg_irq[n];
+ ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
+ if (!ptpmsg_irq->num)
+ return -EINVAL;
ptpmsg_irq->port = port;
ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
strscpy(ptpmsg_irq->name, name[n]);
- ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
- if (ptpmsg_irq->num < 0)
- return ptpmsg_irq->num;
-
return request_threaded_irq(ptpmsg_irq->num, NULL,
ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
ptpmsg_irq->name, ptpmsg_irq);
@@ -1135,12 +1135,9 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
if (!ptpirq->domain)
return -ENOMEM;
- for (irq = 0; irq < ptpirq->nirqs; irq++)
- irq_create_mapping(ptpirq->domain, irq);
-
ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
- if (ptpirq->irq_num < 0) {
- ret = ptpirq->irq_num;
+ if (!ptpirq->irq_num) {
+ ret = -EINVAL;
goto out;
}
@@ -1159,12 +1156,11 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
out_ptp_msg:
free_irq(ptpirq->irq_num, ptpirq);
- while (irq--)
+ while (irq--) {
free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
-out:
- for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_dispose_mapping(port->ptpmsg_irq[irq].num);
-
+ }
+out:
irq_domain_remove(ptpirq->domain);
return ret;
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index b1ae3b9de3d1..5a1496fff445 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -540,6 +540,7 @@ static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
ksz_pread16(dev, port, reg, &data16);
/* Update tune Adjust */
+ data16 &= ~PORT_TUNE_ADJ;
data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
ksz_pwrite16(dev, port, reg, data16);
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index f674c400f05b..aa2145cf29a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1302,14 +1302,7 @@ static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
- * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
- * we need to configure the PCS only (if even that).
*/
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
- speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
- else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
- speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
-
mac[port].speed = speed;
return 0;
diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c
index 944988e29127..ebfd34f72314 100644
--- a/drivers/net/dsa/yt921x.c
+++ b/drivers/net/dsa/yt921x.c
@@ -39,60 +39,55 @@ struct yt921x_mib_desc {
* to perform 32bit MIB overflow wraparound.
*/
static const struct yt921x_mib_desc yt921x_mib_descs[] = {
- MIB_DESC(1, 0x00, NULL), /* RxBroadcast */
- MIB_DESC(1, 0x04, NULL), /* RxPause */
- MIB_DESC(1, 0x08, NULL), /* RxMulticast */
- MIB_DESC(1, 0x0c, NULL), /* RxCrcErr */
-
- MIB_DESC(1, 0x10, NULL), /* RxAlignErr */
- MIB_DESC(1, 0x14, NULL), /* RxUnderSizeErr */
- MIB_DESC(1, 0x18, NULL), /* RxFragErr */
- MIB_DESC(1, 0x1c, NULL), /* RxPktSz64 */
-
- MIB_DESC(1, 0x20, NULL), /* RxPktSz65To127 */
- MIB_DESC(1, 0x24, NULL), /* RxPktSz128To255 */
- MIB_DESC(1, 0x28, NULL), /* RxPktSz256To511 */
- MIB_DESC(1, 0x2c, NULL), /* RxPktSz512To1023 */
-
- MIB_DESC(1, 0x30, NULL), /* RxPktSz1024To1518 */
- MIB_DESC(1, 0x34, NULL), /* RxPktSz1519ToMax */
- MIB_DESC(2, 0x38, NULL), /* RxGoodBytes */
- /* 0x3c */
-
- MIB_DESC(2, 0x40, "RxBadBytes"),
- /* 0x44 */
- MIB_DESC(2, 0x48, NULL), /* RxOverSzErr */
- /* 0x4c */
-
- MIB_DESC(1, 0x50, NULL), /* RxDropped */
- MIB_DESC(1, 0x54, NULL), /* TxBroadcast */
- MIB_DESC(1, 0x58, NULL), /* TxPause */
- MIB_DESC(1, 0x5c, NULL), /* TxMulticast */
-
- MIB_DESC(1, 0x60, NULL), /* TxUnderSizeErr */
- MIB_DESC(1, 0x64, NULL), /* TxPktSz64 */
- MIB_DESC(1, 0x68, NULL), /* TxPktSz65To127 */
- MIB_DESC(1, 0x6c, NULL), /* TxPktSz128To255 */
-
- MIB_DESC(1, 0x70, NULL), /* TxPktSz256To511 */
- MIB_DESC(1, 0x74, NULL), /* TxPktSz512To1023 */
- MIB_DESC(1, 0x78, NULL), /* TxPktSz1024To1518 */
- MIB_DESC(1, 0x7c, NULL), /* TxPktSz1519ToMax */
-
- MIB_DESC(2, 0x80, NULL), /* TxGoodBytes */
- /* 0x84 */
- MIB_DESC(2, 0x88, NULL), /* TxCollision */
- /* 0x8c */
-
- MIB_DESC(1, 0x90, NULL), /* TxExcessiveCollistion */
- MIB_DESC(1, 0x94, NULL), /* TxMultipleCollision */
- MIB_DESC(1, 0x98, NULL), /* TxSingleCollision */
- MIB_DESC(1, 0x9c, NULL), /* TxPkt */
-
- MIB_DESC(1, 0xa0, NULL), /* TxDeferred */
- MIB_DESC(1, 0xa4, NULL), /* TxLateCollision */
- MIB_DESC(1, 0xa8, "RxOAM"),
- MIB_DESC(1, 0xac, "TxOAM"),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL),
+ MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"),
};
struct yt921x_info {
@@ -705,7 +700,7 @@ static int yt921x_read_mib(struct yt921x_priv *priv, int port)
res = yt921x_reg_read(priv, reg + 4, &val1);
if (res)
break;
- val = ((u64)val0 << 32) | val1;
+ val = ((u64)val1 << 32) | val0;
}
WRITE_ONCE(*valp, val);
diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h
index 3e85d90826fb..44719d841d40 100644
--- a/drivers/net/dsa/yt921x.h
+++ b/drivers/net/dsa/yt921x.h
@@ -173,6 +173,60 @@
#define YT921X_MIB_CTRL_ALL_PORT BIT(0)
#define YT921X_MIBn_DATA0(port) (0xc0100 + 0x100 * (port))
#define YT921X_MIBn_DATAm(port, x) (YT921X_MIBn_DATA0(port) + 4 * (x))
+#define YT921X_MIB_DATA_RX_BROADCAST 0x00
+#define YT921X_MIB_DATA_RX_PAUSE 0x04
+#define YT921X_MIB_DATA_RX_MULTICAST 0x08
+#define YT921X_MIB_DATA_RX_CRC_ERR 0x0c
+
+#define YT921X_MIB_DATA_RX_ALIGN_ERR 0x10
+#define YT921X_MIB_DATA_RX_UNDERSIZE_ERR 0x14
+#define YT921X_MIB_DATA_RX_FRAG_ERR 0x18
+#define YT921X_MIB_DATA_RX_PKT_SZ_64 0x1c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127 0x20
+#define YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255 0x24
+#define YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511 0x28
+#define YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023 0x2c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518 0x30
+#define YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX 0x34
+/* 0x38: unused */
+#define YT921X_MIB_DATA_RX_GOOD_BYTES 0x3c
+
+/* 0x40: high 32 bits of RX_GOOD_BYTES */
+#define YT921X_MIB_DATA_RX_BAD_BYTES 0x44
+/* 0x48: high 32 bits of RX_BAD_BYTES */
+#define YT921X_MIB_DATA_RX_OVERSIZE_ERR 0x4c
+
+#define YT921X_MIB_DATA_RX_DROPPED 0x50
+#define YT921X_MIB_DATA_TX_BROADCAST 0x54
+#define YT921X_MIB_DATA_TX_PAUSE 0x58
+#define YT921X_MIB_DATA_TX_MULTICAST 0x5c
+
+#define YT921X_MIB_DATA_TX_UNDERSIZE_ERR 0x60
+#define YT921X_MIB_DATA_TX_PKT_SZ_64 0x64
+#define YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127 0x68
+#define YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255 0x6c
+
+#define YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511 0x70
+#define YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023 0x74
+#define YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518 0x78
+#define YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX 0x7c
+
+/* 0x80: unused */
+#define YT921X_MIB_DATA_TX_GOOD_BYTES 0x84
+/* 0x88: high 32 bits of TX_GOOD_BYTES */
+#define YT921X_MIB_DATA_TX_COLLISION 0x8c
+
+#define YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION 0x90
+#define YT921X_MIB_DATA_TX_MULTIPLE_COLLISION 0x94
+#define YT921X_MIB_DATA_TX_SINGLE_COLLISION 0x98
+#define YT921X_MIB_DATA_TX_PKT 0x9c
+
+#define YT921X_MIB_DATA_TX_DEFERRED 0xa0
+#define YT921X_MIB_DATA_TX_LATE_COLLISION 0xa4
+#define YT921X_MIB_DATA_RX_OAM 0xa8
+#define YT921X_MIB_DATA_TX_OAM 0xac
#define YT921X_EDATA_CTRL 0xe0000
#define YT921X_EDATA_CTRL_ADDR_M GENMASK(15, 8)
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index c373f21d95f5..0caabb0c3aa0 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -308,7 +308,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
if (!airoha_is_valid_gdm_port(eth, port))
return -EINVAL;
- if (dsa_port >= 0)
+ if (dsa_port >= 0 || eth->ports[1])
pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
: port->id;
else
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 0b53a1fab46d..4a6b35c84dab 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -255,7 +255,8 @@ int pdsc_dl_flash_update(struct devlink *dl,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index d8dc39da4161..b576be626a29 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -22,7 +22,8 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
}
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f3adf29b222b..0653e69f0ef7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1259,6 +1259,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
udp_tunnel_nic_reset_ntf(netdev);
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ goto err_txrx;
+
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
@@ -1268,6 +1273,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
return 0;
+err_txrx:
+ hw_if->disable_rx(pdata);
+ hw_if->disable_tx(pdata);
+
err_irqs:
xgbe_free_irqs(pdata);
@@ -1574,11 +1583,6 @@ static int xgbe_open(struct net_device *netdev)
goto err_dev_wq;
}
- /* Reset the phy settings */
- ret = xgbe_phy_reset(pdata);
- if (ret)
- goto err_an_wq;
-
/* Enable the clocks */
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 35a381a83647..a68757e8fd22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -989,6 +989,7 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
return ret;
}
phy_data->phydev = phydev;
+ phy_data->phydev->mac_managed_pm = true;
xgbe_phy_external_phy_quirks(pdata);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 1921741f7311..18b08277d2e1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
+#include "hw_atl/hw_atl_llh.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
+{
+ int err;
+ u32 val;
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
+
+ err = aq_hw_err_from_flags(hw);
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ hw, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index ffa6e4067c21..d89c63d88e4a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f21de0c21e52..d23d23bed39f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
+
+ /* There will be an extra fragment */
+ if (buff->len > AQ_CFG_RX_HDR_SIZE)
+ frag_cnt++;
+
buff_ = buff;
do {
bool is_rsc_completed = true;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 493432d036b9..c7895bfb2ecf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1198,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
- int err;
- u32 val;
-
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
- /* Invalidate Descriptor Cache to prevent writing to the cached
- * descriptors and to the data pointer of those descriptors
- */
- hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
-
- err = aq_hw_err_from_flags(self);
-
- if (err)
- goto err_exit;
-
- readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
- self, val, val == 1, 1000U, 10000U);
-
-err_exit:
- return err;
+ return aq_hw_invalidate_descriptor_cache(self);
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index b0ed572e88c6..0ce9caae8799 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -759,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
- return 0;
+ return aq_hw_invalidate_descriptor_cache(self);
}
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc8dec37a9e4..3d853eeb976f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3355,19 +3355,11 @@ static int bnx2x_get_rxfh_fields(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 bnx2x_get_rx_ring_count(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = BNX2X_NUM_ETH_QUEUES(bp);
- return 0;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return BNX2X_NUM_ETH_QUEUES(bp);
}
static int bnx2x_set_rxfh_fields(struct net_device *dev,
@@ -3674,7 +3666,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
@@ -3702,7 +3694,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f0f05d7315ac..aca4267babc8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -308,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/****************************************************************************
* General service functions
****************************************************************************/
-
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
@@ -12813,14 +12816,9 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -EAGAIN;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bnx2x_hwtstamp_ioctl(bp, ifr);
- default:
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
- }
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
static int bnx2x_validate_addr(struct net_device *dev)
@@ -13036,6 +13034,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+ .ndo_hwtstamp_get = bnx2x_hwtstamp_get,
+ .ndo_hwtstamp_set = bnx2x_hwtstamp_set,
};
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
@@ -15350,31 +15350,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
return 0;
}
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct bnx2x *bp = netdev_priv(dev);
int rc;
- DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+ DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n");
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
bp->hwtstamp_ioctl_called = true;
- bp->tx_type = config.tx_type;
- bp->rx_filter = config.rx_filter;
+ bp->tx_type = config->tx_type;
+ bp->rx_filter = config->rx_filter;
rc = bnx2x_configure_ptp_filters(bp);
- if (rc)
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "HW configuration failure");
return rc;
+ }
+
+ config->rx_filter = bp->rx_filter;
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct bnx2x *bp = netdev_priv(dev);
- config.rx_filter = bp->rx_filter;
+ config->rx_filter = bp->rx_filter;
+ config->tx_type = bp->tx_type;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Configures HW for PTP */
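As a usage illustration (a userspace sketch, not part of the patch), the same SIOCSHWTSTAMP request that the removed bnx2x_hwtstamp_ioctl() parsed by hand is now translated by the networking core into a struct kernel_hwtstamp_config and delivered to bnx2x_hwtstamp_set():

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = {};
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* routed to ndo_hwtstamp_set() */
	close(fd);
	return ret;
}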
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a625e7c311dd..d17d0ea89c36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -877,7 +877,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
next_tx_int:
cons = NEXT_TX(cons);
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
}
WRITE_ONCE(txr->tx_cons, cons);
@@ -4479,7 +4479,14 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+ RX_BD_TYPE_RX_AGG_BD;
+
+ /* On P7, setting EOP will cause the chip to disable
+ * Relaxed Ordering (RO) for TPA data. Disable EOP for
+ * potentially higher performance with RO.
+ */
+ if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
+ type |= RX_BD_FLAGS_AGG_EOP;
bnxt_init_rxbd_pages(ring, type);
}
@@ -5688,6 +5695,10 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
u16 cmd = bnxt_vf_req_snif[i];
unsigned int bit, idx;
+ if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
+ cmd == HWRM_PORT_PHY_QCFG)
+ continue;
+
idx = cmd / 32;
bit = cmd % 32;
data[idx] |= 1 << bit;
@@ -8506,6 +8517,11 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+ if (resp->roce_bidi_opt_mode &
+ FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
+ bp->cos0_cos1_shared = 1;
+ else
+ bp->cos0_cos1_shared = 0;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -9653,6 +9669,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
@@ -14020,11 +14038,19 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int i = bnapi->index;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
+ int i = bnapi->index, j;
netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ cpr2 = &cpr->cp_ring_arr[j];
+ if (!cpr2->bnapi)
+ continue;
+ netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, j, cpr2->cp_ring_struct.fw_ring_id,
+ cpr2->cp_raw_cons);
+ }
}
static void bnxt_dbg_dump_states(struct bnxt *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3613a172483a..f5f07a7e6b29 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -131,6 +131,7 @@ struct rx_bd {
#define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
#define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
#define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_AGG_EOP (1 << 6)
#define RX_BD_FLAGS_EOP (1 << 7)
#define RX_BD_FLAGS_BUFFERS (3 << 8)
#define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
@@ -2424,6 +2425,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 cos0_cos1_shared;
u8 num_tc;
u16 max_pfcwd_tmo_ms;
@@ -2482,6 +2484,7 @@ struct bnxt {
#define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
#define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
+ #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(9)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
#define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 67ca02d84c97..15de802bbac4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1086,7 +1086,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
@@ -1168,7 +1169,8 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 41686a6f84b5..efb9bf20e66b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -688,16 +688,22 @@ skip_ring_stats:
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
}
}
@@ -4617,6 +4623,11 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
return 0;
+ if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+ bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE) {
+ NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T");
+ return -EOPNOTSUPP;
+ }
switch (bp->link_info.module_status) {
case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 80fed2c07b9e..be7deb9cc410 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -332,6 +332,38 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return rc;
}
+static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input *req;
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ return 0;
+
+ vf = &bp->pf.vf[vf_id];
+
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(vf->fw_fid);
+ switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
+ case BNXT_VF_LINK_FORCED:
+ req->options =
+ FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
+ break;
+ case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
+ break;
+ default:
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ break;
+ }
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
struct bnxt *bp = netdev_priv(dev);
@@ -357,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
break;
default:
netdev_err(bp->dev, "Invalid link option\n");
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
- if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
+ rc = bnxt_set_vf_link_admin_state(bp, vf_id);
+ else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
@@ -666,15 +699,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
+
+ vf->fw_fid = pf->first_vf_id + i;
+ rc = bnxt_set_vf_link_admin_state(bp, i);
+ if (rc)
+ break;
+
if (reset)
__bnxt_set_vf_params(bp, i);
- req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+ req->vf_id = cpu_to_le16(vf->fw_fid);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = pf->first_vf_id + i;
}
if (pf->active_vfs) {
@@ -741,6 +780,12 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ req->enables |=
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ }
+
mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index f8c2c72b382d..927971c362f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -142,7 +142,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
- edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
netdev_unlock(dev);
@@ -159,8 +158,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
ulp = edev->ulp_tbl;
netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
- if (ulp->msix_requested)
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
@@ -298,7 +295,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
struct bnxt_ulp_ops *ops;
bool reset = false;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
@@ -321,7 +318,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 7b9dd8ebe4bc..3c5b8a53f715 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -58,7 +58,6 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
- #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index fc6053414b7d..413028bdcacb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -541,21 +541,11 @@ static int nicvf_get_rxfh_fields(struct net_device *dev,
return 0;
}
-static int nicvf_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 nicvf_get_rx_ring_count(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nic->rx_queues;
- ret = 0;
- break;
- default:
- break;
- }
- return ret;
+ return nic->rx_queues;
}
static int nicvf_set_rxfh_fields(struct net_device *dev,
@@ -861,7 +851,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
- .get_rxnfc = nicvf_get_rxnfc,
+ .get_rx_ring_count = nicvf_get_rx_ring_count,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7e2283c95b97..66b8854e059f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3485,7 +3485,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
struct adapter *adap = pi->adapter;
struct ch_sched_queue qe = { 0 };
struct ch_sched_params p = { 0 };
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 req_rate;
int err = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1672d3afe5be..f8dcf0b4abcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -56,7 +56,7 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
struct ch_sched_queue qe;
- struct sched_class *e;
+ struct ch_sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -180,7 +180,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u32 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 338b04f339b3..a2dcd2e24263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -330,7 +330,7 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
struct cxgb4_tc_port_mqprio *tc_port_mqprio;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u8 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index a1b14468d1ff..38a30aeee122 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -44,7 +44,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
{
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
e = &s->tab[p->u.params.class];
@@ -122,7 +122,7 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
const u32 val)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
+ struct ch_sched_class *e, *end;
void *found = NULL;
/* Look for an entry with matching @val */
@@ -166,8 +166,8 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p)
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_queue_entry *qe = NULL;
@@ -187,7 +187,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
@@ -218,7 +218,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
unsigned int qid;
int err = 0;
@@ -260,7 +260,7 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -288,7 +288,7 @@ static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
struct sched_table *s = pi->sched_tbl;
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -322,7 +322,7 @@ out_err:
}
static void t4_sched_class_unbind_all(struct port_info *pi,
- struct sched_class *e,
+ struct ch_sched_class *e,
enum sched_bind_type type)
{
if (!e)
@@ -476,12 +476,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
}
/* If @p is NULL, fetch any available unused class */
-static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
- const struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *found = NULL;
- struct sched_class *e, *end;
+ struct ch_sched_class *found = NULL;
+ struct ch_sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -522,10 +522,10 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
return found;
}
-static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
- struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
{
- struct sched_class *e = NULL;
+ struct ch_sched_class *e = NULL;
u8 class_id;
int err;
@@ -579,8 +579,8 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
* scheduling class with matching @p is found, then the matching class is
* returned.
*/
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p)
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
{
struct port_info *pi = netdev2pinfo(dev);
u8 class_id;
@@ -607,7 +607,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct ch_sched_params p;
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 speed;
int ret;
@@ -640,7 +640,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
}
}
-static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+static void t4_sched_class_free(struct net_device *dev, struct ch_sched_class *e)
{
struct port_info *pi = netdev2pinfo(dev);
@@ -660,7 +660,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
s->sched_size = sched_size;
for (i = 0; i < s->sched_size; i++) {
- memset(&s->tab[i], 0, sizeof(struct sched_class));
+ memset(&s->tab[i], 0, sizeof(struct ch_sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].entry_list);
@@ -682,7 +682,7 @@ void t4_cleanup_sched(struct adapter *adap)
continue;
for (i = 0; i < s->sched_size; i++) {
- struct sched_class *e;
+ struct ch_sched_class *e;
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 6b3c778815f0..4d3b5a757536 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -71,7 +71,7 @@ struct sched_flowc_entry {
struct ch_sched_flowc param;
};
-struct sched_class {
+struct ch_sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[] __counted_by(sched_size);
+ struct ch_sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
@@ -103,15 +103,15 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p);
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type);
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p);
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 4036db466e18..ee19933e2cca 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -159,19 +159,13 @@ static u8 tcp_state_to_flowc_state(u8 state)
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt)
{
- struct flowc_packed {
- struct fw_flowc_wr fc;
- struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
- } __packed sflowc;
+ DEFINE_RAW_FLEX(struct fw_flowc_wr, flowc, mnemval, FW_FLOWC_MNEM_MAX);
int nparams, paramidx, flowclen16, flowclen;
- struct fw_flowc_wr *flowc;
struct chtls_sock *csk;
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
tp = tcp_sk(sk);
- memset(&sflowc, 0, sizeof(sflowc));
- flowc = &sflowc.fc;
#define FLOWC_PARAM(__m, __v) \
do { \
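General pattern behind the conversion above, shown as a standalone sketch (struct demo_hdr is a made-up type, not from this driver): DEFINE_RAW_FLEX() declares a zero-initialized on-stack buffer sized for the struct plus a fixed number of trailing flexible-array elements, which is why the packed wrapper struct and the explicit memset() can be dropped.

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_hdr {			/* made-up example type */
	u32 nvals;
	u32 vals[];			/* flexible array member */
};

static u32 demo_sum_vals(void)
{
	DEFINE_RAW_FLEX(struct demo_hdr, hdr, vals, 4);	/* room for 4 vals */
	u32 i, sum = 0;

	hdr->nvals = 4;
	for (i = 0; i < hdr->nvals; i++)
		hdr->vals[i] = i + 1;

	for (i = 0; i < hdr->nvals; i++)
		sum += hdr->vals[i];

	return sum;			/* 1 + 2 + 3 + 4 = 10 */
}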
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cb004fd16252..5bb31c8fab39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
- struct sk_buff **skb)
+ struct sk_buff **skb,
+ struct be_wrb_params *wrb_params)
{
struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
bool os2bmc = false;
@@ -1360,7 +1361,7 @@ done:
* to BMC, asic expects the vlan to be inline in the packet.
*/
if (os2bmc)
- *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
return os2bmc;
}
@@ -1387,7 +1388,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
*/
- if (be_send_pkt_to_bmc(adapter, &skb)) {
+ if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index d09e456f14c0..ed3fa80af8c3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -467,6 +467,47 @@ revert_values:
return res;
}
+static void dpaa_get_pause_stats(struct net_device *net_dev,
+ struct ethtool_pause_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_pause_stats)
+ mac_dev->get_pause_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_rmon_stats(struct net_device *net_dev,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_rmon_stats)
+ mac_dev->get_rmon_stats(mac_dev->fman_mac, s, ranges);
+}
+
+static void dpaa_get_eth_ctrl_stats(struct net_device *net_dev,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_ctrl_stats)
+ mac_dev->get_eth_ctrl_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_eth_mac_stats(struct net_device *net_dev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_mac_stats)
+ mac_dev->get_eth_mac_stats(mac_dev->fman_mac, s);
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
@@ -487,4 +528,8 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
+ .get_pause_stats = dpaa_get_pause_stats,
+ .get_rmon_stats = dpaa_get_rmon_stats,
+ .get_eth_ctrl_stats = dpaa_get_eth_ctrl_stats,
+ .get_eth_mac_stats = dpaa_get_eth_mac_stats,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index ebea4298791c..3ed0f7a02767 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -170,6 +170,9 @@
/* Port MAC 0/1 Maximum Frame Length Register */
#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400)
+/* Port internal MDIO base address, used to access the PCS */
+#define ENETC4_PM_IMDIO_BASE 0x5030
+
/* Port MAC 0/1 Pause Quanta Register */
#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400)
@@ -198,6 +201,9 @@
#define SSP_1G 2
#define PM_IF_MODE_ENA BIT(15)
+/* Port external MDIO base address, used to access the off-chip PHY */
+#define ENETC4_EMDIO_BASE 0x5c00
+
/**********************ENETC Pseudo MAC port registers************************/
/* Port pseudo MAC receive octets counter (64-bit) */
#define ENETC4_PPMROCR 0x5080
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 9c634205e2a7..76263b8566bb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -176,7 +176,12 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_EMDIO_BASE;
+
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
err = of_mdiobus_register(bus, np);
@@ -221,7 +226,12 @@ static int enetc_imdio_create(struct enetc_pf *pf)
bus->phy_mask = ~0;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_PM_IMDIO_BASE;
+
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
err = mdiobus_register(bus);
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
index d7aee3c934d3..443983fdecd9 100644
--- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -67,6 +67,9 @@
#define IERB_EMDIOFAUXR 0x344
#define IERB_T0FAUXR 0x444
#define IERB_ETBCR(a) (0x300c + 0x100 * (a))
+#define IERB_LBCR(a) (0x1010 + 0x40 * (a))
+#define LBCR_MDIO_PHYAD_PRTAD(addr) (((addr) & 0x1f) << 8)
+
#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a))
#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a))
#define FAUXR_LDID GENMASK(3, 0)
@@ -322,6 +325,142 @@ static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv)
1000, 100000, true, priv->prb, PRB_NETCRR);
}
+static int netc_get_phy_addr(struct device_node *np)
+{
+ struct device_node *mdio_node, *phy_node;
+ u32 addr = 0;
+ int err = 0;
+
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ return 0;
+
+ phy_node = of_get_next_child(mdio_node, NULL);
+ if (!phy_node)
+ goto of_put_mdio_node;
+
+ err = of_property_read_u32(phy_node, "reg", &addr);
+ if (err)
+ goto of_put_phy_node;
+
+ if (addr >= PHY_MAX_ADDR)
+ err = -EINVAL;
+
+of_put_phy_node:
+ of_node_put(phy_node);
+
+of_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return err ? err : addr;
+}
+
+static int netc_parse_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ u32 mask = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ u32 addr;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &addr);
+ if (err)
+ return err;
+
+ if (addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mask |= BIT(addr);
+ }
+
+ *phy_mask = mask;
+
+ return 0;
+}
+
+static int netc_get_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,ee00"))
+ continue;
+
+ return netc_parse_emdio_phy_mask(gchild, phy_mask);
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int bus_devfn, addr, err;
+ u32 phy_mask = 0;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ /* Update the port EMDIO PHY address by parsing the PHY properties.
+ * This is needed when using the port EMDIO and is harmless when
+ * using the central EMDIO, so apply it in all cases.
+ */
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(gchild);
+ if (addr < 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
+ * so there is no need to set the register.
+ */
+ if (!addr)
+ continue;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(0),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(1),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(2),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int imx95_ierb_init(struct platform_device *pdev)
{
struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
@@ -349,7 +488,7 @@ static int imx95_ierb_init(struct platform_device *pdev)
/* NETC TIMER */
netc_reg_write(priv->ierb, IERB_T0FAUXR, 7);
- return 0;
+ return imx95_enetc_mdio_phyaddr_config(pdev);
}
static int imx94_get_enetc_id(struct device_node *np)
@@ -424,12 +563,64 @@ end:
return 0;
}
+static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
+ struct device_node *np,
+ u32 phy_mask)
+{
+ struct device *dev = &priv->pdev->dev;
+ int bus_devfn, addr;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(np);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(np);
+ if (addr <= 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC0_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC1_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC2_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int imx94_ierb_init(struct platform_device *pdev)
{
struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
+ u32 phy_mask = 0;
int err;
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
for_each_child_of_node_scoped(np, child) {
for_each_child_of_node_scoped(child, gchild) {
if (!of_device_is_compatible(gchild, "pci1131,e101"))
@@ -438,6 +629,11 @@ static int imx94_ierb_init(struct platform_device *pdev)
err = imx94_enetc_update_tid(priv, gchild);
if (err)
return err;
+
+ err = imx94_enetc_mdio_phyaddr_config(priv, gchild,
+ phy_mask);
+ if (err)
+ return err;
}
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 41e0d85d15da..fd9a93d02f8e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -24,9 +24,7 @@
#include <linux/timecounter.h>
#include <net/xdp.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -242,23 +240,6 @@ struct bufdesc_ex {
__fec16 res0[4];
};
-/*
- * The following definitions courtesy of commproc.h, which where
- * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
- */
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
-
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
@@ -530,12 +511,6 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
-struct fec_enet_priv_txrx_info {
- int offset;
- struct page *page;
- struct sk_buff *skb;
-};
-
enum {
RX_XDP_REDIRECT = 0,
RX_XDP_PASS,
@@ -575,7 +550,7 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+ struct page *rx_buf[RX_RING_SIZE];
/* page_pool */
struct page_pool *page_pool;
@@ -668,7 +643,6 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
- unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
@@ -687,6 +661,7 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ bool perout_enable;
struct hrtimer perout_timer;
u64 perout_stime;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b6fbb84cfb06..c685a5c0cc51 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -253,9 +253,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
+#ifndef CONFIG_M5272
#define OPT_ARCH_HAS_MAX_FL 1
#else
#define OPT_ARCH_HAS_MAX_FL 0
@@ -1012,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
rxq->bd.cur = rxq->bd.base;
}
@@ -1062,7 +1060,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -1657,8 +1655,7 @@ static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
if (unlikely(!new_page))
return -ENOMEM;
- rxq->rx_skb_info[index].page = new_page;
- rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[index] = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
@@ -1773,7 +1770,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
__fec32 cbd_bufaddr;
u32 sub_len = 4;
-#if !defined(CONFIG_M5272)
/*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
* FEC_RACC_SHIFT16 is set by default in the probe function.
*/
@@ -1781,7 +1777,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
data_start += 2;
sub_len += 2;
}
-#endif
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1840,7 +1835,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- page = rxq->rx_skb_info[index].page;
+ page = rxq->rx_buf[index];
cbd_bufaddr = bdp->cbd_bufaddr;
if (fec_enet_update_cbd(rxq, bdp, index)) {
ndev->stats.rx_dropped++;
@@ -2517,9 +2512,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_set_max_speed(phy_dev, 1000);
phy_remove_link_mode(phy_dev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-#if !defined(CONFIG_M5272)
phy_support_sym_pause(phy_dev);
-#endif
}
else
phy_set_max_speed(phy_dev, 100);
@@ -2710,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
}
/* List of registers that can be safety be read to dump them with ethtool */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
@@ -2786,30 +2777,22 @@ static u32 fec_enet_register_offset[] = {
static void fec_enet_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *regbuf)
{
+ u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *reg_list = fec_enet_register_offset;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
- u32 *reg_list;
- u32 reg_cnt;
-
- if (!of_machine_is_compatible("fsl,imx6ul")) {
- reg_list = fec_enet_register_offset;
- reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
- } else {
+
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
+ if (of_machine_is_compatible("fsl,imx6ul")) {
reg_list = fec_enet_register_offset_6ul;
reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
}
-#else
- /* coldfire */
- static u32 *reg_list = fec_enet_register_offset;
- static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
+
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -3328,7 +3311,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
+ false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
@@ -3454,6 +3438,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
return err;
}
+ /* Some platforms require the RX buffer to be 64-byte aligned, others
+ * only 16-byte or 4-byte aligned. Since the page pool was introduced
+ * into the driver, the RX buffer address is always the page address
+ * plus FEC_ENET_XDP_HEADROOM, and FEC_ENET_XDP_HEADROOM is 256 bytes
+ * (256 & 0x3f == 0), which satisfies all platforms. The BUILD_BUG_ON()
+ * below keeps future changes to FEC_ENET_XDP_HEADROOM from violating
+ * this hardware requirement.
+ */
+ BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
+
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -3462,8 +3459,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skb_info[i].page = page;
- rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[i] = page;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3476,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
return 0;
err_alloc:
@@ -3512,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
return 0;
@@ -4089,10 +4085,8 @@ static int fec_enet_init(struct net_device *ndev)
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
- fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
@@ -4181,10 +4175,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
fep->tx_align = 0;
- fep->rx_align = 0x3f;
- }
ndev->hw_features = ndev->features;
@@ -4402,11 +4394,9 @@ fec_probe(struct platform_device *pdev)
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index fa88b47d526c..4b7bad9a485d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -128,6 +128,12 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->perout_enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ dev_err(&fep->pdev->dev, "PEROUT is running");
+ return -EBUSY;
+ }
+
if (fep->pps_enable == enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
@@ -243,6 +249,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
* the FEC_TCCR register in time and missed the start time.
*/
if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ fep->perout_enable = false;
dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -1;
@@ -497,7 +504,10 @@ static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
unsigned long flags;
+ hrtimer_cancel(&fep->perout_timer);
+
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ fep->perout_enable = false;
writel(0, fep->hwp + FEC_TCSR(channel));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
@@ -529,6 +539,8 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return ret;
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ u32 reload_period;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -548,12 +560,14 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
- fep->reload_period = div_u64(period_ns, 2);
- if (on && fep->reload_period) {
+ reload_period = div_u64(period_ns, 2);
+ if (on && reload_period) {
+ u64 perout_stime;
+
/* Convert 1588 timestamp to ns*/
start_time.tv_sec = rq->perout.start.sec;
start_time.tv_nsec = rq->perout.start.nsec;
- fep->perout_stime = timespec64_to_ns(&start_time);
+ perout_stime = timespec64_to_ns(&start_time);
mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {
@@ -562,18 +576,41 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (fep->pps_enable) {
+ dev_err(&fep->pdev->dev, "PPS is running");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (fep->perout_enable) {
+ dev_err(&fep->pdev->dev,
+ "PEROUT has been enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
/* Read current timestamp */
curr_time = timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
- mutex_unlock(&fep->ptp_clk_mutex);
+ if (perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev,
+ "Start time must be greater than current time\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
/* Calculate time difference */
- delta = fep->perout_stime - curr_time;
+ delta = perout_stime - curr_time;
+ fep->reload_period = reload_period;
+ fep->perout_stime = perout_stime;
+ fep->perout_enable = true;
- if (fep->perout_stime <= curr_time) {
- dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
- return -EINVAL;
- }
+unlock:
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ if (ret)
+ return ret;
/* Because the timer counter of FEC only has 31-bits, correspondingly,
* the time comparison register FEC_TCCR also only low 31 bits can be
@@ -681,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(fep->ptp_clock, &event);
+ if (fep->pps_enable) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ }
+
return IRQ_HANDLED;
}
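
With the checks added above, a periodic output request must carry a start time that still lies in the PHC's future, must not set any flags, and cannot coexist with an active PPS request. A minimal user-space sketch of exercising this through the standard PTP character-device ioctl; the device path /dev/ptp0, channel index 0 and the two-second lead are illustrative assumptions, not values taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
        struct ptp_perout_request req;
        struct timespec now;
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed PHC node */

        if (fd < 0)
                return 1;

        /* Read the PHC itself; fec_ptp_enable() compares against this counter. */
        if (clock_gettime(FD_TO_CLOCKID(fd), &now))
                return 1;

        memset(&req, 0, sizeof(req));
        req.index = 0;                  /* assumed channel */
        req.flags = 0;                  /* any flag is rejected with -EOPNOTSUPP */
        req.start.sec = now.tv_sec + 2; /* must lie in the future */
        req.start.nsec = 0;
        req.period.sec = 1;             /* 1 Hz output */
        req.period.nsec = 0;

        if (ioctl(fd, PTP_PEROUT_REQUEST2, &req))
                perror("PTP_PEROUT_REQUEST2");

        close(fd);
        return 0;
}

With this patch the driver returns -EINVAL when the start time is already behind the PHC and -EBUSY when PPS (or another PEROUT) is active on the clock.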
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 0291093f2e4e..c84f0336c94c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -649,6 +649,7 @@ static u32 memac_if_mode(phy_interface_t interface)
return IF_MODE_GMII | IF_MODE_RGMII;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
return IF_MODE_GMII;
case PHY_INTERFACE_MODE_10GBASER:
@@ -667,6 +668,7 @@ static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
return memac->sgmii_pcs;
case PHY_INTERFACE_MODE_QSGMII:
return memac->qsgmii_pcs;
@@ -685,6 +687,7 @@ static int memac_prepare(struct phylink_config *config, unsigned int mode,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_10GBASER:
return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
@@ -897,6 +900,89 @@ static int memac_set_exception(struct fman_mac *memac,
return 0;
}
+static u64 memac_read64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
+ do {
+ high = ioread32be(reg + 4);
+ low = ioread32be(reg);
+ tmp = ioread32be(reg + 4);
+ } while (high != tmp);
+
+ return ((u64)high << 32) | low;
+}
+
+static void memac_get_pause_stats(struct fman_mac *memac,
+ struct ethtool_pause_stats *s)
+{
+ s->tx_pause_frames = memac_read64(&memac->regs->txpf_l);
+ s->rx_pause_frames = memac_read64(&memac->regs->rxpf_l);
+}
+
+static const struct ethtool_rmon_hist_range memac_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 9600 },
+ {},
+};
+
+static void memac_get_rmon_stats(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = memac_read64(&memac->regs->rund_l);
+ s->oversize_pkts = memac_read64(&memac->regs->rovr_l);
+ s->fragments = memac_read64(&memac->regs->rfrg_l);
+ s->jabbers = memac_read64(&memac->regs->rjbr_l);
+
+ s->hist[0] = memac_read64(&memac->regs->r64_l);
+ s->hist[1] = memac_read64(&memac->regs->r127_l);
+ s->hist[2] = memac_read64(&memac->regs->r255_l);
+ s->hist[3] = memac_read64(&memac->regs->r511_l);
+ s->hist[4] = memac_read64(&memac->regs->r1023_l);
+ s->hist[5] = memac_read64(&memac->regs->r1518_l);
+ s->hist[6] = memac_read64(&memac->regs->r1519x_l);
+
+ s->hist_tx[0] = memac_read64(&memac->regs->t64_l);
+ s->hist_tx[1] = memac_read64(&memac->regs->t127_l);
+ s->hist_tx[2] = memac_read64(&memac->regs->t255_l);
+ s->hist_tx[3] = memac_read64(&memac->regs->t511_l);
+ s->hist_tx[4] = memac_read64(&memac->regs->t1023_l);
+ s->hist_tx[5] = memac_read64(&memac->regs->t1518_l);
+ s->hist_tx[6] = memac_read64(&memac->regs->t1519x_l);
+
+ *ranges = memac_rmon_ranges;
+}
+
+static void memac_get_eth_ctrl_stats(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = memac_read64(&memac->regs->tcnp_l);
+ s->MACControlFramesReceived = memac_read64(&memac->regs->rcnp_l);
+}
+
+static void memac_get_eth_mac_stats(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = memac_read64(&memac->regs->tfrm_l);
+ s->FramesReceivedOK = memac_read64(&memac->regs->rfrm_l);
+ s->FrameCheckSequenceErrors = memac_read64(&memac->regs->rfcs_l);
+ s->AlignmentErrors = memac_read64(&memac->regs->raln_l);
+ s->OctetsTransmittedOK = memac_read64(&memac->regs->teoct_l);
+ s->FramesLostDueToIntMACXmitError = memac_read64(&memac->regs->terr_l);
+ s->OctetsReceivedOK = memac_read64(&memac->regs->reoct_l);
+ s->FramesLostDueToIntMACRcvError = memac_read64(&memac->regs->rdrntp_l);
+ s->MulticastFramesXmittedOK = memac_read64(&memac->regs->tmca_l);
+ s->BroadcastFramesXmittedOK = memac_read64(&memac->regs->tbca_l);
+ s->MulticastFramesReceivedOK = memac_read64(&memac->regs->rmca_l);
+ s->BroadcastFramesReceivedOK = memac_read64(&memac->regs->rbca_l);
+}
+
static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
@@ -1089,6 +1175,10 @@ int memac_initialization(struct mac_device *mac_dev,
mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->enable = memac_enable;
mac_dev->disable = memac_disable;
+ mac_dev->get_pause_stats = memac_get_pause_stats;
+ mac_dev->get_rmon_stats = memac_get_rmon_stats;
+ mac_dev->get_eth_ctrl_stats = memac_get_eth_ctrl_stats;
+ mac_dev->get_eth_mac_stats = memac_get_eth_mac_stats;
mac_dev->fman_mac = memac_config(mac_dev, params);
if (!mac_dev->fman_mac)
@@ -1226,6 +1316,7 @@ int memac_initialization(struct mac_device *mac_dev,
* those configuration modes don't use in-band autonegotiation.
*/
if (!of_property_present(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_2500BASEX &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
mac_dev->phylink_config.default_an_inband = true;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 955ace338965..63c2c5b4f99e 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -16,6 +16,11 @@
#include "fman.h"
#include "fman_mac.h"
+struct ethtool_eth_ctrl_stats;
+struct ethtool_eth_mac_stats;
+struct ethtool_pause_stats;
+struct ethtool_rmon_stats;
+struct ethtool_rmon_hist_range;
struct fman_mac;
struct mac_priv_s;
@@ -46,6 +51,15 @@ struct mac_device {
enet_addr_t *eth_addr);
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*get_pause_stats)(struct fman_mac *memac,
+ struct ethtool_pause_stats *s);
+ void (*get_rmon_stats)(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_eth_ctrl_stats)(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s);
+ void (*get_eth_mac_stats)(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s);
void (*update_speed)(struct mac_device *mac_dev, int speed);
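
The new mac_device hooks are optional callbacks consumed by the dpaa ethtool path; the corresponding ethtool-side change is not part of this excerpt, but the glue would presumably look roughly like the sketch below (dpaa_get_pause_stats and the priv->mac_dev access are assumptions for illustration, based on the existing dpaa_eth layout):

static void dpaa_get_pause_stats(struct net_device *net_dev,
                                 struct ethtool_pause_stats *s)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct mac_device *mac_dev = priv->mac_dev;

        /* The hook is optional; only MAC types that set it (memac above)
         * report hardware pause statistics.
         */
        if (mac_dev->get_pause_stats)
                mac_dev->get_pause_stats(mac_dev->fman_mac, s);
}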
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index a33b44c1eb86..970d5ca8cdde 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -205,6 +205,13 @@ struct gve_rx_buf_state_dqo {
s16 next;
};
+/* Wrapper for XDP Rx metadata */
+struct gve_xdp_buff {
+ struct xdp_buff xdp;
+ struct gve_priv *gve;
+ const struct gve_rx_compl_desc_dqo *compl_desc;
+};
+
/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
s16 head;
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 6eb442096e02..5871f773f0c7 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -36,6 +36,7 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
bool gve_xdp_poll_dqo(struct gve_notify_block *block);
bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6fb8fbb38a7d..a5a2b18d309b 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2188,10 +2188,6 @@ static int gve_set_ts_config(struct net_device *dev,
}
kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
- gve_clock_nic_ts_read(priv);
- ptp_schedule_worker(priv->ptp->clock, 0);
- } else {
- ptp_cancel_worker_sync(priv->ptp->clock);
}
priv->ts_config.rx_filter = kernel_config->rx_filter;
@@ -2352,6 +2348,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_set_features_flag_locked(priv->dev, xdp_features);
}
+static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
+ .xmo_rx_timestamp = gve_xdp_rx_timestamp,
+};
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -2447,6 +2447,9 @@ setup_device:
}
gve_set_netdev_xdp_features(priv);
+ if (!gve_is_gqi(priv))
+ priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
+
err = gve_setup_device_resources(priv);
if (err)
goto err_free_xsk_bitmap;
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
index a384a9ed4914..073677d82ee8 100644
--- a/drivers/net/ethernet/google/gve/gve_ptp.c
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -133,9 +133,21 @@ int gve_init_clock(struct gve_priv *priv)
err = -ENOMEM;
goto release_ptp;
}
+ err = gve_clock_nic_ts_read(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "failed to read NIC clock %d\n", err);
+ goto release_nic_ts_report;
+ }
+ ptp_schedule_worker(priv->ptp->clock,
+ msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS));
return 0;
+release_nic_ts_report:
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
release_ptp:
gve_ptp_release(priv);
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 1aff3bbb8cfc..f1bd8f5d5732 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -240,6 +240,11 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->rx_headroom = 0;
}
+ /* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and uses
+ * its 24-byte cb field to store GVE-specific data.
+ */
+ XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
+
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
@@ -456,20 +461,38 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
* Note that this means if the time delta between packet reception and the last
* clock read is greater than ~2 seconds, this will provide invalid results.
*/
+static ktime_t gve_rx_get_hwtstamp(struct gve_priv *gve, u32 hwts)
+{
+ u64 last_read = READ_ONCE(gve->last_sync_nic_counter);
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ return ns_to_ktime(last_read + diff);
+}
+
static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *desc)
{
- u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
struct sk_buff *skb = rx->ctx.skb_head;
- u32 ts, low;
- s32 diff;
-
- if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) {
- ts = le32_to_cpu(desc->ts);
- low = (u32)last_read;
- diff = ts - low;
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
- }
+
+ if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)
+ skb_hwtstamps(skb)->hwtstamp =
+ gve_rx_get_hwtstamp(rx->gve, le32_to_cpu(desc->ts));
+}
+
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct gve_xdp_buff *ctx = (void *)_ctx;
+
+ if (!ctx->gve->nic_ts_report)
+ return -ENODATA;
+
+ if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
+ return -ENODATA;
+
+ *timestamp = gve_rx_get_hwtstamp(ctx->gve,
+ le32_to_cpu(ctx->compl_desc->ts));
+ return 0;
}
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
@@ -683,16 +706,23 @@ err:
}
static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ struct gve_rx_buf_state_dqo *buf_state,
struct bpf_prog *xprog)
{
struct xdp_buff *xdp = buf_state->xsk_buff;
+ int buf_len = compl_desc->packet_len;
struct gve_priv *priv = rx->gve;
+ struct gve_xdp_buff *gve_xdp;
int xdp_act;
xdp->data_end = xdp->data + buf_len;
xsk_buff_dma_sync_for_cpu(xdp);
+ gve_xdp = (void *)xdp;
+ gve_xdp->gve = priv;
+ gve_xdp->compl_desc = compl_desc;
+
if (xprog) {
xdp_act = bpf_prog_run_xdp(xprog, xdp);
buf_len = xdp->data_end - xdp->data;
@@ -782,7 +812,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
xprog = READ_ONCE(priv->xdp_prog);
if (buf_state->xsk_buff)
- return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+ return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog);
/* Page might have not been used for awhile and was likely last written
* by a different thread.
@@ -840,23 +870,26 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (xprog) {
- struct xdp_buff xdp;
+ struct gve_xdp_buff gve_xdp;
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ xdp_init_buff(&gve_xdp.xdp, buf_state->page_info.buf_size,
&rx->xdp_rxq);
- xdp_prepare_buff(&xdp,
+ xdp_prepare_buff(&gve_xdp.xdp,
buf_state->page_info.page_address +
buf_state->page_info.page_offset,
buf_state->page_info.pad,
buf_len, false);
- old_data = xdp.data;
- xdp_act = bpf_prog_run_xdp(xprog, &xdp);
- buf_state->page_info.pad += xdp.data - old_data;
- buf_len = xdp.data_end - xdp.data;
+ gve_xdp.gve = priv;
+ gve_xdp.compl_desc = compl_desc;
+
+ old_data = gve_xdp.xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &gve_xdp.xdp);
+ buf_state->page_info.pad += gve_xdp.xdp.data - old_data;
+ buf_len = gve_xdp.xdp.data_end - gve_xdp.xdp.data;
if (xdp_act != XDP_PASS) {
- gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ gve_xdp_done_dqo(priv, rx, &gve_xdp.xdp, xprog, xdp_act,
buf_state);
return 0;
}
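
The xmo_rx_timestamp hook registered in gve_main.c backs the generic bpf_xdp_metadata_rx_timestamp() kfunc, so an XDP program can read the same hardware stamp that gve_rx_skb_hwtstamp() attaches to skbs. A minimal consumer sketch, using the usual libbpf conventions (the section and program names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                          __u64 *timestamp) __ksym;

SEC("xdp")
int rx_tstamp_prog(struct xdp_md *ctx)
{
        __u64 ts;

        /* Returns -ENODATA when no valid hardware timestamp is present,
         * mirroring gve_xdp_rx_timestamp() above.
         */
        if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
                bpf_printk("rx hwtstamp: %llu ns", ts);

        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Note that metadata kfuncs only resolve to the driver implementation for device-bound programs.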
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index c6ff0968929d..97efc8d27e6f 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -730,7 +730,9 @@ unmap_drop:
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 6f1d515673d2..40b89b3e5a31 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -1002,7 +1002,9 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
dev_kfree_skb_any(skb);
return 0;
}
@@ -1324,7 +1326,11 @@ static void remove_miss_completions(struct gve_priv *priv,
/* This indicates the packet was dropped. */
dev_kfree_skb_any(pending_packet->skb);
pending_packet->skb = NULL;
+
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
+
net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
priv->dev->name,
(int)(pending_packet - tx->dqo.pending_packets));
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 38875c196cb6..18eca7d12c20 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -151,6 +151,7 @@ config HIBMCGE
select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
+ select PAGE_POOL
help
If you wish to compile a kernel for a BMC with HIBMC-xx_gmac
then you should answer Y to this. This makes this driver suitable for use
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index 1a9da564b306..d6610ba16855 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -3,6 +3,7 @@
# Makefile for the HISILICON BMC GE network device drivers.
#
+ccflags-y += -I$(src)
obj-$(CONFIG_HIBMCGE) += hibmcge.o
hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 2097e4c2b3d7..8e134da3e217 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -7,6 +7,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/page_pool/helpers.h>
#include "hbg_reg.h"
#define HBG_STATUS_DISABLE 0x0
@@ -55,6 +56,12 @@ struct hbg_buffer {
dma_addr_t skb_dma;
u32 skb_len;
+ struct page *page;
+ void *page_addr;
+ dma_addr_t page_dma;
+ u32 page_size;
+ u32 page_offset;
+
enum hbg_dir dir;
struct hbg_ring *ring;
struct hbg_priv *priv;
@@ -78,6 +85,7 @@ struct hbg_ring {
struct hbg_priv *priv;
struct napi_struct napi;
char *tout_log_buf; /* tx timeout log buffer */
+ struct page_pool *page_pool; /* only for rx */
};
enum hbg_hw_event_type {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index a39d1e796e4a..30b3903c8f2d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -252,6 +252,8 @@ struct hbg_rx_desc {
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W3_IP_OFFSET_M GENMASK(23, 16)
+#define HBG_RX_DESC_W3_VLAN_M GENMASK(15, 0)
#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
@@ -269,6 +271,8 @@ struct hbg_rx_desc {
#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+#define HBG_RX_DESC_W4_PARSE_MODE_M GENMASK(6, 5)
+#define HBG_RX_DESC_W5_VALID_SIZE_M GENMASK(15, 0)
enum hbg_l3_err_code {
HBG_L3_OK = 0,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
new file mode 100644
index 000000000000..b70fd960da8d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+/* This must be outside ifdef _HBG_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hibmcge
+
+#if !defined(_HBG_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HBG_TRACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include "hbg_reg.h"
+
+TRACE_EVENT(hbg_rx_desc,
+ TP_PROTO(struct hbg_priv *priv, u32 index,
+ struct hbg_rx_desc *rx_desc),
+ TP_ARGS(priv, index, rx_desc),
+
+ TP_STRUCT__entry(__field(u32, index)
+ __field(u8, port_num)
+ __field(u8, ip_offset)
+ __field(u8, parse_mode)
+ __field(u8, l4_error_code)
+ __field(u8, l3_error_code)
+ __field(u8, l2_error_code)
+ __field(u16, packet_len)
+ __field(u16, valid_size)
+ __field(u16, vlan)
+ __string(pciname, pci_name(priv->pdev))
+ __string(devname, priv->netdev->name)
+ ),
+
+ TP_fast_assign(__entry->index = index,
+ __entry->packet_len =
+ FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M,
+ rx_desc->word2);
+ __entry->port_num =
+ FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M,
+ rx_desc->word2);
+ __entry->ip_offset =
+ FIELD_GET(HBG_RX_DESC_W3_IP_OFFSET_M,
+ rx_desc->word3);
+ __entry->vlan =
+ FIELD_GET(HBG_RX_DESC_W3_VLAN_M,
+ rx_desc->word3);
+ __entry->parse_mode =
+ FIELD_GET(HBG_RX_DESC_W4_PARSE_MODE_M,
+ rx_desc->word4);
+ __entry->l4_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l3_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l2_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B,
+ rx_desc->word4);
+ __entry->valid_size =
+ FIELD_GET(HBG_RX_DESC_W5_VALID_SIZE_M,
+ rx_desc->word5);
+ __assign_str(pciname);
+ __assign_str(devname);
+ ),
+
+ TP_printk("%s %s index:%u, port num:%u, len:%u, valid size:%u, ip_offset:%u, vlan:0x%04x, parse mode:%u, l4_err:0x%x, l3_err:0x%x, l2_err:0x%x",
+ __get_str(pciname), __get_str(devname), __entry->index,
+ __entry->port_num, __entry->packet_len,
+ __entry->valid_size, __entry->ip_offset, __entry->vlan,
+ __entry->parse_mode, __entry->l4_error_code,
+ __entry->l3_error_code, __entry->l2_error_code
+ )
+);
+
+#endif /* _HBG_TRACE_H_ */
+
+/* This must be outside ifdef _HBG_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hbg_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index 8d814c8f19ea..a4ea92c31c2f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -7,6 +7,9 @@
#include "hbg_reg.h"
#include "hbg_txrx.h"
+#define CREATE_TRACE_POINTS
+#include "hbg_trace.h"
+
#define netdev_get_tx_ring(netdev) \
(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))
@@ -28,6 +31,11 @@
typeof(ring) _ring = (ring); \
_ring->p = hbg_queue_next_prt(_ring->p, _ring); })
+#define hbg_get_page_order(ring) ({ \
+ typeof(ring) _ring = (ring); \
+ get_order(hbg_spec_max_frame_len(_ring->priv, _ring->dir)); })
+#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring)))
+
#define HBG_TX_STOP_THRS 2
#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS)
@@ -62,6 +70,43 @@ static void hbg_dma_unmap(struct hbg_buffer *buffer)
buffer->skb_dma = 0;
}
+static void hbg_buffer_free_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+
+ if (unlikely(!buffer->page))
+ return;
+
+ page_pool_put_full_page(ring->page_pool, buffer->page, false);
+
+ buffer->page = NULL;
+ buffer->page_dma = 0;
+ buffer->page_addr = NULL;
+ buffer->page_size = 0;
+ buffer->page_offset = 0;
+}
+
+static int hbg_buffer_alloc_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+ u32 len = hbg_get_page_size(ring);
+ u32 offset;
+
+ if (unlikely(!ring->page_pool))
+ return 0;
+
+ buffer->page = page_pool_dev_alloc_frag(ring->page_pool, &offset, len);
+ if (unlikely(!buffer->page))
+ return -ENOMEM;
+
+ buffer->page_dma = page_pool_get_dma_addr(buffer->page) + offset;
+ buffer->page_addr = page_address(buffer->page) + offset;
+ buffer->page_size = len;
+ buffer->page_offset = offset;
+
+ return 0;
+}
+
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
struct hbg_tx_desc *tx_desc)
{
@@ -135,24 +180,14 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
buffer->skb = NULL;
}
-static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
-{
- u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
- struct hbg_priv *priv = buffer->priv;
-
- buffer->skb = netdev_alloc_skb(priv->netdev, len);
- if (unlikely(!buffer->skb))
- return -ENOMEM;
-
- buffer->skb_len = len;
- memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
- return 0;
-}
-
static void hbg_buffer_free(struct hbg_buffer *buffer)
{
- hbg_dma_unmap(buffer);
- hbg_buffer_free_skb(buffer);
+ if (buffer->skb) {
+ hbg_dma_unmap(buffer);
+ return hbg_buffer_free_skb(buffer);
+ }
+
+ hbg_buffer_free_page(buffer);
}
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
@@ -374,25 +409,44 @@ static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
struct hbg_buffer *buffer;
int ret;
- if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
+ if (hbg_queue_is_full(ring->ntc, ring->ntu, ring) ||
+ hbg_fifo_is_full(priv, ring->dir))
return 0;
buffer = &ring->queue[ring->ntu];
- ret = hbg_buffer_alloc_skb(buffer);
+ ret = hbg_buffer_alloc_page(buffer);
if (unlikely(ret))
return ret;
- ret = hbg_dma_map(buffer);
- if (unlikely(ret)) {
- hbg_buffer_free_skb(buffer);
- return ret;
- }
+ memset(buffer->page_addr, 0, HBG_PACKET_HEAD_SIZE);
+ dma_sync_single_for_device(&priv->pdev->dev, buffer->page_dma,
+ HBG_PACKET_HEAD_SIZE, DMA_TO_DEVICE);
- hbg_hw_fill_buffer(priv, buffer->skb_dma);
+ hbg_hw_fill_buffer(priv, buffer->page_dma);
hbg_queue_move_next(ntu, ring);
return 0;
}
+static int hbg_rx_fill_buffers(struct hbg_priv *priv)
+{
+ u32 remained = hbg_hw_get_fifo_used_num(priv, HBG_DIR_RX);
+ u32 max_count = priv->dev_specs.rx_fifo_num;
+ u32 refill_count;
+ int ret;
+
+ if (unlikely(remained >= max_count))
+ return 0;
+
+ refill_count = max_count - remained;
+ while (refill_count--) {
+ ret = hbg_rx_fill_one_buffer(priv);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
struct hbg_buffer *buffer)
{
@@ -401,13 +455,29 @@ static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
/* make sure HW write desc complete */
dma_rmb();
- dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
- buffer->skb_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev, buffer->page_dma,
+ buffer->page_size, DMA_FROM_DEVICE);
- rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}
+static int hbg_build_skb(struct hbg_priv *priv,
+ struct hbg_buffer *buffer, u32 pkt_len)
+{
+ net_prefetch(buffer->page_addr);
+
+ buffer->skb = napi_build_skb(buffer->page_addr, buffer->page_size);
+ if (unlikely(!buffer->skb))
+ return -ENOMEM;
+ skb_mark_for_recycle(buffer->skb);
+
+ /* page will be freed together with the skb */
+ buffer->page = NULL;
+
+ return 0;
+}
+
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
@@ -417,33 +487,39 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
u32 packet_done = 0;
u32 pkt_len;
+ hbg_rx_fill_buffers(priv);
while (packet_done < budget) {
if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
break;
buffer = &ring->queue[ring->ntc];
- if (unlikely(!buffer->skb))
+ if (unlikely(!buffer->page))
goto next_buffer;
if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
break;
- rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+ trace_hbg_rx_desc(priv, ring->ntc, rx_desc);
+
+ if (unlikely(hbg_build_skb(priv, buffer, pkt_len))) {
+ hbg_buffer_free_page(buffer);
+ goto next_buffer;
+ }
if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
- hbg_buffer_free(buffer);
+ hbg_buffer_free_skb(buffer);
goto next_buffer;
}
- hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
priv->netdev);
-
dev_sw_netstats_rx_add(priv->netdev, pkt_len);
napi_gro_receive(napi, buffer->skb);
buffer->skb = NULL;
+ buffer->page = NULL;
next_buffer:
hbg_rx_fill_one_buffer(priv);
@@ -458,6 +534,42 @@ next_buffer:
return packet_done;
}
+static void hbg_ring_page_pool_destroy(struct hbg_ring *ring)
+{
+ if (!ring->page_pool)
+ return;
+
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+}
+
+static int hbg_ring_page_pool_init(struct hbg_priv *priv, struct hbg_ring *ring)
+{
+ u32 buf_size = hbg_spec_max_frame_len(priv, ring->dir);
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = hbg_get_page_order(ring),
+ .pool_size = ring->len * buf_size / hbg_get_page_size(ring),
+ .nid = dev_to_node(&priv->pdev->dev),
+ .dev = &priv->pdev->dev,
+ .napi = &ring->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = hbg_get_page_size(ring),
+ };
+ int ret = 0;
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ ret = PTR_ERR(ring->page_pool);
+ dev_err(&priv->pdev->dev,
+ "failed to create page pool, ret = %d\n", ret);
+ ring->page_pool = NULL;
+ }
+
+ return ret;
+}
+
static void hbg_ring_uninit(struct hbg_ring *ring)
{
struct hbg_buffer *buffer;
@@ -476,6 +588,7 @@ static void hbg_ring_uninit(struct hbg_ring *ring)
buffer->priv = NULL;
}
+ hbg_ring_page_pool_destroy(ring);
dma_free_coherent(&ring->priv->pdev->dev,
ring->len * sizeof(*ring->queue),
ring->queue, ring->queue_dma);
@@ -491,8 +604,19 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
{
struct hbg_buffer *buffer;
u32 i, len;
+ int ret;
len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
+ /* To improve receive performance under high load, the RX ring is about
+ * twice the hardware FIFO depth: hbg_napi_rx_poll() first refills the
+ * FIFO from the spare half of the ring via hbg_rx_fill_buffers(), and
+ * only then processes the packets already sitting in the other half,
+ * minimizing packet loss caused by hardware FIFO overflow.
+ */
+ if (dir == HBG_DIR_RX)
+ len += hbg_get_spec_fifo_max_num(priv, dir);
+
ring->queue = dma_alloc_coherent(&priv->pdev->dev,
len * sizeof(*ring->queue),
&ring->queue_dma, GFP_KERNEL);
@@ -514,11 +638,23 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
ring->ntu = 0;
ring->len = len;
- if (dir == HBG_DIR_TX)
+ if (dir == HBG_DIR_TX) {
netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
- else
+ } else {
netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+ ret = hbg_ring_page_pool_init(priv, ring);
+ if (ret) {
+ netif_napi_del(&ring->napi);
+ dma_free_coherent(&ring->priv->pdev->dev,
+ ring->len * sizeof(*ring->queue),
+ ring->queue, ring->queue_dma);
+ ring->queue = NULL;
+ ring->len = 0;
+ return ret;
+ }
+ }
+
napi_enable(&ring->napi);
return 0;
}
@@ -541,21 +677,16 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
static int hbg_rx_ring_init(struct hbg_priv *priv)
{
int ret;
- u32 i;
ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
if (ret)
return ret;
- for (i = 0; i < priv->rx_ring.len - 1; i++) {
- ret = hbg_rx_fill_one_buffer(priv);
- if (ret) {
- hbg_ring_uninit(&priv->rx_ring);
- return ret;
- }
- }
+ ret = hbg_rx_fill_buffers(priv);
+ if (ret)
+ hbg_ring_uninit(&priv->rx_ring);
- return 0;
+ return ret;
}
int hbg_txrx_init(struct hbg_priv *priv)
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cee57a2149ab..7b1ac90b3de4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -551,9 +551,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- size_t total_len, max_len;
u16 *eeprom_buff;
int ret_val = 0;
+ size_t max_len;
int first_word;
int last_word;
void *ptr;
@@ -571,10 +571,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
max_len = hw->nvm.word_size * 2;
- if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) ||
- total_len > max_len)
- return -EFBIG;
-
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index bf2029144c1d..76e42abca965 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -734,22 +734,11 @@ static int fm10k_get_rssh_fields(struct net_device *dev,
return 0;
}
-static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static u32 fm10k_get_rx_ring_count(struct net_device *dev)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = interface->num_rx_queues;
- ret = 0;
- break;
- default:
- break;
- }
-
- return ret;
+ return interface->num_rx_queues;
}
static int fm10k_set_rssh_fields(struct net_device *dev,
@@ -1160,7 +1149,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_ringparam = fm10k_set_ringparam,
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
- .get_rxnfc = fm10k_get_rxnfc,
+ .get_rx_ring_count = fm10k_get_rx_ring_count,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
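
The same conversion repeats in the i40e, iavf, ice, idpf, igb, igc and ixgbe hunks that follow: the ETHTOOL_GRXRINGS case is dropped from .get_rxnfc and the ring count is reported through the dedicated op instead. The generic shape, with foo_* as placeholder names:

static u32 foo_get_rx_ring_count(struct net_device *dev)
{
        struct foo_adapter *adapter = netdev_priv(dev);

        return adapter->num_rx_queues;
}

static const struct ethtool_ops foo_ethtool_ops = {
        /* .get_rxnfc keeps handling the remaining ETHTOOL_GRX* commands */
        .get_rx_ring_count = foo_get_rx_ring_count,
};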
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
index bc205e3077c7..229179ccc131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -24,7 +24,8 @@ static int i40e_max_mac_per_vf_set(struct devlink *devlink,
static int i40e_max_mac_per_vf_get(struct devlink *devlink,
u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct i40e_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 86c72596617a..f2c2646ea298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3522,6 +3522,20 @@ no_input_set:
}
/**
+ * i40e_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 i40e_get_rx_ring_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3538,10 +3552,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -5819,6 +5829,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_msglevel = i40e_set_msglevel,
.get_rxnfc = i40e_get_rxnfc,
.set_rxnfc = i40e_set_rxnfc,
+ .get_rx_ring_count = i40e_get_rx_ring_count,
.self_test = i40e_diag_test,
.get_strings = i40e_get_strings,
.get_eee = i40e_get_eee,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 9d91a382612d..8b30a3accd31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2967,7 +2967,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
dev_err(&pf->pdev->dev,
"Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
mac_add_max);
- return -EPERM;
+ return -EPERM;
}
if (!vf_trusted) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index a3f8ced23266..2cc21289a707 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1639,6 +1639,19 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * iavf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 iavf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_active_queues;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -1653,10 +1666,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_active_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
@@ -1866,6 +1875,7 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
.set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
+ .get_rx_ring_count = iavf_get_rx_ring_count,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 34a422a4a29c..88156082a41d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -793,7 +793,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -838,7 +839,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
@@ -941,7 +943,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -987,7 +990,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 938914abbe06..d88b7f3fd1f9 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -610,11 +610,13 @@ exit_release_res:
* @devlink: pointer to the devlink instance
* @id: the parameter ID to set
* @ctx: context to store the parameter value
+ * @extack: netlink extended ACK structure
*
* Return: zero on success and negative value on failure.
*/
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
int err;
@@ -1349,7 +1351,8 @@ static const struct devlink_ops ice_sf_devlink_ops;
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct iidc_rdma_core_dev_info *cdev;
@@ -1415,7 +1418,8 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct iidc_rdma_core_dev_info *cdev;
@@ -1522,11 +1526,13 @@ static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
* @devlink: Pointer to the devlink instance.
* @id: The parameter ID to set.
* @ctx: Context to store the parameter value.
+ * @extack: netlink extended ACK structure
*
* Return: Zero.
*/
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct ice_port_info *pi;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a1d9abee97e5..969d4f8f9c02 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3084,6 +3084,20 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 ice_get_rx_ring_count(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3103,10 +3117,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
hw = &vsi->back->hw;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = hw->fdir_active_fltr;
/* report total rule count */
@@ -4853,6 +4863,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.set_rxnfc = ice_set_rxnfc,
+ .get_rx_ring_count = ice_get_rx_ring_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 26b357c0ae15..b29fbdec9442 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -1121,7 +1121,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* ice_fdir_has_frag - does flow type have 2 ptypes
* @flow: flow ptype
*
- * returns true is there is a fragment packet for this ptype
+ * Return: true if there is a fragment packet for this ptype
*/
bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index d86db081579f..973a13d3d92a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -534,7 +534,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ dev_err(dev, "Firmware failed to erase %s (module 0x%02x), aq_err %s\n",
component, module,
libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 985b3e79b312..4c8d20f2d2c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -3253,7 +3253,7 @@ void ice_ptp_init(struct ice_pf *pf)
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
- goto err_exit;
+ goto err_clean_pf;
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
@@ -3270,13 +3270,19 @@ void ice_ptp_init(struct ice_pf *pf)
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
+err_clean_pf:
+ mutex_destroy(&ptp->port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
err_exit:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- ptp->state = ICE_PTP_ERROR;
+ /* Keep the ICE_PTP_UNINIT state to avoid ambiguity at driver unload
+ * and to avoid releasing resources twice.
+ */
+ ptp->state = ICE_PTP_UNINIT;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3289,9 +3295,19 @@ err_exit:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (pf->ptp.state != ICE_PTP_READY)
+ if (pf->ptp.state == ICE_PTP_UNINIT)
return;
+ if (pf->ptp.state != ICE_PTP_READY) {
+ mutex_destroy(&pf->ptp.port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
+ return;
+ }
+
pf->ptp.state = ICE_PTP_UNINIT;
/* Disable timestamping for both Tx and Rx */
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
index 7928f4e8e788..f73d5a3e83d4 100644
--- a/drivers/net/ethernet/intel/ice/virt/queues.c
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -842,6 +842,9 @@ int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
goto error_param;
+
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+
if (qpi->rxq.max_pkt_size > max_frame_size ||
qpi->rxq.max_pkt_size < 64)
goto error_param;
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 50fa7be0c00d..8cfc68cbfa06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -131,14 +131,12 @@ enum idpf_cap_field {
/**
* enum idpf_vport_state - Current vport state
- * @__IDPF_VPORT_DOWN: Vport is down
- * @__IDPF_VPORT_UP: Vport is up
- * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ * @IDPF_VPORT_UP: Vport is up
+ * @IDPF_VPORT_STATE_NBITS: Must be last, number of states
*/
enum idpf_vport_state {
- __IDPF_VPORT_DOWN,
- __IDPF_VPORT_UP,
- __IDPF_VPORT_STATE_LAST,
+ IDPF_VPORT_UP,
+ IDPF_VPORT_STATE_NBITS
};
/**
@@ -162,7 +160,7 @@ struct idpf_netdev_priv {
u16 vport_idx;
u16 max_tx_hdr_size;
u16 tx_max_bufs;
- enum idpf_vport_state state;
+ DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
};
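
Replacing the two-value state enum with a one-bit bitmap lets the rest of the series use atomic bitops on np->state and leaves room for additional per-vport bits without another layout change. As an illustration (not code from this patch), a "mark down if currently up" helper reduces to a single atomic operation:

/* Hypothetical helper, shown only to illustrate the atomicity gained. */
static bool idpf_vport_test_and_mark_down(struct idpf_netdev_priv *np)
{
        /* Two concurrent stop paths cannot both observe the vport as up. */
        return test_and_clear_bit(IDPF_VPORT_UP, np->state);
}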
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index a5a1eec9ade8..2589e124e41c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -6,6 +6,25 @@
#include "idpf_virtchnl.h"
/**
+ * idpf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 idpf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct idpf_vport *vport;
+ u32 num_rxq;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ num_rxq = vport->num_rxq;
+ idpf_vport_ctrl_unlock(netdev);
+
+ return num_rxq;
+}
+
+/**
* idpf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -28,9 +47,6 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vport->num_rxq;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = user_config->num_fsteer_fltrs;
cmd->data = idpf_fsteer_max_rules(vport);
@@ -386,7 +402,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -436,7 +452,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -1167,7 +1183,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP) {
+ if (!test_bit(IDPF_VPORT_UP, np->state)) {
idpf_vport_ctrl_unlock(netdev);
return;
@@ -1319,7 +1335,7 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
@@ -1507,7 +1523,7 @@ static int idpf_set_coalesce(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
@@ -1710,7 +1726,7 @@ static void idpf_get_ts_stats(struct net_device *netdev,
ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
} while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto exit;
for (u16 i = 0; i < vport->num_txq_grp; i++) {
@@ -1757,6 +1773,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
.set_rxnfc = idpf_set_rxnfc,
+ .get_rx_ring_count = idpf_get_rx_ring_count,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 8a941f0fb048..7a7e101afeb6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -519,7 +519,7 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (np->state == __IDPF_VPORT_UP) {
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
err = idpf_add_del_mac_filters(vport, np, false, async);
@@ -590,7 +590,7 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
if (err)
return err;
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
err = idpf_add_del_mac_filters(vport, np, true, async);
return err;
@@ -894,7 +894,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (rtnl)
@@ -921,7 +921,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
idpf_vport_intr_rel(vport);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
rtnl_unlock();
@@ -1345,7 +1345,7 @@ static int idpf_up_complete(struct idpf_vport *vport)
netif_tx_start_all_queues(vport->netdev);
}
- np->state = __IDPF_VPORT_UP;
+ set_bit(IDPF_VPORT_UP, np->state);
return 0;
}
@@ -1391,7 +1391,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
struct idpf_vport_config *vport_config;
int err;
- if (np->state != __IDPF_VPORT_DOWN)
+ if (test_bit(IDPF_VPORT_UP, np->state))
return -EBUSY;
if (rtnl)
@@ -1602,7 +1602,7 @@ void idpf_init_task(struct work_struct *work)
/* Once state is put into DOWN, driver is ready for dev_open */
np = netdev_priv(vport->netdev);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
idpf_vport_open(vport, true);
@@ -1801,7 +1801,7 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
continue;
np = netdev_priv(adapter->netdevs[i]);
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
set_bit(IDPF_VPORT_UP_REQUESTED,
adapter->vport_config[i]->flags);
}
@@ -1939,7 +1939,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- enum idpf_vport_state current_state = np->state;
+ bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
int err;
@@ -1990,7 +1990,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
- if (current_state <= __IDPF_VPORT_DOWN) {
+ if (!vport_is_up) {
idpf_send_delete_queues_msg(vport);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
@@ -2023,7 +2023,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (err)
goto err_open;
- if (current_state == __IDPF_VPORT_UP)
+ if (vport_is_up)
err = idpf_vport_open(vport, false);
goto free_vport;
@@ -2033,7 +2033,7 @@ err_reset:
vport->num_rxq, vport->num_bufq);
err_open:
- if (current_state == __IDPF_VPORT_UP)
+ if (vport_is_up)
idpf_vport_open(vport, false);
free_vport:
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index 7a06eaf46a08..de5d722cc21d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -141,6 +141,8 @@ destroy_wqs:
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->vport_config[i])
+ continue;
kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 61e613066140..e3ddf18dcbf5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -570,7 +570,7 @@ fetch_next_txq_desc:
np = netdev_priv(tx_q->netdev);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = np->state != __IDPF_VPORT_UP ||
+ dont_wake = !test_bit(IDPF_VPORT_UP, np->state) ||
!netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 828f7c444d30..1d91c56f7469 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -134,7 +134,7 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
{
idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- if (!complq->comp)
+ if (!complq->desc_ring)
return;
dma_free_coherent(complq->netdev->dev.parent, complq->size,
@@ -922,8 +922,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
err = idpf_rx_desc_alloc(vport, q);
if (err) {
pci_err(vport->adapter->pdev,
- "Memory allocation for Rx Queue %u failed\n",
- i);
+ "Memory allocation for Rx queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -939,8 +939,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
err = idpf_bufq_desc_alloc(vport, q);
if (err) {
pci_err(vport->adapter->pdev,
- "Memory allocation for Rx Buffer Queue %u failed\n",
- i);
+ "Memory allocation for Rx Buffer Queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -2275,7 +2275,7 @@ fetch_next_desc:
/* Update BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
+ dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) ||
!netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index cbb5fa30f5a0..44cd4b466c48 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -68,7 +68,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
vport->link_up = v2e->link_status;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (vport->link_up) {
@@ -2755,7 +2755,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
/* Don't send get_stats message if the link is down */
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 21ce25b0567f..958d16f87424 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -418,7 +418,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
!test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
!!vport->xdp_prog == !!prog) {
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
idpf_xdp_copy_prog_to_rqs(vport, prog);
old = xchg(&vport->xdp_prog, prog);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 10e2445e0ded..b507576b28b2 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2541,6 +2541,13 @@ static int igb_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 igb_get_rx_ring_count(struct net_device *dev)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2548,10 +2555,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_filter_count;
ret = 0;
@@ -3473,6 +3476,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ts_info = igb_get_ts_info,
.get_rxnfc = igb_get_rxnfc,
.set_rxnfc = igb_set_rxnfc,
+ .get_rx_ring_count = igb_get_rx_ring_count,
.get_eee = igb_get_eee,
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
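
This igb change, and the igc, ixgbe, ixgbevf, mvneta, mvpp2, mlx4 and mlx5 changes that follow, apply the same refactor: the ETHTOOL_GRXRINGS case is removed from .get_rxnfc and the ring count is reported through the dedicated .get_rx_ring_count callback instead. A minimal sketch of the pattern for a hypothetical driver (struct foo_adapter and the foo_* names are illustrative):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_adapter {
	u32 num_rx_queues;
};

static u32 foo_get_rx_ring_count(struct net_device *dev)
{
	struct foo_adapter *adapter = netdev_priv(dev);

	/* Number of RX rings that RSS and ntuple rules may target;
	 * previously reported via ETHTOOL_GRXRINGS inside .get_rxnfc.
	 */
	return adapter->num_rx_queues;
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_rxnfc, if still provided, now handles classification only */
	.get_rx_ring_count	= foo_get_rx_ring_count,
};
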
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index bb783042d1af..e94c1922b97a 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1091,15 +1091,19 @@ static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 igc_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igc_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_rule_count;
return 0;
@@ -2170,6 +2174,7 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_coalesce = igc_ethtool_set_coalesce,
.get_rxnfc = igc_ethtool_get_rxnfc,
.set_rxnfc = igc_ethtool_set_rxnfc,
+ .get_rx_ring_count = igc_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
.get_rxfh = igc_ethtool_get_rxfh,
.set_rxfh = igc_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 2d660e9edb80..2ad81f687a84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2805,6 +2805,14 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
return 64;
}
+static u32 ixgbe_get_rx_ring_count(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
+ return min_t(u32, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2812,11 +2820,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = min_t(int, adapter->num_rx_queues,
- ixgbe_rss_indir_tbl_max(adapter));
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->fdir_filter_count;
ret = 0;
@@ -3743,6 +3746,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
@@ -3791,6 +3795,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3190ce7e44c7..4af3b3e71ff1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7449,7 +7449,7 @@ int ixgbe_open(struct net_device *netdev)
adapter->hw.link.link_info.link_cfg_err);
err = ixgbe_non_sfp_link_config(&adapter->hw);
- if (ixgbe_non_sfp_link_config(&adapter->hw))
+ if (err)
e_dev_err("Link setup failed, err %d.\n", err);
}
@@ -12046,7 +12046,7 @@ err_dma:
* @pdev: PCI device information struct
*
* ixgbe_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
+ * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index bebad564188e..537a60d5276f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -867,19 +867,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
return 0;
}
-static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 ixgbevf_get_rx_ring_count(struct net_device *dev)
{
struct ixgbevf_adapter *adapter = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_rx_queues;
- return 0;
- default:
- hw_dbg(&adapter->hw, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return adapter->num_rx_queues;
}
static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
@@ -987,7 +979,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
.get_coalesce = ixgbevf_get_coalesce,
.set_coalesce = ixgbevf_set_coalesce,
- .get_rxnfc = ixgbevf_get_rxnfc,
+ .get_rx_ring_count = ixgbevf_get_rx_ring_count,
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 039187607e98..516a6fdd23d0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -241,23 +241,7 @@ struct ixgbevf_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define IXGBEVF_QV_STATE_IDLE 0
-#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
-#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
- IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
- IXGBEVF_QV_STATE_POLL_YIELD)
- spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct ixgbevf_ring ring[] ____cacheline_internodealigned_in_smp;
};
/* microsecond values for various ITR rates shifted by 2 to fit itr register
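
Besides deleting long-dead busy-poll state, the ixgbevf.h hunk converts a zero-length array (ring[0]) into a C99 flexible array member. A minimal sketch of how such a trailing array is sized and allocated (the foo_* names are illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_ring {
	void *desc;
};

struct foo_q_vector {
	unsigned int num_rings;
	struct foo_ring ring[];	/* flexible array member, must be last */
};

static struct foo_q_vector *foo_alloc_q_vector(unsigned int nrings)
{
	struct foo_q_vector *qv;

	/* struct_size() computes sizeof(*qv) + nrings * sizeof(qv->ring[0])
	 * with overflow checking, one reason [] is preferred over [0].
	 */
	qv = kzalloc(struct_size(qv, ring, nrings), GFP_KERNEL);
	if (qv)
		qv->num_rings = nrings;
	return qv;
}
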
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 89ccb8eb82c7..7af44f858fa3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5012,17 +5012,9 @@ static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
return MVNETA_RSS_LU_TABLE_SIZE;
}
-static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 mvneta_ethtool_get_rx_ring_count(struct net_device *dev)
{
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = rxq_number;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ return rxq_number;
}
static int mvneta_config_rss(struct mvneta_port *pp)
@@ -5356,7 +5348,7 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
- .get_rxnfc = mvneta_ethtool_get_rxnfc,
+ .get_rx_ring_count = mvneta_ethtool_get_rx_ring_count,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ab0c99aa9f9a..33426fded919 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5580,6 +5580,13 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static u32 mvpp2_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return port->nrxqs;
+}
+
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -5590,9 +5597,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = port->nrxqs;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = port->n_rfs_rules;
break;
@@ -5827,6 +5831,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rx_ring_count = mvpp2_ethtool_get_rx_ring_count,
.get_rxnfc = mvpp2_ethtool_get_rxnfc,
.set_rxnfc = mvpp2_ethtool_set_rxnfc,
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 7370812ece2a..15d3cb0b9da6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1663,6 +1663,9 @@ static void print_tm_tree(struct seq_file *m,
int blkaddr;
u64 cfg;
+ if (!sq_ctx->ena)
+ return;
+
blkaddr = nix_hw->blkaddr;
schq = sq_ctx->smq;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 3735372539bd..0f9953eaf1b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1233,7 +1233,8 @@ static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1259,7 +1260,8 @@ enum rvu_af_dl_param_id {
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1314,7 +1316,8 @@ static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1376,7 +1379,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
}
static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1402,7 +1406,8 @@ static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index e13ae5484c19..a72694219df4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -48,7 +48,8 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
}
static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -84,7 +85,8 @@ static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id,
}
static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
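
The devlink-param hunks in this series (rvu, otx2, mlx4, mlx5) track the same core change: driver .get callbacks now receive a struct netlink_ext_ack, so a failure can carry a message back to userspace instead of a bare errno. A hedged sketch of the new callback shape (struct foo_priv, the foo_* names and the vu32 field choice are illustrative):

#include <net/devlink.h>

struct foo_priv {
	bool param_supported;
	u32 param_value;
};

static int foo_dl_param_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	if (!priv->param_supported) {
		/* The message can reach users of "devlink dev param show" */
		NL_SET_ERR_MSG_MOD(extack, "Parameter not supported on this device");
		return -EOPNOTSUPP;
	}

	ctx->val.vu32 = priv->param_value;
	return 0;
}
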
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a68cd3f0304c..ad6298456639 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1727,6 +1727,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
}
+static u32 mlx4_en_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->rx_ring_num;
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1743,9 +1750,6 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return -EINVAL;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_ring_num;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = mlx4_en_get_num_flows(priv);
break;
@@ -2154,6 +2158,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
+ .get_rx_ring_count = mlx4_en_get_rx_ring_count,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 03d2fc7d9b09..2de226951e19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -174,7 +174,8 @@ MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is defaul
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vbool = !!mlx4_internal_err_reset;
return 0;
@@ -189,7 +190,8 @@ static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 722282cebce9..5b08e5ffe0e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -181,6 +181,7 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
lockdep_assert_held(&cmd->alloc_lock);
+ cmd->ent_arr[idx] = NULL;
set_bit(idx, &cmd->vars.bitmask);
}
@@ -1200,6 +1201,44 @@ out_err:
return err;
}
+/* Check if all command slots are stalled (timed out and not recovered).
+ * Returns true if all slots timed out on a recent command and have not been
+ * completed by FW yet (stalled state), false otherwise (at least one slot
+ * is not stalled).
+ *
+ * In such an odd "all_stalled" situation, this serves as a protection
+ * mechanism to avoid blocking the kernel for long periods of time when FW
+ * is not responding to commands.
+ */
+static bool mlx5_cmd_all_stalled(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ bool all_stalled = true;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+
+ /* at least one command slot is free */
+ if (bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds) > 0) {
+ all_stalled = false;
+ goto out;
+ }
+
+ for_each_clear_bit(i, &cmd->vars.bitmask, cmd->vars.max_reg_cmds) {
+ struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
+
+ if (!test_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, &ent->state)) {
+ all_stalled = false;
+ break;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return all_stalled;
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -1230,6 +1269,15 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (callback && page_queue)
return -EINVAL;
+ if (!page_queue && mlx5_cmd_all_stalled(dev)) {
+ mlx5_core_err_rl(dev,
+ "All CMD slots are stalled, aborting command\n");
+ /* There's no reason to wait and block the whole kernel if FW
+ * is not currently responding on any slot; fail immediately.
+ */
+ return -EAGAIN;
+ }
+
ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
callback, context, page_queue);
if (IS_ERR(ent))
@@ -1700,6 +1748,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (test_bit(i, &vector)) {
ent = cmd->ent_arr[i];
+ if (forced && ent->ret == -ETIMEDOUT)
+ set_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+ else if (!forced) /* real FW completion */
+ clear_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+
/* if we already completed the command, ignore it */
if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
&ent->state)) {
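
The stall check added above leans on the inverted meaning of the allocation bitmask: a set bit marks a free slot, so for_each_clear_bit() walks the in-flight commands. A minimal standalone sketch of that scan (foo_* names are illustrative; the real code keeps the timed-out flag per entry rather than in a second bitmap):

#include <linux/bitmap.h>
#include <linux/bitops.h>

static bool foo_all_inflight_timed_out(const unsigned long *free_mask,
				       const unsigned long *timedout_mask,
				       unsigned int nslots)
{
	unsigned int i;

	/* At least one free slot means new commands can still make progress */
	if (bitmap_weight(free_mask, nslots) > 0)
		return false;

	/* Clear bits in free_mask are busy slots; all must have timed out */
	for_each_clear_bit(i, free_mask, nslots)
		if (!test_bit(i, timedout_mask))
			return false;

	return true;
}
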
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index c9555119a661..43b9bf8829cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -26,7 +26,8 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
- MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE
+ MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 080e7eab52c7..7bcf822a89f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -54,7 +54,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 12e10feb30f0..424f8a2728a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -82,7 +82,7 @@ static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
struct skb_shared_hwtstamps hwts = {};
ktime_t diff;
@@ -92,8 +92,17 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
/* Maximal allowed diff is 1 / 128 second */
if (diff > (NSEC_PER_SEC >> 7)) {
- cq_stats->abort++;
- cq_stats->abort_abs_diff_ns += diff;
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ ptpsq->cq_stats->abort++;
+ ptpsq->cq_stats->abort_abs_diff_ns += diff;
+ if (diff > (NSEC_PER_SEC >> 1) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netdev_warn(sq->channel->netdev,
+ "PTP TX timestamp difference between CQE and port exceeds threshold: %lld ns, recovering SQ %u\n",
+ (s64)diff, sq->sqn);
+ queue_work(sq->priv->wq, &ptpsq->report_unhealthy_work);
+ }
return;
}
@@ -103,7 +112,7 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
switch (hwtstamp_type) {
case (MLX5E_SKB_CB_CQE_HWTSTAMP):
@@ -121,7 +130,7 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
!mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
return;
- mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
+ mlx5e_skb_cb_hwtstamp_tx(skb, ptpsq);
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
@@ -209,7 +218,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
- hwtstamp, ptpsq->cq_stats);
+ hwtstamp, ptpsq);
ptpsq->cq_stats->cqe++;
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
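
The recovery trigger above relies on a common idiom: test_and_set_bit() guarantees that only the first completion crossing the threshold queues the recovery work, and later completions back off until the bit is cleared. A minimal sketch of that guard (the foo_* names are illustrative):

#include <linux/bitops.h>
#include <linux/workqueue.h>

enum { FOO_SQ_RECOVERING };

struct foo_sq {
	unsigned long state;
	struct work_struct recover_work;
	struct workqueue_struct *wq;
};

static void foo_sq_maybe_recover(struct foo_sq *sq)
{
	/* The first caller atomically wins the bit and schedules the work;
	 * later callers see it already set and return without re-queueing.
	 */
	if (test_and_set_bit(FOO_SQ_RECOVERING, &sq->state))
		return;

	queue_work(sq->wq, &sq->recover_work);
}
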
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 1c0e0a86a9ac..2a457a2ed707 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -147,7 +147,7 @@ enum {
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats);
+ struct mlx5e_ptpsq *ptpsq);
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
#endif /* __MLX5_EN_PTP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 9b93da4d52f6..cf8f14ce4cd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -627,7 +627,7 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else if (max_bw_value[i] <= upper_limit_gbps) {
+ } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 01b8f05a23db..fe67c73849f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -261,6 +261,11 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_2_Full_BIT);
}
static void mlx5e_ethtool_get_speed_arr(bool ext,
@@ -2027,7 +2032,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
int size_read = 0;
u8 data[4] = {0};
- size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data, NULL);
if (size_read < 2)
return -EIO;
@@ -2069,6 +2074,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
int offset = ee->offset;
int size_read;
+ u8 status = 0;
int i = 0;
if (!ee->len)
@@ -2078,15 +2084,15 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
while (i < ee->len) {
size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
- data + i);
-
+ data + i, &status);
if (!size_read)
/* Done reading */
return 0;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
- __func__, size_read);
+ netdev_err(netdev,
+ "%s: mlx5_query_eeprom failed:0x%x, status %u\n",
+ __func__, size_read, status);
return size_read;
}
@@ -2106,6 +2112,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 *data = page_data->data;
int size_read;
+ u8 status = 0;
int i = 0;
if (!page_data->length)
@@ -2119,7 +2126,8 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
query.page = page_data->page;
while (i < page_data->length) {
query.size = page_data->length - i;
- size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
+ size_read = mlx5_query_module_eeprom_by_page(mdev, &query,
+ data + i, &status);
/* Done reading, return how many bytes were read */
if (!size_read)
@@ -2128,8 +2136,8 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d",
- i, size_read);
+ "Query module eeprom by page failed, read %u bytes, err %d, status %u",
+ i, size_read, status);
return size_read;
}
@@ -2492,21 +2500,18 @@ static int mlx5e_set_rxfh_fields(struct net_device *dev,
return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
}
+static u32 mlx5e_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -2766,6 +2771,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.remove_rxfh_context = mlx5e_remove_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_rx_ring_count = mlx5e_get_rx_ring_count,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pause_stats = mlx5e_get_pause_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 79916f1abd14..63bdef5b4ba5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -704,7 +704,7 @@ static int validate_flow(struct mlx5e_priv *priv,
num_tuples += ret;
break;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
ret = validate_vlan(fs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2702b3885f06..14884b9ea7f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -755,7 +755,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
if (sq->ptpsq) {
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
- hwts.hwtstamp, sq->ptpsq->cq_stats);
+ hwts.hwtstamp, sq->ptpsq);
} else {
skb_tstamp_tx(skb, &hwts);
sq->stats->timestamps++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 56e6f54b1e2e..4278bcb04c72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -341,13 +341,6 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
if (max_guarantee)
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- /* If nodes max min_rate divider is 0 but their parent has bw_share
- * configured, then set bw_share for nodes to minimal value.
- */
-
- if (parent && parent->bw_share)
- return 1;
-
/* If none of the nodes has min_rate configured, a divider of 0 sets all
* nodes' bw_share to 0, effectively disabling min guarantees.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e2ffb87b94cb..4b7a1ce7f406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -875,13 +875,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
vport_num, 1,
vport->info.link_state);
- /* Host PF has its own mac/guid. */
- if (vport_num) {
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
- vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
- vport->info.node_guid);
- }
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true,
+ vport->info.mac);
+ mlx5_query_nic_vport_node_guid(esw->dev, vport_num, true,
+ &vport->info.node_guid);
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
@@ -947,12 +944,6 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
goto err_vhca_mapping;
}
- /* External controller host PF has factory programmed MAC.
- * Read it from the device.
- */
- if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
- mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1483,7 +1474,7 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
info.new_mode = mode;
- blocking_notifier_call_chain(&esw->n_head, 0, &info);
+ blocking_notifier_call_chain(&esw->dev->priv.esw_n_head, 0, &info);
}
static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
@@ -1978,7 +1969,8 @@ static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
}
static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -2059,7 +2051,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
@@ -2235,6 +2226,9 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->vf = vport - 1;
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport, true,
+ evport->info.mac);
ether_addr_copy(ivi->mac, evport->info.mac);
ivi->linkstate = evport->info.link_state;
ivi->vlan = evport->info.vlan;
@@ -2385,14 +2379,16 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&esw->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.esw_n_head, nb);
}
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&esw->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.esw_n_head, nb);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index beaec450a734..ad1073f7b79f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -403,7 +403,6 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
- struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
@@ -864,8 +863,10 @@ struct mlx5_esw_event_info {
u16 new_mode;
};
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0b1a180ef238..8de6c7f6c294 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2618,7 +2618,8 @@ done:
}
static int esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -4491,6 +4492,9 @@ int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true,
+ vport->info.mac);
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
mutex_unlock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index e5c1012921d2..1ec61164e6b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -211,7 +211,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
if (!max_num_qps) {
mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n");
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 1af76da8b132..ced747bef641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -239,6 +239,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
MLX5_SET(set_flow_table_root_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(set_flow_table_root_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
if (!err &&
@@ -302,6 +306,10 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap);
@@ -360,6 +368,10 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
if (!err)
@@ -394,6 +406,10 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(modify_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(modify_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -429,6 +445,10 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
@@ -451,6 +471,10 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
@@ -559,6 +583,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
+ MLX5_SET(set_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -788,6 +815,10 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(delete_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, delete_fte, in);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2ca3bddbdf05..0a6031a64c6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -939,10 +939,10 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
- enum fs_flow_table_type table_type,
- enum fs_flow_table_op_mod op_mod,
- u32 flags)
+static struct mlx5_flow_table *
+alloc_flow_table(struct mlx5_flow_table_attr *ft_attr, u16 vport,
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod)
{
struct mlx5_flow_table *ft;
int ret;
@@ -957,12 +957,13 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
return ERR_PTR(ret);
}
- ft->level = level;
+ ft->level = ft_attr->level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->flags = flags;
+ ft->esw_owner_vhca_id = ft_attr->esw_owner_vhca_id;
+ ft->flags = ft_attr->flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1370,10 +1371,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
/* The level is related to the
* priority level range.
*/
- ft = alloc_flow_table(ft_attr->level,
- vport,
- root->table_type,
- op_mod, ft_attr->flags);
+ ft = alloc_flow_table(ft_attr, vport, root->table_type, op_mod);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto unlock_root;
@@ -3310,6 +3308,62 @@ err:
return ret;
}
+static bool mlx5_fs_ns_is_empty(struct mlx5_flow_namespace *ns)
+{
+ struct fs_prio *iter_prio;
+
+ fs_for_each_prio(iter_prio, ns) {
+ if (iter_prio->num_ft)
+ return false;
+ }
+
+ return true;
+}
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type)
+{
+ struct mlx5_flow_root_namespace **root;
+ int total_vports;
+ int i;
+
+ switch (table_type) {
+ case FS_FT_RDMA_TRANSPORT_TX:
+ root = dev->priv.steering->rdma_transport_tx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_tx_vports;
+ break;
+ case FS_FT_RDMA_TRANSPORT_RX:
+ root = dev->priv.steering->rdma_transport_rx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_rx_vports;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_vports; i++) {
+ mutex_lock(&root[i]->chain_lock);
+ if (!mlx5_fs_ns_is_empty(&root[i]->ns)) {
+ mutex_unlock(&root[i]->chain_lock);
+ goto err;
+ }
+ root[i]->dev = new_dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ return 0;
+err:
+ while (i--) {
+ mutex_lock(&root[i]->chain_lock);
+ root[i]->dev = dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ /* If you hit this error, destroy all flow tables and try again */
+ mlx5_core_err(dev, "Failed to set root device for RDMA TRANSPORT\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL(mlx5_fs_set_root_dev);
+
static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
{
struct mlx5_core_dev *dev = steering->dev;
@@ -3779,7 +3833,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 8458ce203dac..1c6591425260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -103,24 +103,6 @@ enum fs_node_type {
FS_TYPE_FLOW_DEST
};
-enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_NIC_TX = 0x1,
- FS_FT_ESW_EGRESS_ACL = 0x2,
- FS_FT_ESW_INGRESS_ACL = 0x3,
- FS_FT_FDB = 0X4,
- FS_FT_SNIFFER_RX = 0X5,
- FS_FT_SNIFFER_TX = 0X6,
- FS_FT_RDMA_RX = 0X7,
- FS_FT_RDMA_TX = 0X8,
- FS_FT_PORT_SEL = 0X9,
- FS_FT_FDB_RX = 0xa,
- FS_FT_FDB_TX = 0xb,
- FS_FT_RDMA_TRANSPORT_RX = 0xd,
- FS_FT_RDMA_TRANSPORT_TX = 0xe,
- FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
-};
-
enum fs_flow_table_op_mod {
FS_FT_OP_MOD_NORMAL,
FS_FT_OP_MOD_LAG_DEMUX,
@@ -205,6 +187,7 @@ struct mlx5_flow_table {
};
u32 id;
u16 vport;
+ u16 esw_owner_vhca_id;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 89e399606877..2bceb42c98cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -73,7 +73,8 @@ static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u3
}
static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 4b3430ac3905..3b2f54ca30a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -266,21 +266,18 @@ static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
+static u32 mlx5i_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -304,6 +301,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.set_rxfh_fields = mlx5i_set_rxfh_fields,
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
+ .get_rx_ring_count = mlx5i_get_rx_ring_count,
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
index 459a0b4d08e6..19bb620b7436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
@@ -8,6 +8,8 @@ enum {
MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP = 0x10b,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF = 0x11d,
MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
};
@@ -32,6 +34,12 @@ union mlx5_ifc_config_item_type_auto_bits {
u8 reserved_at_0[0x20];
};
+enum {
+ MLX5_ACCESS_MODE_NEXT = 0,
+ MLX5_ACCESS_MODE_CURRENT,
+ MLX5_ACCESS_MODE_DEFAULT,
+};
+
struct mlx5_ifc_config_item_bits {
u8 valid[0x2];
u8 priority[0x2];
@@ -123,6 +131,17 @@ struct mlx5_ifc_nv_sw_offload_conf_bits {
u8 lro_log_timeout0[0x4];
};
+struct mlx5_ifc_nv_sw_offload_cap_bits {
+ u8 reserved_at_0[0x19];
+ u8 swp_l4_csum_mode_l4_only[0x1];
+ u8 reserved_at_1a[0x6];
+};
+
+struct mlx5_ifc_nv_sw_accelerate_conf_bits {
+ u8 swp_l4_csum_mode[0x2];
+ u8 reserved_at_2[0x3e];
+};
+
#define MNVDA_HDR_SZ \
(MLX5_ST_SZ_BYTES(mnvda_reg) - \
MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))
@@ -195,12 +214,39 @@ mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
return mlx5_nv_param_read(dev, mnvda, len);
}
+static int
+mlx5_nv_param_read_sw_offload_cap(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int
+mlx5_nv_param_read_sw_accelerate_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len, int access_mode)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_accelerate_conf);
+ MLX5_SET(mnvda_reg, mnvda, configuration_item_header.access_mode,
+ access_mode);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
static const char *const
cqe_compress_str[] = { "balanced", "aggressive" };
static int
mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
@@ -268,6 +314,182 @@ mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
}
+enum swp_l4_csum_mode {
+ SWP_L4_CSUM_MODE_DEFAULT = 0,
+ SWP_L4_CSUM_MODE_FULL_CSUM = 1,
+ SWP_L4_CSUM_MODE_L4_ONLY = 2,
+};
+
+static const char *const
+ swp_l4_csum_mode_str[] = { "default", "full_csum", "l4_only" };
+
+static int
+mlx5_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ int access_mode, u8 *value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ access_mode);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ *value = MLX5_GET(nv_sw_accelerate_conf, data, swp_l4_csum_mode);
+
+ if (*value >= ARRAY_SIZE(swp_l4_csum_mode_str)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Invalid swp_l4_csum_mode value %u read from device",
+ *value);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_NEXT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(swp_l4_csum_mode_str); i++) {
+ if (!strcmp(val.vstr, swp_l4_csum_mode_str[i]))
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(swp_l4_csum_mode_str) ||
+ i == SWP_L4_CSUM_MODE_DEFAULT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are full_csum/l4_only");
+ return -EINVAL;
+ }
+
+ if (i == SWP_L4_CSUM_MODE_L4_ONLY) {
+ err = mlx5_nv_param_read_sw_offload_cap(dev, cap, sizeof(cap));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+ if (!MLX5_GET(nv_sw_offload_cap, data, swp_l4_csum_mode_l4_only)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "l4_only mode is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, u8 value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ MLX5_ACCESS_MODE_NEXT);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_accelerate_conf, data, swp_l4_csum_mode, value);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to write sw_accelerate_conf mnvda reg");
+
+ return err;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "full_csum"))
+ value = SWP_L4_CSUM_MODE_FULL_CSUM;
+ else
+ value = SWP_L4_CSUM_MODE_L4_ONLY;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
void *mnvda, size_t len)
{
@@ -302,7 +524,8 @@ static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
}
static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
@@ -413,7 +636,8 @@ static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
}
static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
@@ -545,6 +769,14 @@ static const struct devlink_param mlx5_nv_param_devlink_params[] = {
mlx5_nv_param_devlink_cqe_compress_get,
mlx5_nv_param_devlink_cqe_compress_set,
mlx5_nv_param_devlink_cqe_compress_validate),
+ DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
+ "swp_l4_csum_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_swp_l4_csum_mode_get,
+ mlx5_devlink_swp_l4_csum_mode_set,
+ mlx5_devlink_swp_l4_csum_mode_validate,
+ mlx5_devlink_swp_l4_csum_mode_get_default,
+ mlx5_devlink_swp_l4_csum_mode_set_default),
};
int mlx5_nv_param_register_dl_params(struct devlink *devlink)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
index 47fe215f66bf..ef06fe6cbb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
@@ -19,13 +19,16 @@ struct mlx5_st {
struct mutex lock;
struct xa_limit index_limit;
struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
+ u8 direct_mode : 1;
};
struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
{
struct pci_dev *pdev = dev->pdev;
struct mlx5_st *st;
+ u8 direct_mode = 0;
u16 num_entries;
+ u32 tbl_loc;
int ret;
if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
@@ -40,10 +43,16 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
if (!pdev->tph_cap)
return NULL;
- num_entries = pcie_tph_get_st_table_size(pdev);
- /* We need a reserved entry for non TPH cases */
- if (num_entries < 2)
- return NULL;
+ tbl_loc = pcie_tph_get_st_table_loc(pdev);
+ if (tbl_loc == PCI_TPH_LOC_NONE)
+ direct_mode = 1;
+
+ if (!direct_mode) {
+ num_entries = pcie_tph_get_st_table_size(pdev);
+ /* We need a reserved entry for non TPH cases */
+ if (num_entries < 2)
+ return NULL;
+ }
/* The OS doesn't support ST */
ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
@@ -56,6 +65,10 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
mutex_init(&st->lock);
xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
+ st->direct_mode = direct_mode;
+ if (st->direct_mode)
+ return st;
+
/* entry 0 is reserved for non TPH cases */
st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
st->index_limit.max = num_entries - 1;
@@ -96,6 +109,11 @@ int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
if (ret)
return ret;
+ if (st->direct_mode) {
+ *st_index = tag;
+ return 0;
+ }
+
mutex_lock(&st->lock);
xa_for_each(&st->idx_xa, index, idx_data) {
@@ -145,6 +163,9 @@ int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
if (!st)
return -EOPNOTSUPP;
+ if (st->direct_mode)
+ return 0;
+
mutex_lock(&st->lock);
idx_data = xa_load(&st->idx_xa, st_index);
if (WARN_ON_ONCE(!idx_data)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index d55e15c1f380..304912637c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -149,7 +149,7 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
struct mlx5_vxlan *vxlan;
if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
if (!vxlan)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c904696cbc3a..024339ce41f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1010,16 +1010,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_irq_cleanup;
}
- err = mlx5_events_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize events\n");
- goto err_eq_cleanup;
- }
-
err = mlx5_fw_reset_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize fw reset events\n");
- goto err_events_cleanup;
+ goto err_eq_cleanup;
}
mlx5_cq_debugfs_init(dev);
@@ -1121,8 +1115,6 @@ err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
-err_events_cleanup:
- mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
@@ -1155,7 +1147,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
- mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
@@ -1386,12 +1377,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_vhca_event_start(dev);
- err = mlx5_sf_hw_table_create(dev);
- if (err) {
- mlx5_core_err(dev, "sf table create failed %d\n", err);
- goto err_vhca;
- }
-
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1420,8 +1405,6 @@ err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
err_ec:
- mlx5_sf_hw_table_destroy(dev);
-err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
mlx5_fs_core_cleanup(dev);
@@ -1447,12 +1430,12 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
+ mlx5_vhca_event_stop(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
- mlx5_vhca_event_stop(dev);
mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
@@ -1833,6 +1816,50 @@ static int vhca_id_show(struct seq_file *file, void *priv)
DEFINE_SHOW_ATTRIBUTE(vhca_id);
+static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_events_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize events\n");
+ return err;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.esw_n_head);
+ mlx5_vhca_state_notifier_init(dev);
+
+ err = mlx5_sf_hw_notifier_init(dev);
+ if (err)
+ goto err_sf_hw_notifier;
+
+ err = mlx5_sf_notifiers_init(dev);
+ if (err)
+ goto err_sf_notifiers;
+
+ err = mlx5_sf_dev_notifier_init(dev);
+ if (err)
+ goto err_sf_dev_notifier;
+
+ return 0;
+
+err_sf_dev_notifier:
+ mlx5_sf_notifiers_cleanup(dev);
+err_sf_notifiers:
+ mlx5_sf_hw_notifier_cleanup(dev);
+err_sf_hw_notifier:
+ mlx5_events_cleanup(dev);
+ return err;
+}
+
+static void mlx5_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ mlx5_sf_dev_notifier_cleanup(dev);
+ mlx5_sf_notifiers_cleanup(dev);
+ mlx5_sf_hw_notifier_cleanup(dev);
+ mlx5_events_cleanup(dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1888,6 +1915,10 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ err = mlx5_notifiers_init(dev);
+ if (err)
+ goto err_hca_caps;
+
/* The conjunction of sw_vhca_id with sw_owner_id will be a global
* unique id per function which uses mlx5_core.
* Those values are supplied to FW as part of the init HCA command to
@@ -1930,6 +1961,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
if (priv->sw_vhca_id > 0)
ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+ mlx5_notifiers_cleanup(dev);
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
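The main.c refactor above moves event and SF notifier setup into mdev init, so each notifier_block now lives in mlx5_priv, is registered before the tables it serves exist, and its handler recovers the device via container_of() and returns early while the table pointer is still NULL. A generic, hedged sketch of that pattern (foo_dev/foo_table are stand-ins, not mlx5 structures):

/* "Register early, guard on NULL table" pattern in miniature. */
struct foo_table;

struct foo_dev {
	struct blocking_notifier_head ev_head;
	struct notifier_block tbl_nb;	/* embedded, lives as long as the dev */
	struct foo_table *table;	/* created/destroyed on load/unload */
};

static int foo_table_event(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct foo_dev *fdev = container_of(nb, struct foo_dev, tbl_nb);

	if (!fdev->table)		/* the notifier outlives the table */
		return NOTIFY_DONE;

	/* ... act on fdev->table based on event/data ... */
	return NOTIFY_OK;
}

static int foo_notifiers_init(struct foo_dev *fdev)
{
	BLOCKING_INIT_NOTIFIER_HEAD(&fdev->ev_head);
	fdev->tbl_nb.notifier_call = foo_table_event;
	return blocking_notifier_chain_register(&fdev->ev_head, &fdev->tbl_nb);
}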
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index acef7d0ffa09..cfebc110c02f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -357,11 +357,11 @@ int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
bool *enabled);
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data);
+ u16 offset, u16 size, u8 *data, u8 *status);
int
mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
- u8 *data);
+ u8 *data, u8 *status);
int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index e18a850c615c..aa3b5878e3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -324,10 +324,8 @@ err_xa:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
- if (i && rmap && *rmap) {
- free_irq_cpu_rmap(*rmap);
- *rmap = NULL;
- }
+ if (i && rmap && *rmap)
+ irq_cpu_rmap_remove(*rmap, irq->map.virq);
err_irq_rmap:
#endif
if (i && pci_msix_can_alloc_dyn(dev->pdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index aa9f2b0a77d3..85a9e534f442 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -289,11 +289,11 @@ int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
}
static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
- u8 *module_id)
+ u8 *module_id, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int err, status;
+ int err;
u8 *ptr;
MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
@@ -308,12 +308,12 @@ static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ if (MLX5_GET(mcia_reg, out, status)) {
+ if (status)
+ *status = MLX5_GET(mcia_reg, out, status);
return -EIO;
}
+
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
*module_id = ptr[0];
@@ -370,13 +370,14 @@ static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
}
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
- struct mlx5_module_eeprom_query_params *params, u8 *data)
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int status, err;
void *ptr;
u16 size;
+ int err;
size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
@@ -392,12 +393,9 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ *status = MLX5_GET(mcia_reg, out, status);
+ if (*status)
return -EIO;
- }
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
@@ -406,7 +404,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
}
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data)
+ u16 offset, u16 size, u8 *data, u8 *status)
{
struct mlx5_module_eeprom_query_params query = {0};
u8 module_id;
@@ -416,7 +414,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
if (err)
return err;
- err = mlx5_query_module_id(dev, query.module_number, &module_id);
+ err = mlx5_query_module_id(dev, query.module_number, &module_id,
+ status);
if (err)
return err;
@@ -441,12 +440,12 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
query.size = size;
query.offset = offset;
- return mlx5_query_mcia(dev, &query, data);
+ return mlx5_query_mcia(dev, &query, data, status);
}
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
- u8 *data)
+ u8 *data, u8 *status)
{
int err;
@@ -460,7 +459,7 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return -EINVAL;
}
- return mlx5_query_mcia(dev, params, data);
+ return mlx5_query_mcia(dev, params, data, status);
}
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
@@ -1109,6 +1108,7 @@ mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
[MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
[MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
+ [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
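With the extra out-parameter added to the port.c helpers above, callers get the raw MCIA status back instead of only a logged error. A hedged sketch of how a caller could surface it (the wrapper name and extack plumbing are illustrative, not taken from the patch):

/* Hypothetical caller of the new prototype: map a firmware-reported
 * MCIA status onto an extack message rather than a bare -EIO.
 */
static int foo_read_module_eeprom(struct mlx5_core_dev *mdev, u16 offset,
				  u16 size, u8 *data,
				  struct netlink_ext_ack *extack)
{
	u8 status = 0;
	int err;

	err = mlx5_query_module_eeprom(mdev, offset, size, data, &status);
	if (err == -EIO && status)
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Module EEPROM read failed, MCIA status 0x%x",
				       status);
	return err;
}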
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 99219ea52c4b..f310bde3d11f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -16,7 +16,6 @@ struct mlx5_sf_dev_table {
struct xarray devices;
phys_addr_t base_address;
u64 sf_bar_length;
- struct notifier_block nb;
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
@@ -156,18 +155,23 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
static int
mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
{
- struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_dev_nb);
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
u16 max_functions;
u16 sf_index;
u16 base_id;
- max_functions = mlx5_sf_max_functions(table->dev);
+ if (!table)
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
if (!max_functions)
return 0;
- base_id = mlx5_sf_start_function_id(table->dev);
+ base_id = mlx5_sf_start_function_id(dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -177,19 +181,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
break;
case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
else
- mlx5_core_err(table->dev,
+ mlx5_core_err(dev,
"SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
- mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
+ mlx5_sf_dev_add(dev, sf_index, event->function_id,
event->sw_function_id);
break;
default:
@@ -315,6 +319,15 @@ static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
}
}
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_dev_nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ return mlx5_vhca_event_notifier_register(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -329,17 +342,12 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
goto table_err;
}
- table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
xa_init(&table->devices);
dev->priv.sf_dev_table = table;
- err = mlx5_vhca_event_notifier_register(dev, &table->nb);
- if (err)
- goto vhca_err;
-
err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
@@ -351,10 +359,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
arm_err:
mlx5_sf_dev_destroy_active_works(table);
-add_active_err:
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mlx5_vhca_event_work_queues_flush(dev);
-vhca_err:
+add_active_err:
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -372,6 +378,14 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
}
}
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
@@ -380,8 +394,6 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
return;
mlx5_sf_dev_destroy_active_works(table);
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
- mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index b99131e95e37..3ab0449c770c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -25,7 +25,9 @@ struct mlx5_sf_peer_devlink_event_ctx {
int err;
};
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_driver_register(void);
@@ -35,10 +37,19 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
#else
+static inline int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
}
+static inline void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3304f25cc805..b82323b8449e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -31,9 +31,6 @@ struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
- struct notifier_block esw_nb;
- struct notifier_block vhca_nb;
- struct notifier_block mdev_nb;
};
static struct mlx5_sf *
@@ -391,11 +388,16 @@ static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_vhca_nb);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
const struct mlx5_vhca_state_event *event = data;
bool update = false;
struct mlx5_sf *sf;
+ if (!table)
+ return 0;
+
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
@@ -407,7 +409,7 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
- trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
+ trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
unlock:
mutex_unlock(&table->sf_state_lock);
@@ -425,12 +427,16 @@ static void mlx5_sf_del_all(struct mlx5_sf_table *table)
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_esw_nb);
const struct mlx5_esw_event_info *mode = data;
+ if (!dev->priv.sf_table)
+ return 0;
+
switch (mode->new_mode) {
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_del_all(table);
+ mlx5_sf_del_all(dev->priv.sf_table);
break;
default:
break;
@@ -441,15 +447,16 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_mdev_nb);
struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int ret = NOTIFY_DONE;
struct mlx5_sf *sf;
- if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
return NOTIFY_DONE;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
if (!sf)
@@ -464,10 +471,40 @@ out:
return ret;
}
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb);
+ if (err)
+ return err;
+
+ dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_table_vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb);
+ if (err)
+ goto mdev_err;
+
+ return 0;
+mdev_err:
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
+ return err;
+}
+
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table;
- int err;
if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
return 0;
@@ -480,28 +517,18 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
table->dev = dev;
xa_init(&table->function_ids);
dev->priv.sf_table = table;
- table->esw_nb.notifier_call = mlx5_sf_esw_event;
- err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
- if (err)
- goto reg_err;
-
- table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
- err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
- if (err)
- goto vhca_err;
-
- table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
- mlx5_blocking_notifier_register(dev, &table->mdev_nb);
return 0;
+}
-vhca_err:
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
-reg_err:
- mutex_destroy(&table->sf_state_lock);
- kfree(table);
- dev->priv.sf_table = NULL;
- return err;
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb);
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
}
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
@@ -511,9 +538,6 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
- mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
mutex_destroy(&table->sf_state_lock);
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 1f613320fe07..bd968f3b3855 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -30,9 +30,7 @@ enum mlx5_sf_hwc_index {
};
struct mlx5_sf_hw_table {
- struct mlx5_core_dev *dev;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
- struct notifier_block vhca_nb;
struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
@@ -71,14 +69,16 @@ mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
return NULL;
}
-static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller,
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
int free_idx = -1;
int i;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
if (!hwc->sfs)
return -ENOSPC;
@@ -100,11 +100,13 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
return free_idx;
}
-static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+static void mlx5_sf_hw_table_id_free(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller, int id)
{
struct mlx5_sf_hwc_table *hwc;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
hwc->sfs[id].allocated = false;
hwc->sfs[id].pending_delete = false;
}
@@ -120,7 +122,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
return -EOPNOTSUPP;
mutex_lock(&table->table_lock);
- sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+ sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum);
if (sw_id < 0) {
err = sw_id;
goto exist_err;
@@ -151,7 +153,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
vhca_err:
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
- mlx5_sf_hw_table_id_free(table, controller, sw_id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
@@ -165,7 +167,7 @@ void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
mutex_lock(&table->table_lock);
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
- mlx5_sf_hw_table_id_free(table, controller, id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, id);
mutex_unlock(&table->table_lock);
}
@@ -216,10 +218,12 @@ static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
}
}
-static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table)
{
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev,
+ &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
@@ -301,7 +305,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
}
mutex_init(&table->table_lock);
- table->dev = dev;
dev->priv.sf_hw_table = table;
base_id = mlx5_sf_start_function_id(dev);
@@ -338,19 +341,22 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
mutex_destroy(&table->table_lock);
kfree(table);
+ dev->priv.sf_hw_table = NULL;
res_unregister:
mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_hw_table_vhca_nb);
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
- if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
@@ -365,20 +371,28 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
- mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
+ mlx5_sf_hw_table_hwc_sf_free(dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
- if (!table)
+ if (mlx5_core_is_sf(dev))
return 0;
- table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
- return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
+ dev->priv.sf_hw_table_vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
+}
+
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -388,9 +402,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */
- mlx5_sf_hw_table_dealloc_all(table);
+ mlx5_sf_hw_table_dealloc_all(dev, table);
}
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 89559a37997a..d8a934a0e968 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -12,10 +12,13 @@
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
@@ -44,20 +47,33 @@ static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+static inline int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
}
+static inline int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index cda01ba441ae..b04cf6cf8956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -9,15 +9,9 @@
#define CREATE_TRACE_POINTS
#include "diag/vhca_tracepoint.h"
-struct mlx5_vhca_state_notifier {
- struct mlx5_core_dev *dev;
- struct mlx5_nb nb;
- struct blocking_notifier_head n_head;
-};
-
struct mlx5_vhca_event_work {
struct work_struct work;
- struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_core_dev *dev;
struct mlx5_vhca_state_event event;
};
@@ -95,16 +89,14 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
mlx5_vhca_event_arm(dev, event->function_id);
trace_mlx5_sf_vhca_event(dev, event);
- blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+ blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event);
}
static void mlx5_vhca_state_work_handler(struct work_struct *_work)
{
struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
- struct mlx5_vhca_state_notifier *notifier = work->notifier;
- struct mlx5_core_dev *dev = notifier->dev;
- mlx5_vhca_event_notify(dev, &work->event);
+ mlx5_vhca_event_notify(work->dev, &work->event);
kfree(work);
}
@@ -116,8 +108,8 @@ void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct wo
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
- struct mlx5_vhca_state_notifier *notifier =
- mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev,
+ priv.vhca_state_nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
int wq_idx;
@@ -126,10 +118,10 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
- work->notifier = notifier;
+ work->dev = dev;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
- mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
+ mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -145,9 +137,15 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head);
+ MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier,
+ VHCA_STATE_CHANGE);
+}
+
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct mlx5_vhca_events *events;
int err, i;
@@ -160,7 +158,6 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
return -ENOMEM;
events->dev = dev;
- dev->priv.vhca_events = events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
events->handler[i].wq = create_singlethread_workqueue(wq_name);
@@ -169,20 +166,10 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
goto err_create_wq;
}
}
+ dev->priv.vhca_events = events;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier) {
- err = -ENOMEM;
- goto err_notifier;
- }
-
- dev->priv.vhca_state_notifier = notifier;
- notifier->dev = dev;
- BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
- MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
-err_notifier:
err_create_wq:
for (--i; i >= 0; i--)
destroy_workqueue(events->handler[i].wq);
@@ -211,8 +198,6 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_vhca_event_supported(dev))
return;
- kfree(dev->priv.vhca_state_notifier);
- dev->priv.vhca_state_notifier = NULL;
vhca_events = dev->priv.vhca_events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
destroy_workqueue(vhca_events->handler[i].wq);
@@ -221,34 +206,30 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_register(dev, &notifier->nb);
+ mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb);
}
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_unregister(dev, &notifier->nb);
+ mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb);
+
+ /* Flush workqueues of all pending events. */
+ mlx5_vhca_event_work_queues_flush(dev);
}
int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- if (!dev->priv.vhca_state_notifier)
- return -EOPNOTSUPP;
- return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head,
+ nb);
}
void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
index 1725ba64f8af..52790423874c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -18,6 +18,7 @@ static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
}
void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev);
int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
@@ -37,6 +38,10 @@ static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *s
{
}
+static inline void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+}
+
static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 65740bb68b09..e8c67ed9f748 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -410,7 +410,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
@@ -419,7 +419,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
@@ -428,10 +428,10 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_FDB:
if (!dmn->info.caps.eswitch_manager)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 992873536c1b..306affbcfd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -78,15 +78,14 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out)
+ bool other_vport, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
@@ -97,7 +96,7 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -219,7 +218,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -429,7 +428,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -451,7 +450,7 @@ int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -462,7 +461,8 @@ out:
return err;
}
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
@@ -472,7 +472,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out);
if (err)
goto out;
@@ -529,7 +529,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -804,7 +804,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (err)
goto out;
@@ -908,7 +908,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
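In the vport.c hunks above, mlx5_query_nic_vport_node_guid() now takes the vport number plus an explicit other_vport flag instead of implicitly querying function 0. A short hedged usage sketch (the wrapper function is hypothetical):

/* Query our own node GUID vs. that of another function. */
static int foo_query_node_guids(struct mlx5_core_dev *mdev, u16 vf_vport,
				u64 *own_guid, u64 *vf_guid)
{
	int err;

	err = mlx5_query_nic_vport_node_guid(mdev, 0, false, own_guid);
	if (err)
		return err;

	return mlx5_query_nic_vport_node_guid(mdev, vf_vport, true, vf_guid);
}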
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index b032d5a4b3b8..10f5bc4892fc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -601,6 +601,8 @@ int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
info->psid);
+ if (err)
+ goto unlock;
sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
info->fw_sub_minor);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index b1d08e958bf9..69f9da9fb305 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1489,7 +1489,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam *tcam;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6a4a81c63451..353fd9ca89a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -830,8 +830,10 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!rule)
- return -EINVAL;
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
&drops, &lastuse, &used_hw_stats);
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index dff51f23d295..ca5c7ac2a5bc 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -26,6 +26,7 @@ config FBNIC
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select PAGE_POOL
+ select PCS_XPCS
select PHYLINK
select PLDMFW
help
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 15e8ff649615..72c41af65364 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -21,6 +21,7 @@ fbnic-y := fbnic_csr.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_mdio.o \
fbnic_time.o \
fbnic_tlv.o \
fbnic_txrx.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index b03e5a3d5144..779a083b9215 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -34,7 +34,7 @@ struct fbnic_dev {
u32 __iomem *uc_addr4;
const struct fbnic_mac *mac;
unsigned int fw_msix_vector;
- unsigned int pcs_msix_vector;
+ unsigned int mac_msix_vector;
unsigned short num_irqs;
struct {
@@ -83,6 +83,10 @@ struct fbnic_dev {
/* Last @time_high refresh time in jiffies (to catch stalls) */
unsigned long last_read;
+ /* PMD specific data */
+ unsigned long end_of_pmd_training;
+ u8 pmd_state;
+
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
@@ -91,6 +95,9 @@ struct fbnic_dev {
u64 prev_firmware_time;
struct fbnic_fw_log fw_log;
+
+ /* MDIO bus for PHYs */
+ struct mii_bus *mdio_bus;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
@@ -175,8 +182,8 @@ void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
void fbnic_hwmon_register(struct fbnic_dev *fbd);
void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-int fbnic_pcs_request_irq(struct fbnic_dev *fbd);
-void fbnic_pcs_free_irq(struct fbnic_dev *fbd);
+int fbnic_mac_request_irq(struct fbnic_dev *fbd);
+void fbnic_mac_free_irq(struct fbnic_dev *fbd);
void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
int fbnic_napi_request_irq(struct fbnic_dev *fbd,
@@ -200,6 +207,8 @@ void fbnic_dbg_exit(void);
void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd);
+int fbnic_mdiobus_create(struct fbnic_dev *fbd);
+
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index d3a7ad921f18..422265dc7abd 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -787,6 +787,8 @@ enum {
/* MAC PCS registers */
#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */
+#define FBNIC_PCS_PAGE(n) (0x10000 + 0x400 * (n)) /* 0x10000 + 1024*n */
+#define FBNIC_PCS(reg, n) ((reg) + FBNIC_PCS_PAGE(n))
#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 95fac020eb93..693ebdf38705 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -1863,6 +1863,14 @@ fbnic_get_rmon_stats(struct net_device *netdev,
*ranges = fbnic_rmon_ranges;
}
+static void fbnic_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ stats->link_down_events = fbn->link_down_events;
+}
+
static const struct ethtool_ops fbnic_ethtool_ops = {
.cap_link_lanes_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -1874,6 +1882,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_regs_len = fbnic_get_regs_len,
.get_regs = fbnic_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = fbnic_get_link_ext_stats,
.get_coalesce = fbnic_get_coalesce,
.set_coalesce = fbnic_set_coalesce,
.get_ringparam = fbnic_get_ringparam,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 1166fa17438d..d8d9b6cfde82 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -201,7 +201,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
return -ENODEV;
/* Fill all but 1 unused descriptors in the Rx queue. */
- count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
+ count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
while (!err && count--) {
struct fbnic_tlv_msg *msg;
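The mailbox fix above swaps the modulo for a power-of-two mask when counting free Rx descriptors, presumably so the count stays in range even if the head/tail subtraction goes negative in signed arithmetic. A tiny hedged illustration (RING_LEN stands in for FBNIC_IPC_MBX_DESC_LEN):

#define RING_LEN 8	/* must be a power of two */

/* With '%', (head - tail - 1) can come out negative for signed operands
 * (e.g. head == tail gives -1 % 8 == -1); the mask always lands in
 * [0, RING_LEN - 1].
 */
static unsigned int ring_free_entries(int head, int tail)
{
	return (head - tail - 1) & (RING_LEN - 1);
}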
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 1c88a2bf3a7a..02e8b0b257fe 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -118,12 +118,12 @@ void fbnic_fw_free_mbx(struct fbnic_dev *fbd)
fbd->fw_msix_vector = 0;
}
-static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
+static irqreturn_t fbnic_mac_msix_intr(int __always_unused irq, void *data)
{
struct fbnic_dev *fbd = data;
struct fbnic_net *fbn;
- if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ if (fbd->mac->get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
1u << FBNIC_PCS_MSIX_ENTRY);
return IRQ_HANDLED;
@@ -131,26 +131,28 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
fbn = netdev_priv(fbd->netdev);
- phylink_pcs_change(&fbn->phylink_pcs, false);
+ /* Record link down events */
+ if (!fbd->mac->get_link(fbd, fbn->aui, fbn->fec))
+ phylink_pcs_change(fbn->pcs, false);
return IRQ_HANDLED;
}
/**
- * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link
+ * fbnic_mac_request_irq - Configure the MAC to enable it to advertise link
* @fbd: Pointer to device to initialize
*
- * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
+ * This function provides basic bringup for the MAC/PHY IRQ. For now the IRQ
* will remain disabled until we start the MAC/PCS/PHY logic via phylink.
*
* Return: non-zero on failure.
**/
-int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
+int fbnic_mac_request_irq(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
int vector, err;
- WARN_ON(fbd->pcs_msix_vector);
+ WARN_ON(fbd->mac_msix_vector);
vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
if (vector < 0)
@@ -159,7 +161,7 @@ int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
/* Request the IRQ for PCS link vector.
* Map PCS cause to it, and unmask it
*/
- err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
+ err = request_irq(vector, &fbnic_mac_msix_intr, 0,
fbd->netdev->name, fbd);
if (err)
return err;
@@ -168,22 +170,22 @@ int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
- fbd->pcs_msix_vector = vector;
+ fbd->mac_msix_vector = vector;
return 0;
}
/**
- * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping
+ * fbnic_mac_free_irq - Teardown the MAC IRQ to prepare for stopping
* @fbd: Pointer to device that is stopping
*
- * This function undoes the work done in fbnic_pcs_request_irq and prepares
+ * This function undoes the work done in fbnic_mac_request_irq and prepares
* the device to no longer receive traffic on the host interface.
**/
-void fbnic_pcs_free_irq(struct fbnic_dev *fbd)
+void fbnic_mac_free_irq(struct fbnic_dev *fbd)
{
/* Vector has already been freed */
- if (!fbd->pcs_msix_vector)
+ if (!fbd->mac_msix_vector)
return;
/* Disable interrupt */
@@ -192,14 +194,14 @@ void fbnic_pcs_free_irq(struct fbnic_dev *fbd)
fbnic_wrfl(fbd);
/* Synchronize IRQ to prevent race that would unmask vector */
- synchronize_irq(fbd->pcs_msix_vector);
+ synchronize_irq(fbd->mac_msix_vector);
/* Mask the vector */
fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
/* Free the vector */
- free_irq(fbd->pcs_msix_vector, fbd);
- fbd->pcs_msix_vector = 0;
+ free_irq(fbd->mac_msix_vector, fbd);
+ fbd->mac_msix_vector = 0;
}
void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 2a84bd1d7e26..fc7abea4ef5b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -434,14 +434,14 @@ static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
}
-static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
+static int fbnic_mac_get_link_event(struct fbnic_dev *fbd)
{
- u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+ u32 intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
- if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ if (intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
return FBNIC_LINK_EVENT_DOWN;
- return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ return (intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
}
@@ -466,9 +466,8 @@ static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
return command_config;
}
-static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
+static bool fbnic_mac_get_link_status(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
u32 pcs_status, lane_mask = ~0;
pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
@@ -476,7 +475,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return false;
/* Define the expected lane mask for the status bits we need to check */
- switch (fbn->aui) {
+ switch (aui) {
case FBNIC_AUI_100GAUI2:
lane_mask = 0xf;
break;
@@ -484,7 +483,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
lane_mask = 3;
break;
case FBNIC_AUI_LAUI2:
- switch (fbn->fec) {
+ switch (fec) {
case FBNIC_FEC_OFF:
lane_mask = 0x63;
break;
@@ -502,7 +501,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
}
/* Use an XOR to remove the bits we expect to see set */
- switch (fbn->fec) {
+ switch (fec) {
case FBNIC_FEC_OFF:
lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
pcs_status);
@@ -521,7 +520,46 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return !lane_mask;
}
-static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
+static bool fbnic_pmd_update_state(struct fbnic_dev *fbd, bool signal_detect)
+{
+ /* Delay link up for 4 seconds to allow for link training.
+ * The state transitions for this are as follows:
+ *
+ * All states have the following two transitions in common:
+ * Loss of signal -> FBNIC_PMD_INITIALIZE
+ * The condition handled below (!signal)
+ * Reconfiguration -> FBNIC_PMD_INITIALIZE
+ * Occurs when mac_prepare starts a PHY reconfig
+ * FBNIC_PMD_TRAINING:
+ * signal still detected && 4s have passed -> Report link up
+ * When link is brought up in link_up -> FBNIC_PMD_SEND_DATA
+ * FBNIC_PMD_INITIALIZE:
+ * signal detected -> FBNIC_PMD_TRAINING
+ */
+ if (!signal_detect) {
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
+ return false;
+ }
+
+ switch (fbd->pmd_state) {
+ case FBNIC_PMD_TRAINING:
+ return time_before(fbd->end_of_pmd_training, jiffies);
+ case FBNIC_PMD_LINK_READY:
+ case FBNIC_PMD_SEND_DATA:
+ return true;
+ }
+
+ fbd->end_of_pmd_training = jiffies + 4 * HZ;
+
+ /* Ensure end_of_pmd_training is visible before the state change */
+ smp_wmb();
+
+ fbd->pmd_state = FBNIC_PMD_TRAINING;
+
+ return false;
+}
+
+static bool fbnic_mac_get_link(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
bool link;
@@ -538,7 +576,8 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
- link = fbnic_mac_get_pcs_link_status(fbd);
+ link = fbnic_mac_get_link_status(fbd, aui, fec);
+ link = fbnic_pmd_update_state(fbd, link);
/* Enable interrupt to only capture changes in link state */
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
@@ -586,20 +625,15 @@ void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
}
}
-static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
+static void fbnic_mac_prepare(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
/* Mask and clear the PCS interrupt, will be enabled by link handler */
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
- return 0;
-}
-
-static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
-{
- /* Mask and clear the PCS interrupt */
- wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
- wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+ /* If we don't have link tear it all down and start over */
+ if (!fbnic_mac_get_link_status(fbd, aui, fec))
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
}
static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
@@ -867,10 +901,9 @@ exit_free:
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
- .pcs_enable = fbnic_pcs_enable_asic,
- .pcs_disable = fbnic_pcs_disable_asic,
- .pcs_get_link = fbnic_pcs_get_link_asic,
- .pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_link = fbnic_mac_get_link,
+ .get_link_event = fbnic_mac_get_link_event,
+ .prepare = fbnic_mac_prepare,
.get_fec_stats = fbnic_mac_get_fec_stats,
.get_pcs_stats = fbnic_mac_get_pcs_stats,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
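The PMD handling introduced above gates link reporting behind a four-second training window after signal detect. A stand-alone, hedged model of that state flow (plain C with a simple tick counter in place of jiffies; names mirror the driver but this is not driver code):

#include <stdbool.h>

enum { PMD_SEND_DATA, PMD_INITIALIZE, PMD_TRAINING };

struct pmd_model {
	int state;			/* zero-init == PMD_SEND_DATA */
	unsigned long train_deadline;
};

/* Report link only once a signal has been present long enough for
 * training to finish; any loss of signal restarts the sequence.
 */
static bool pmd_link_ok(struct pmd_model *p, bool signal, unsigned long now)
{
	enum { TRAIN_TIME = 4 };	/* "seconds" of training delay */

	if (!signal) {
		p->state = PMD_INITIALIZE;
		return false;
	}

	switch (p->state) {
	case PMD_TRAINING:		/* wait out the training window */
		return now >= p->train_deadline;
	case PMD_SEND_DATA:		/* already passing data */
		return true;
	}

	/* PMD_INITIALIZE with signal: start the training timer */
	p->train_deadline = now + TRAIN_TIME;
	p->state = PMD_TRAINING;
	return false;
}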
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index ede5ff0dae22..f08fe8b7c497 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -10,6 +10,24 @@ struct fbnic_dev;
#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+/* States loosely based on section 136.8.11.7.5 of IEEE 802.3-2022 Ethernet
+ * Standard. These are needed to track the state of the PHY, as it has a delay
+ * of several seconds from the time link comes up until training completes,
+ * and we need to wait for that before reporting the link.
+ *
+ * Currently we treat training as a single block as this is managed by the
+ * firmware.
+ *
+ * We have FBNIC_PMD_SEND_DATA set to 0 as the expected default at driver load
+ * and we initialize the structure containing it to zero at allocation.
+ */
+enum {
+ FBNIC_PMD_SEND_DATA = 0x0,
+ FBNIC_PMD_INITIALIZE = 0x1,
+ FBNIC_PMD_TRAINING = 0x2,
+ FBNIC_PMD_LINK_READY = 0x3,
+};
+
enum {
FBNIC_LINK_EVENT_NONE = 0,
FBNIC_LINK_EVENT_UP = 1,
@@ -38,6 +56,7 @@ enum {
FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */
FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */
FBNIC_AUI_UNKNOWN = 4,
+ __FBNIC_AUI_MAX__
};
#define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2)
@@ -55,15 +74,15 @@ enum fbnic_sensor_id {
* void (*init_regs)(struct fbnic_dev *fbd);
* Initialize MAC registers to enable Tx/Rx paths and FIFOs.
*
- * void (*pcs_enable)(struct fbnic_dev *fbd);
- * Configure and enable PCS to enable link if not already enabled
- * void (*pcs_disable)(struct fbnic_dev *fbd);
- * Shutdown the link if we are the only consumer of it.
- * bool (*pcs_get_link)(struct fbnic_dev *fbd);
- * Check PCS link status
- * int (*pcs_get_link_event)(struct fbnic_dev *fbd)
+ * int (*get_link_event)(struct fbnic_dev *fbd)
* Get the current link event status, reports true if link has
* changed to either FBNIC_LINK_EVENT_DOWN or FBNIC_LINK_EVENT_UP
+ * bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Check link status
+ *
+ * void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Prepare PHY for init by fetching settings, disabling interrupts,
+ * and sending an updated PHY config to FW if needed.
*
* void (*link_down)(struct fbnic_dev *fbd);
* Configure MAC for link down event
@@ -74,10 +93,10 @@ enum fbnic_sensor_id {
struct fbnic_mac {
void (*init_regs)(struct fbnic_dev *fbd);
- int (*pcs_enable)(struct fbnic_dev *fbd);
- void (*pcs_disable)(struct fbnic_dev *fbd);
- bool (*pcs_get_link)(struct fbnic_dev *fbd);
- int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ int (*get_link_event)(struct fbnic_dev *fbd);
+ bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+
+ void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_fec_stats *fec_stats);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
new file mode 100644
index 000000000000..709041f7fc43
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/mdio.h>
+#include <linux/pcs/pcs-xpcs.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+
+#define DW_VENDOR BIT(15)
+#define FBNIC_PCS_VENDOR BIT(9)
+#define FBNIC_PCS_ZERO_MASK (DW_VENDOR - FBNIC_PCS_VENDOR)
+
+static int
+fbnic_mdio_read_pmd(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ u8 aui = FBNIC_AUI_UNKNOWN;
+ struct fbnic_net *fbn;
+ int ret = 0;
+
+ /* We don't need a second PMD, just one can handle both lanes */
+ if (addr)
+ return 0;
+
+ if (fbd->netdev) {
+ fbn = netdev_priv(fbd->netdev);
+ if (fbn->aui < FBNIC_AUI_UNKNOWN)
+ aui = fbn->aui;
+ }
+
+ switch (regnum) {
+ case MDIO_DEVID1:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID >> 16;
+ break;
+ case MDIO_DEVID2:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID & 0xffff;
+ break;
+ case MDIO_DEVS1:
+ ret = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+ break;
+ case MDIO_STAT2:
+ ret = MDIO_STAT2_DEVPRST_VAL;
+ break;
+ case MDIO_PMA_RXDET:
+ /* If training isn't complete, default to 0 */
+ if (fbd->pmd_state != FBNIC_PMD_SEND_DATA)
+ break;
+ /* Report either 1 or 2 lanes detected depending on config */
+ ret = (MDIO_PMD_RXDET_GLOBAL | MDIO_PMD_RXDET_0) |
+ ((aui & FBNIC_AUI_MODE_R2) *
+ (MDIO_PMD_RXDET_1 / FBNIC_AUI_MODE_R2));
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(fbd->dev,
+ "SWMII PMD Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_pcs(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ int ret, offset = 0;
+
+ /* We will need access to both PCS instances to get config info */
+ if (addr >= 2)
+ return 0;
+
+ /* Report 0 for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return 0;
+
+ /* Intercept and return correct ID for PCS */
+ if (regnum == MDIO_DEVID1)
+ return DW_XPCS_ID >> 16;
+ if (regnum == MDIO_DEVID2)
+ return DW_XPCS_ID & 0xffff;
+ if (regnum == MDIO_DEVS1)
+ return MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ offset ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ ret = fbnic_rd32(fbd, FBNIC_PCS_PAGE(addr) + (regnum ^ offset));
+
+ dev_dbg(fbd->dev,
+ "SWMII PCS Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ return fbnic_mdio_read_pmd(fbd, addr, regnum);
+
+ if (devnum == MDIO_MMD_PCS)
+ return fbnic_mdio_read_pcs(fbd, addr, regnum);
+
+ return 0;
+}
+
+static void
+fbnic_mdio_write_pmd(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PMD Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+}
+
+static void
+fbnic_mdio_write_pcs(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PCS Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+
+ /* Allow access to both halves of PCS for 50R2 config */
+ if (addr >= 2)
+ return;
+
+ /* Skip write for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ regnum ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ fbnic_wr32(fbd, FBNIC_PCS_PAGE(addr) + regnum, val);
+}
+
+static int
+fbnic_mdio_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ fbnic_mdio_write_pmd(fbd, addr, regnum, val);
+
+ if (devnum == MDIO_MMD_PCS)
+ fbnic_mdio_write_pcs(fbd, addr, regnum, val);
+
+ return 0;
+}
+
+/**
+ * fbnic_mdiobus_create - Create an MDIO bus to allow interfacing w/ PHYs
+ * @fbd: Pointer to FBNIC device structure to populate bus on
+ *
+ * Initialize an MDIO bus and place a pointer to it on the fbd struct. This bus
+ * will be used to interface with the PMA/PMD and PCS.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_mdiobus_create(struct fbnic_dev *fbd)
+{
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc(fbd->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "fbnic_mii_bus";
+ bus->read_c45 = &fbnic_mdio_read_c45;
+ bus->write_c45 = &fbnic_mdio_write_c45;
+
+ /* Disable PHY auto probing. We will add PCS manually */
+ bus->phy_mask = ~0;
+
+ bus->parent = fbd->dev;
+ bus->priv = fbd;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(fbd->dev));
+
+ err = devm_mdiobus_register(fbd->dev, bus);
+ if (err) {
+ dev_err(fbd->dev, "Failed to create MDIO bus: %d\n", err);
+ return err;
+ }
+
+ fbd->mdio_bus = bus;
+
+ return 0;
+}
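For reference, the vendor-page remapping used by the PCS read/write paths above can be checked in isolation: the DW XPCS vendor page lives at register bit 15, the FBNIC PCS vendor page at bit 9, and anything in between is treated as reserved. The small standalone program below uses our own helper name, with the mask values copied from the defines in this file.

    #include <stdio.h>

    #define DW_VENDOR            (1u << 15)
    #define FBNIC_PCS_VENDOR     (1u << 9)
    #define FBNIC_PCS_ZERO_MASK  (DW_VENDOR - FBNIC_PCS_VENDOR)  /* 0x7e00, bits 14..9 */

    /* Mirrors the read path above: reserved registers read as zero, and a
     * DW vendor-page access (bit 15) is folded onto the FBNIC vendor page
     * (bit 9) before the MMIO offset is formed. */
    static int pcs_reg_offset(unsigned int regnum)
    {
        unsigned int offset = 0;

        if (regnum & FBNIC_PCS_ZERO_MASK)
            return -1;                          /* reserved: driver returns 0 */
        if (regnum & DW_VENDOR)
            offset ^= DW_VENDOR | FBNIC_PCS_VENDOR;
        return (int)(regnum ^ offset);
    }

    int main(void)
    {
        printf("0x8010 -> 0x%04x\n", pcs_reg_offset(0x8010)); /* 0x0210 */
        printf("0x0001 -> 0x%04x\n", pcs_reg_offset(0x0001)); /* unchanged */
        printf("0x4000 -> %d\n",      pcs_reg_offset(0x4000)); /* reserved */
        return 0;
    }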
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index e95be0e7bd9e..81c9d5c9a4b2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -44,7 +44,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto time_stop;
- err = fbnic_pcs_request_irq(fbd);
+ err = fbnic_mac_request_irq(fbd);
if (err)
goto time_stop;
@@ -86,10 +86,10 @@ static int fbnic_stop(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ fbnic_mac_free_irq(fbn->fbd);
phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
fbnic_down(fbn);
- fbnic_pcs_free_irq(fbn->fbd);
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
@@ -697,10 +697,7 @@ void fbnic_reset_queues(struct fbnic_net *fbn,
**/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
-
- if (fbn->phylink)
- phylink_destroy(fbn->phylink);
+ fbnic_phylink_destroy(fbd->netdev);
free_netdev(fbd->netdev);
fbd->netdev = NULL;
@@ -802,7 +799,7 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netif_tx_stop_all_queues(netdev);
- if (fbnic_phylink_init(netdev)) {
+ if (fbnic_phylink_create(netdev)) {
fbnic_netdev_free(fbd);
return NULL;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b0a87c57910f..9129a658f8fa 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -44,7 +44,7 @@ struct fbnic_net {
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs *pcs;
u8 aui;
u8 fec;
@@ -73,6 +73,8 @@ struct fbnic_net {
/* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
+
+ bool tx_pause;
};
int __fbnic_open(struct fbnic_net *fbn);
@@ -106,8 +108,10 @@ int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
struct ethtool_link_ksettings *cmd);
int fbnic_phylink_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam);
+int fbnic_phylink_create(struct net_device *netdev);
+void fbnic_phylink_destroy(struct net_device *netdev);
int fbnic_phylink_init(struct net_device *netdev);
-
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev);
bool fbnic_check_split_frames(struct bpf_prog *prog,
unsigned int mtu, u32 hds_threshold);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 4620f1847f2e..861d98099c44 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -207,6 +207,10 @@ static void fbnic_service_task(struct work_struct *work)
{
struct fbnic_dev *fbd = container_of(to_delayed_work(work),
struct fbnic_dev, service_task);
+ struct net_device *netdev = fbd->netdev;
+
+ if (netif_running(netdev))
+ fbnic_phylink_pmd_training_complete_notify(netdev);
rtnl_lock();
@@ -224,7 +228,7 @@ static void fbnic_service_task(struct work_struct *work)
netdev_unlock(fbd->netdev);
}
- if (netif_running(fbd->netdev))
+ if (netif_running(netdev))
schedule_delayed_work(&fbd->service_task, HZ);
rtnl_unlock();
@@ -335,6 +339,9 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_failure_mode;
}
+ if (fbnic_mdiobus_create(fbd))
+ goto init_failure_mode;
+
netdev = fbnic_netdev_alloc(fbd);
if (!netdev) {
dev_err(&pdev->dev, "Netdev allocation failed\n");
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 7ce3fdd25282..09c5225111be 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/phylink.h>
@@ -101,88 +102,47 @@ int fbnic_phylink_get_fecparam(struct net_device *netdev,
return 0;
}
-static struct fbnic_net *
-fbnic_pcs_to_net(struct phylink_pcs *pcs)
-{
- return container_of(pcs, struct fbnic_net, phylink_pcs);
-}
-
-static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
- struct phylink_link_state *state)
+static struct phylink_pcs *
+fbnic_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
-
- switch (fbn->aui) {
- case FBNIC_AUI_25GAUI:
- state->speed = SPEED_25000;
- break;
- case FBNIC_AUI_LAUI2:
- case FBNIC_AUI_50GAUI1:
- state->speed = SPEED_50000;
- break;
- case FBNIC_AUI_100GAUI2:
- state->speed = SPEED_100000;
- break;
- default:
- state->link = 0;
- return;
- }
-
- state->duplex = DUPLEX_FULL;
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
- state->link = fbd->mac->pcs_get_link(fbd);
+ return fbn->pcs;
}
static int
-fbnic_phylink_pcs_enable(struct phylink_pcs *pcs)
+fbnic_phylink_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
- return fbd->mac->pcs_enable(fbd);
+ fbd->mac->prepare(fbd, fbn->aui, fbn->fec);
+
+ return 0;
}
static void
-fbnic_phylink_pcs_disable(struct phylink_pcs *pcs)
+fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
-
- return fbd->mac->pcs_disable(fbd);
}
static int
-fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
- phy_interface_t interface,
- const unsigned long *advertising,
- bool permit_pause_to_mac)
-{
- return 0;
-}
-
-static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = {
- .pcs_config = fbnic_phylink_pcs_config,
- .pcs_enable = fbnic_phylink_pcs_enable,
- .pcs_disable = fbnic_phylink_pcs_disable,
- .pcs_get_state = fbnic_phylink_pcs_get_state,
-};
-
-static struct phylink_pcs *
-fbnic_phylink_mac_select_pcs(struct phylink_config *config,
- phy_interface_t interface)
+fbnic_phylink_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
{
struct net_device *netdev = to_net_dev(config->dev);
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
- return &fbn->phylink_pcs;
-}
+ /* Retest the link state and restart interrupts */
+ fbd->mac->get_link(fbd, fbn->aui, fbn->fec);
-static void
-fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
-{
+ return 0;
}
static void
@@ -208,23 +168,48 @@ fbnic_phylink_mac_link_up(struct phylink_config *config,
struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
+ fbn->tx_pause = tx_pause;
+ fbnic_config_drop_mode(fbn, tx_pause);
+
fbd->mac->link_up(fbd, tx_pause, rx_pause);
}
static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
.mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_prepare = fbnic_phylink_mac_prepare,
.mac_config = fbnic_phylink_mac_config,
+ .mac_finish = fbnic_phylink_mac_finish,
.mac_link_down = fbnic_phylink_mac_link_down,
.mac_link_up = fbnic_phylink_mac_link_up,
};
-int fbnic_phylink_init(struct net_device *netdev)
+/**
+ * fbnic_phylink_create - Phylink device creation
+ * @netdev: Network Device struct to attach phylink device
+ *
+ * Initialize and attach a phylink instance to the device. The phylink
+ * device will make use of the netdev struct to track carrier and will
+ * eventually be used to expose the current state of the MAC and PCS
+ * setup.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_phylink_create(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
+ int err;
+
+ pcs = xpcs_create_pcs_mdiodev(fbd->mdio_bus, 0);
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err(fbd->dev, "Failed to create PCS device: %d\n", err);
+ return err;
+ }
- fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
+ fbn->pcs = pcs;
fbn->phylink_config.dev = &netdev->dev;
fbn->phylink_config.type = PHYLINK_NETDEV;
@@ -247,10 +232,80 @@ int fbnic_phylink_init(struct net_device *netdev)
phylink = phylink_create(&fbn->phylink_config, NULL,
fbnic_phylink_select_interface(fbn->aui),
&fbnic_phylink_mac_ops);
- if (IS_ERR(phylink))
- return PTR_ERR(phylink);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ dev_err(netdev->dev.parent,
+ "Failed to create Phylink interface, err: %d\n", err);
+ xpcs_destroy_pcs(pcs);
+ return err;
+ }
fbn->phylink = phylink;
return 0;
}
+
+/**
+ * fbnic_phylink_destroy - Teardown phylink related interfaces
+ * @netdev: Network Device struct containing phylink device
+ *
+ * Detach and free resources related to phylink interface.
+ **/
+void fbnic_phylink_destroy(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+ if (fbn->pcs)
+ xpcs_destroy_pcs(fbn->pcs);
+}
+
+/**
+ * fbnic_phylink_pmd_training_complete_notify - PMD training complete notifier
+ * @netdev: Netdev struct the phylink device is attached to
+ *
+ * When the link first comes up the PMD has a period of 2 to 3 seconds during
+ * which the link will flap due to link training. To avoid spamming the
+ * kernel log with messages about this, we add a delay of 4 seconds from the
+ * time of the last PCS report of link so that we are unlikely to see any
+ * further link loss events due to link training.
+ **/
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ if (fbd->pmd_state != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Prevent reading end_of_pmd_training until we verified state */
+ smp_rmb();
+
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* At this point we have verified that the link has been up for
+ * the full training duration. As a first step we will try
+ * transitioning to link ready.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_TRAINING,
+ FBNIC_PMD_LINK_READY) != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Perform a follow-up check to verify that the link didn't flap
+ * just before our transition by rechecking the training timer.
+ */
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* The training timeout has elapsed. We are good to swap out
+ * link_ready for send_data assuming no other events have occurred
+ * that would have pulled us back into initialization or training.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_LINK_READY,
+ FBNIC_PMD_SEND_DATA) != FBNIC_PMD_LINK_READY)
+ return;
+
+ phylink_pcs_change(fbn->pcs, false);
+}
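The notifier above relies on a double-checked promotion rather than a lock: promote TRAINING to LINK_READY, re-check the training deadline, and only then promote LINK_READY to SEND_DATA, so a link flap racing with the service task aborts the promotion instead of reporting link early. A minimal userspace sketch of that pattern (C11 atomics, our own names, plain comparisons instead of the kernel's time_before()) looks like this:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { SEND_DATA, INITIALIZE, TRAINING, LINK_READY };

    static _Atomic int  pmd_state = TRAINING;
    static _Atomic long end_of_training;        /* "jiffies"-style deadline */

    static bool try_report_link(long now)
    {
        int expect = TRAINING;

        if (atomic_load(&end_of_training) > now)
            return false;                       /* still inside the window */
        if (!atomic_compare_exchange_strong(&pmd_state, &expect, LINK_READY))
            return false;                       /* state changed under us */
        if (atomic_load(&end_of_training) > now)
            return false;                       /* flap just before the swap */
        expect = LINK_READY;
        return atomic_compare_exchange_strong(&pmd_state, &expect, SEND_DATA);
    }

    int main(void)
    {
        atomic_store(&end_of_training, 100);
        printf("at t=50:  %d\n", try_report_link(50));   /* 0: too early */
        printf("at t=150: %d\n", try_report_link(150));  /* 1: link reported */
        return 0;
    }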
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 57e18a68f5d2..13d508ce637f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -653,7 +653,8 @@ static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
FBNIC_TWD_TYPE_AL;
total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
- page_pool_put_page(page->pp, page, -1, pp_allow_direct);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, -1,
+ pp_allow_direct);
next_desc:
head++;
head &= ring->size_mask;
@@ -1807,7 +1808,7 @@ int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
free_vectors:
fbnic_free_napi_vectors(fbn);
- return -ENOMEM;
+ return err;
}
static void fbnic_free_ring_resources(struct device *dev,
@@ -2574,11 +2575,15 @@ write_ctl:
}
static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
- struct fbnic_ring *rcq)
+ struct fbnic_ring *rcq, bool tx_pause)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 drop_mode, rcq_ctl;
- drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ if (!tx_pause && fbn->num_rx_queues > 1)
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ else
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_NEVER;
/* Specify packet layout */
rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
@@ -2588,6 +2593,21 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause)
+{
+ int i, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ for (t = 0; t < nv->rxt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[nv->txt_count + t];
+
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl, tx_pause);
+ }
+ }
+}
+
static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
{
u32 threshold;
@@ -2637,7 +2657,7 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
u32 hds_thresh = fbn->hds_thresh;
u32 rcq_ctl = 0;
- fbnic_config_drop_mode_rcq(nv, rcq);
+ fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause);
/* Force lower bound on MAX_HEADER_BYTES. Below this, all frames should
* be split at L4. It would also result in the frames being split at
@@ -2700,7 +2720,6 @@ static void __fbnic_nv_enable(struct fbnic_napi_vector *nv)
&nv->napi);
fbnic_enable_bdq(&qt->sub0, &qt->sub1);
- fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
fbnic_enable_rcq(nv, &qt->cmpl);
}
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index ca37da5a0b17..27776e844e29 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -184,6 +184,7 @@ void fbnic_reset_netif_queues(struct fbnic_net *fbn);
irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause);
void fbnic_enable(struct fbnic_net *fbn);
void fbnic_disable(struct fbnic_net *fbn);
void fbnic_flush(struct fbnic_net *fbn);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index b4377b8613c3..8c40db90ee8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/ptp_classify.h>
+#include <linux/units.h>
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define LAN966X_CLOCK_RATE 165617754
+
#define LAN966X_MAX_PTP_ID 512
/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
@@ -1126,5 +1129,5 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
- return 15125;
+ return PICO / LAN966X_CLOCK_RATE;
}
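The magic 15125 above is replaced by deriving the period from the clock rate. With the values in this hunk the integer division comes out to 6038 ps, which the small standalone check below reproduces (PICO is copied here as 10^12, which is my understanding of its linux/units.h definition).

    #include <stdio.h>

    #define PICO_PER_SEC   1000000000000ULL     /* assumed value of PICO */
    #define CLOCK_RATE_HZ  165617754ULL         /* constant added above */

    int main(void)
    {
        /* 10^12 / 165617754 truncates to 6038 picoseconds per clock cycle */
        printf("%llu ps\n", PICO_PER_SEC / CLOCK_RATE_HZ);
        return 0;
    }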
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index effe0a2f207a..8fd70b34807a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1300,7 +1300,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
struct gdma_posted_wqe_info *wqe_info)
{
u32 client_oob_size = wqe_req->inline_oob_size;
- struct gdma_context *gc;
u32 sgl_data_size;
u32 max_wqe_size;
u32 wqe_size;
@@ -1330,11 +1329,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
if (wqe_size > max_wqe_size)
return -EINVAL;
- if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
- gc = wq->gdma_dev->gdma_context;
- dev_err(gc->dev, "unsuccessful flow control!\n");
+ if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
return -ENOSPC;
- }
if (wqe_info)
wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index cccd5b63cee6..1ad154f9db1a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
@@ -329,6 +330,21 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cq = &apc->tx_qp[txq_idx].tx_cq;
tx_stats = &txq->stats;
+ BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+ if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
+ skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+ /* A GSO skb exceeding the hardware SGE limit is not expected here,
+ * as such skbs are handled in the mana_features_check() callback
+ */
+ if (skb_linearize(skb)) {
+ netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
+ skb_shinfo(skb)->nr_frags,
+ skb_is_gso(skb));
+ goto tx_drop_count;
+ }
+ apc->eth_stats.tx_linear_pkt_cnt++;
+ }
+
pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -442,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array;
} else {
@@ -478,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (err) {
(void)skb_dequeue_tail(&txq->pending_skbs);
+ mana_unmap_skb(skb, apc);
netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
- err = NETDEV_TX_BUSY;
- goto tx_busy;
+ goto free_sgl_ptr;
}
err = NETDEV_TX_OK;
@@ -500,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
-tx_busy:
if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
netif_tx_wake_queue(net_txq);
apc->eth_stats.wake_queue++;
@@ -518,6 +531,25 @@ tx_drop:
return NETDEV_TX_OK;
}
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+static netdev_features_t mana_features_check(struct sk_buff *skb,
+ struct net_device *ndev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+ /* Exceeds HW SGE limit.
+ * GSO case:
+ * Disable GSO so the stack will software-segment the skb
+ * into smaller skbs that fit the SGE budget.
+ * Non-GSO case:
+ * The xmit path will attempt skb_linearize() as a fallback.
+ */
+ features &= ~NETIF_F_GSO_MASK;
+ }
+ return features;
+}
+#endif
+
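Taken together with the xmit-path hunk earlier in this file, the policy is: an skb needs room for its fragments plus two extra SGEs; if that exceeds the hardware budget, GSO skbs lose their GSO features here so the stack re-segments them, while non-GSO skbs fall back to skb_linearize() in mana_start_xmit(). A standalone sketch of that decision (the limit value is a stand-in, not the real MANA constant):

    #include <stdbool.h>
    #include <stdio.h>

    #define SGL_ENTRY_LIMIT 30      /* stand-in for MAX_TX_WQE_SGL_ENTRIES */

    enum tx_path { TX_SEND_AS_IS, TX_SW_SEGMENT, TX_LINEARIZE };

    static enum tx_path classify(unsigned int nr_frags, bool is_gso)
    {
        if (nr_frags + 2 <= SGL_ENTRY_LIMIT)
            return TX_SEND_AS_IS;       /* fits the hardware SGE budget */
        return is_gso ? TX_SW_SEGMENT   /* features_check clears GSO */
                      : TX_LINEARIZE;   /* xmit path tries skb_linearize() */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               classify(10, true),      /* 0: sent as-is */
               classify(40, true),      /* 1: software-segmented */
               classify(40, false));    /* 2: linearized */
        return 0;
    }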
static void mana_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *st)
{
@@ -534,6 +566,11 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
+ if (apc->ac->hwc_timeout_occurred)
+ netdev_warn_once(ndev, "HWC timeout occurred\n");
+
+ st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -878,6 +915,9 @@ static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
.ndo_select_queue = mana_select_queue,
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+ .ndo_features_check = mana_features_check,
+#endif
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
@@ -1646,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
return 0;
}
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -2809,11 +2849,12 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
return 0;
}
-void mana_query_gf_stats(struct mana_port_context *apc)
+int mana_query_gf_stats(struct mana_context *ac)
{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_gf_stat_resp resp = {};
struct mana_query_gf_stat_req req = {};
- struct net_device *ndev = apc->ndev;
+ struct device *dev = gc->dev;
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
@@ -2847,52 +2888,54 @@ void mana_query_gf_stats(struct mana_port_context *apc)
STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
- err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ err = mana_send_request(ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to query GF stats: %d\n", err);
- return;
+ dev_err(dev, "Failed to query GF stats: %d\n", err);
+ return err;
}
err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
sizeof(resp));
if (err || resp.hdr.status) {
- netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
- resp.hdr.status);
- return;
+ dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err;
}
- apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
- apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
- apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
- apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
- apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
- apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
- apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
- apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
- apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
- apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
- apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
resp.tx_err_inval_vport_offset_pkt;
- apc->eth_stats.hc_tx_err_vlan_enforcement =
+ ac->hc_stats.hc_tx_err_vlan_enforcement =
resp.tx_err_vlan_enforcement;
- apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ ac->hc_stats.hc_tx_err_eth_type_enforcement =
resp.tx_err_ethtype_enforcement;
- apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
- apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ ac->hc_stats.hc_tx_err_sqpdid_enforcement =
resp.tx_err_SQPDID_enforcement;
- apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_cqpdid_enforcement =
resp.tx_err_CQPDID_enforcement;
- apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
- apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
- apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
- apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
- apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
- apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
- apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
- apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
- apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+ ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
+ ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
+ ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+ ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+ ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+ ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+ ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+ ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+
+ return 0;
}
void mana_query_phy_stats(struct mana_port_context *apc)
@@ -3427,6 +3470,24 @@ int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type even
return 0;
}
+#define MANA_GF_STATS_PERIOD (2 * HZ)
+
+static void mana_gf_stats_work_handler(struct work_struct *work)
+{
+ struct mana_context *ac =
+ container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+ int err;
+
+ err = mana_query_gf_stats(ac);
+ if (err == -ETIMEDOUT) {
+ /* HWC timeout detected - reset stats and stop rescheduling */
+ ac->hwc_timeout_occurred = true;
+ memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+ return;
+ }
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
@@ -3519,6 +3580,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = add_adev(gd, "eth");
+
+ INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+
out:
if (err) {
mana_remove(gd, false);
@@ -3543,6 +3608,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
int i;
disable_work_sync(&ac->link_change_work);
+ cancel_delayed_work_sync(&ac->gf_stats_work);
/* adev currently doesn't support suspending, always remove it */
if (gd->adev)
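The net effect of the gf_stats hunks above: the host-counter query moves out of the ethtool path into a 2-second delayed work, and a single HWC timeout flags the device and stops the polling instead of retrying forever. A tiny sketch of just that stop/reschedule decision (userspace stand-ins, not the driver's workqueue code):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct hc_stats { unsigned long long rx_discards_no_wqe; /* ... */ };

    static struct hc_stats hc_stats;
    static bool hwc_timeout_occurred;

    /* Returns true when the poll should be rescheduled (every 2s above). */
    static bool gf_stats_poll(int query_err)
    {
        if (query_err == -ETIMEDOUT) {
            hwc_timeout_occurred = true;            /* warned once via stats path */
            memset(&hc_stats, 0, sizeof(hc_stats)); /* stale counters cleared */
            return false;                           /* stop polling a wedged HWC */
        }
        return true;
    }

    int main(void)
    {
        printf("%d\n", gf_stats_poll(0));           /* 1: keep polling */
        printf("%d\n", gf_stats_poll(-ETIMEDOUT));  /* 0: stop */
        return 0;
    }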
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index a1afa75a9463..0e2f4343ac67 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -15,66 +15,71 @@ struct mana_stats_desc {
static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
- {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+ {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ tx_cqe_unknown_type)},
+ {"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
+ tx_linear_pkt_cnt)},
+ {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+ rx_coalesced_err)},
+ {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_hc_stats[] = {
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
hc_rx_discards_no_wqe)},
- {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_rx_err_vport_disabled)},
- {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
- {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_pkts)},
- {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_bytes)},
- {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_pkts)},
- {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_bytes)},
- {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
- hc_rx_mcast_pkts)},
- {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_mcast_bytes)},
- {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gf_disabled)},
- {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vport_disabled)},
{"hc_tx_err_inval_vportoffset_pkt",
- offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_vportoffset_pkt)},
- {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vlan_enforcement)},
{"hc_tx_err_eth_type_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
- {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_sa_enforcement)},
{"hc_tx_err_sqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
{"hc_tx_err_cqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
- {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_mtu_violation)},
- {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_oob)},
- {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gdma)},
- {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
- {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
+ {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_pkts)},
- {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_bytes)},
- {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_pkts)},
- {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_bytes)},
- {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_pkts)},
- {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_bytes)},
- {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
- {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- tx_cqe_unknown_type)},
- {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
- rx_coalesced_err)},
- {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- rx_cqe_unknown_type)},
};
static const struct mana_stats_desc mana_phy_stats[] = {
@@ -138,7 +143,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+ return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + ARRAY_SIZE(mana_hc_stats) +
num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
@@ -150,10 +155,12 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
if (stringset != ETH_SS_STATS)
return;
-
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
ethtool_puts(&data, mana_eth_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
+ ethtool_puts(&data, mana_hc_stats[i].name);
+
for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
ethtool_puts(&data, mana_phy_stats[i].name);
@@ -186,6 +193,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *hc_stats = &apc->ac->hc_stats;
void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
@@ -207,8 +215,6 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
if (!apc->port_is_up)
return;
- /* we call mana function to update stats from GDMA */
- mana_query_gf_stats(apc);
/* We call this mana function to get the phy stats from GDMA and includes
* aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause
@@ -219,6 +225,9 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
+ data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);
+
for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 0e1a3800f371..85e3b19e6165 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -81,7 +81,8 @@ static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = {
static int
nfp_devlink_param_u8_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 94c5689b5abd..0c5278c0598c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -121,7 +121,8 @@ void qed_fw_reporters_destroy(struct devlink *devlink)
}
static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 847fa62c80df..e338bfc8b7b2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
+#include <linux/array_size.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -960,7 +961,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
{
int i;
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -985,7 +986,7 @@ static int qede_tpa_end(struct qede_dev *edev,
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
PAGE_SIZE, rxq->data_direction);
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
if (unlikely(i > 1))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b5d744d2586f..66ab1b9d65a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -506,25 +506,6 @@ static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
}
#endif
-static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct qede_dev *edev = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EAGAIN;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return qede_ptp_hw_ts(edev, ifr);
- default:
- DP_VERBOSE(edev, QED_MSG_DEBUG,
- "default IOCTL cmd 0x%x\n", cmd);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
char *p_sb = (char *)fp->sb_info->sb_virt;
@@ -717,7 +698,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
- .ndo_eth_ioctl = qede_ioctl,
.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
@@ -742,6 +722,8 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_xdp_xmit = qede_xdp_transmit,
.ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_hwtstamp_get = qede_hwtstamp_get,
+ .ndo_hwtstamp_set = qede_hwtstamp_set,
};
static const struct net_device_ops qede_netdev_vf_ops = {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index a38f1e72c62b..d351be5fbda1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -199,18 +199,15 @@ static u64 qede_ptp_read_cc(struct cyclecounter *cc)
return phc_cycles;
}
-static int qede_ptp_cfg_filters(struct qede_dev *edev)
+static void qede_ptp_cfg_filters(struct qede_dev *edev)
{
enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
- if (!ptp)
- return -EIO;
-
if (!ptp->hw_ts_ioctl_called) {
DP_INFO(edev, "TS IOCTL not called\n");
- return 0;
+ return;
}
switch (ptp->tx_type) {
@@ -223,11 +220,6 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
-
- case HWTSTAMP_TX_ONESTEP_SYNC:
- case HWTSTAMP_TX_ONESTEP_P2P:
- DP_ERR(edev, "One-step timestamping is not supported\n");
- return -ERANGE;
}
spin_lock_bh(&ptp->lock);
@@ -286,39 +278,65 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
spin_unlock_bh(&ptp->lock);
-
- return 0;
}
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct qede_dev *edev = netdev_priv(netdev);
struct qede_ptp *ptp;
- int rc;
+
+ if (!netif_running(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
ptp = edev->ptp;
- if (!ptp)
+ if (!ptp) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -EIO;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ }
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ "HWTSTAMP SET: Requested tx_type = %d, requested rx_filters = %d\n",
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
ptp->hw_ts_ioctl_called = 1;
- ptp->tx_type = config.tx_type;
- ptp->rx_filter = config.rx_filter;
+ ptp->tx_type = config->tx_type;
+ ptp->rx_filter = config->rx_filter;
- rc = qede_ptp_cfg_filters(edev);
- if (rc)
- return rc;
+ qede_ptp_cfg_filters(edev);
+
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
+}
- config.rx_filter = ptp->rx_filter;
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+ struct qede_ptp *ptp;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EIO;
+
+ config->tx_type = ptp->tx_type;
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
}
int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index adafc894797e..88f168395812 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -14,7 +14,11 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0b96b6aa4214..630319604211 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -59,6 +59,7 @@
#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
#define FIRMWARE_8125K_1 "rtl_nic/rtl8125k-1.fw"
#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
+#define FIRMWARE_9151A_1 "rtl_nic/rtl9151a-1.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
@@ -111,6 +112,7 @@ static const struct rtl_chip_info {
{ 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
/* 8125D family. */
+ { 0x7cf, 0x68b, RTL_GIGA_MAC_VER_64, "RTL9151A", FIRMWARE_9151A_1 },
{ 0x7cf, 0x68a, RTL_GIGA_MAC_VER_64, "RTL8125K", FIRMWARE_8125K_1 },
{ 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
{ 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
@@ -774,6 +776,7 @@ MODULE_FIRMWARE(FIRMWARE_8125D_1);
MODULE_FIRMWARE(FIRMWARE_8125D_2);
MODULE_FIRMWARE(FIRMWARE_8125K_1);
MODULE_FIRMWARE(FIRMWARE_8125BP_2);
+MODULE_FIRMWARE(FIRMWARE_9151A_1);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
MODULE_FIRMWARE(FIRMWARE_8127A_1);
@@ -1509,6 +1512,7 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
@@ -1517,11 +1521,20 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_25 &&
- tp->mac_version != RTL_GIGA_MAC_VER_28 &&
- tp->mac_version != RTL_GIGA_MAC_VER_31 &&
- tp->mac_version != RTL_GIGA_MAC_VER_38)
- r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_38:
+ break;
+ case RTL_GIGA_MAC_VER_80:
+ r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, true);
+ break;
+ default:
+ r8169_mod_reg8_cond(tp, PMCH, D3HOT_NO_PLL_DOWN, true);
+ r8169_mod_reg8_cond(tp, PMCH, D3COLD_NO_PLL_DOWN, !enable);
+ break;
+ }
}
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
@@ -2376,26 +2389,6 @@ void r8169_apply_firmware(struct rtl8169_private *tp)
}
}
-static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
-{
- /* Adjust EEE LED frequency */
- if (tp->mac_version != RTL_GIGA_MAC_VER_38)
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
-
- rtl_eri_set_bits(tp, 0x1b0, 0x0003);
-}
-
-static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
- r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
-}
-
-static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
@@ -3173,8 +3166,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
@@ -3199,8 +3190,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
-
- rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -3250,8 +3239,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
@@ -3392,8 +3379,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3441,8 +3426,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -3498,8 +3481,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3740,11 +3721,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- rtl8125a_config_eee_mac(tp);
- else
- rtl8125b_config_eee_mac(tp);
-
rtl_disable_rxdvgate(tp);
}
@@ -4747,6 +4723,41 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void rtl_enable_tx_lpi(struct rtl8169_private *tp, bool enable)
+{
+ if (!rtl_supports_eee(tp))
+ return;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_52:
+ /* Adjust EEE LED frequency */
+ if (tp->mac_version != RTL_GIGA_MAC_VER_38)
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ if (enable)
+ rtl_eri_set_bits(tp, 0x1b0, 0x0003);
+ else
+ rtl_eri_clear_bits(tp, 0x1b0, 0x0003);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ if (enable) {
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0, 0x0006);
+ } else {
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0x0006, 0);
+ }
+ break;
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
+ if (enable)
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ else
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ break;
+ default:
+ break;
+ }
+}
+
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4754,6 +4765,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
+ rtl_enable_tx_lpi(tp, tp->phydev->enable_tx_lpi);
pm_request_resume(d);
} else {
pm_runtime_idle(d);
@@ -5451,6 +5463,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
tp->aspm_manageable = !rc;
+ /* Fiber mode on RTL8127AF isn't supported */
+ if (rtl_is_8125(tp)) {
+ u16 data = r8168_mac_ocp_read(tp, 0xd006);
+
+ if ((data & 0xff) == 0x07)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Fiber mode not supported\n");
+ }
+
tp->dash_type = rtl_get_dash_type(tp);
tp->dash_enabled = rtl_dash_is_enabled(tp);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 75bad561b352..849c5a6c2af1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1521,8 +1521,10 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
skb = priv->rxq[qnum]->rx_skbuff[entry];
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
+ break;
+ }
prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 87c5bea6c2a2..907fe2e927f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -349,6 +349,11 @@ config DWMAC_VISCONTI
endif
+config STMMAC_LIBPCI
+ tristate
+ help
+ This option enables the PCI bus helpers for the stmmac driver.
+
config DWMAC_INTEL
tristate "Intel GMAC support"
default X86
@@ -362,16 +367,18 @@ config DWMAC_INTEL
config DWMAC_LOONGSON
tristate "Loongson PCI DWMAC support"
default MACH_LOONGSON64
- depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the LOONGSON PCI bus support for the stmmac driver,
Support for ethernet controller on Loongson-2K1000 SoC and LS7A1000 bridge.
config STMMAC_PCI
tristate "STMMAC PCI bus support"
- depends on STMMAC_ETH && PCI
+ depends on PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the platform specific bus support for the stmmac driver.
This driver was tested on XLINX XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 1681a8a28313..7bf528731034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := dwmac-socfpga.o
+obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index fb55efd52240..120a009c9992 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -83,14 +83,13 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
+ bool ret = false;
if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
- (!enh_desc && (len > BUF_SIZE_2KiB))) {
- ret = 1;
- }
+ (!enh_desc && (len > BUF_SIZE_2KiB)))
+ ret = true;
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7395bbb94aea..49df46be3669 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -548,6 +548,19 @@ struct dma_features {
#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+/* Common definitions for AXI Master Bus Mode */
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_AXI_BLEN_MASK GENMASK(7, 1)
+
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len);
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
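stmmac_axi_blen_to_mask() is only declared in this hunk; its body is not part of the excerpt. As a rough, non-authoritative sketch of the intended semantics - inferred from the DMA_AXI_BLENx bit layout above and the open-coded switch removed from dwmac-dwc-qos-eth.c below - it folds a legacy axi_blen[] array into the bits 7:1 register mask:

/* Sketch only: burst lengths 4, 8, ..., 256 map onto consecutive bits 1..7,
 * so { 4, 8, 16 } becomes DMA_AXI_BLEN4 | DMA_AXI_BLEN8 | DMA_AXI_BLEN16.
 * Uses is_power_of_2()/ilog2() from <linux/log2.h> and BIT() from <linux/bits.h>.
 */
void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (blen[i] >= 4 && blen[i] <= 256 && is_power_of_2(blen[i]))
			*regval |= BIT(ilog2(blen[i]) - 1); /* 4 -> BIT(1), 256 -> BIT(7) */
}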
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 84072c8ed741..5e0fc31762d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -34,7 +34,7 @@ static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val)
writel(val, gmac->ctl_block + reg);
}
-static int anarion_gmac_init(struct platform_device *pdev, void *priv)
+static int anarion_gmac_init(struct device *dev, void *priv)
{
uint32_t sw_config;
struct anarion_gmac *gmac = priv;
@@ -52,7 +52,7 @@ static int anarion_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
+static void anarion_gmac_exit(struct device *dev, void *priv)
{
struct anarion_gmac *gmac = priv;
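The anarion change above is one instance of a pattern repeated across the glue drivers in this patch (eic7700, loongson1, renesas-gbeth, s32, socfpga, rk): the bsp init/exit callbacks now take a struct device rather than a struct platform_device, so the same hooks can be shared by PCI and platform back-ends. As implied by the ls1x_data hunk further down, the callback members change roughly like this (illustrative snippet, not a literal header excerpt):

struct plat_stmmacenet_data {
	/* ... */
	/* old: int  (*init)(struct platform_device *pdev, void *bsp_priv); */
	/* old: void (*exit)(struct platform_device *pdev, void *bsp_priv); */
	int (*init)(struct device *dev, void *bsp_priv);
	void (*exit)(struct device *dev, void *bsp_priv);
	/* ... */
};

Glue code that still needs the platform device can recover it with to_platform_device(dev); most callers in the hunks below only needed the device for dev_err().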
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index c7cd6497d42d..d043bad4a862 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -38,8 +38,6 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
u32 burst_map = 0;
- u32 bit_index = 0;
- u32 a_index = 0;
if (!plat_dat->axi) {
plat_dat->axi = devm_kzalloc(&pdev->dev,
@@ -83,30 +81,8 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
}
device_property_read_u32(dev, "snps,burst-map", &burst_map);
- /* converts burst-map bitmask to burst array */
- for (bit_index = 0; bit_index < 7; bit_index++) {
- if (burst_map & (1 << bit_index)) {
- switch (bit_index) {
- case 0:
- plat_dat->axi->axi_blen[a_index] = 4; break;
- case 1:
- plat_dat->axi->axi_blen[a_index] = 8; break;
- case 2:
- plat_dat->axi->axi_blen[a_index] = 16; break;
- case 3:
- plat_dat->axi->axi_blen[a_index] = 32; break;
- case 4:
- plat_dat->axi->axi_blen[a_index] = 64; break;
- case 5:
- plat_dat->axi->axi_blen[a_index] = 128; break;
- case 6:
- plat_dat->axi->axi_blen[a_index] = 256; break;
- default:
- break;
- }
- a_index++;
- }
- }
+ plat_dat->axi->axi_blen_regval = FIELD_PREP(DMA_AXI_BLEN_MASK,
+ burst_map);
/* dwc-qos needs GMAC4, AAL, TSO and PMT */
plat_dat->core_type = DWMAC_CORE_GMAC4;
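The replacement above works because the devicetree "snps,burst-map" property is already a bitmask in which bit N means a burst length of 4 << N, and DMA_AXI_BLEN_MASK is GENMASK(7, 1); FIELD_PREP() therefore just shifts the property up by one bit into the new axi_blen_regval layout. A quick worked check (illustrative comment only):

/*
 *	burst_map = 0x7			(bursts 4, 8 and 16 allowed)
 *	FIELD_PREP(DMA_AXI_BLEN_MASK, 0x7) == 0x7 << 1 == 0xE
 *		== DMA_AXI_BLEN4 | DMA_AXI_BLEN8 | DMA_AXI_BLEN16
 */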
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
index 1dcf2037001e..bcb8e000e720 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
@@ -58,14 +58,14 @@ static int eic7700_clks_config(void *priv, bool enabled)
return ret;
}
-static int eic7700_dwmac_init(struct platform_device *pdev, void *priv)
+static int eic7700_dwmac_init(struct device *dev, void *priv)
{
struct eic7700_qos_priv *dwc = priv;
return eic7700_clks_config(dwc, true);
}
-static void eic7700_dwmac_exit(struct platform_device *pdev, void *priv)
+static void eic7700_dwmac_exit(struct device *dev, void *priv)
{
struct eic7700_qos_priv *dwc = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index b2194e414ec1..aad1be1ec4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -569,26 +569,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
@@ -629,22 +609,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].chan = i;
-
- /* Disable Priority config by default */
- plat->rx_queues_cfg[i].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- }
for (i = 0; i < plat->tx_queues_to_use; i++) {
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- /* Disable Priority config by default */
- plat->tx_queues_cfg[i].use_prio = false;
/* Default TX Q0 to use TSO and rest TXQ for TBS */
if (i > 0)
plat->tx_queues_cfg[i].tbs_en = 1;
@@ -680,9 +650,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_xit_frm = 0;
plat->axi->axi_wr_osr_lmt = 1;
plat->axi->axi_rd_osr_lmt = 1;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16;
plat->ptp_max_adj = plat->clk_ptp_rate;
@@ -706,15 +675,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
/* Use the last Rx queue */
@@ -1286,7 +1246,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (!intel_priv)
return -ENOMEM;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
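stmmac_plat_dat_alloc() is new in this series and its body is not shown here; the defaults dropped from common_default_data() and intel_mgbe_common_data() above (and from loongson_default_data() below) strongly suggest it pre-loads them centrally. A hedged sketch of what it plausibly does:

/* Sketch only - assumed semantics, not the actual implementation. */
struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
{
	struct plat_stmmacenet_data *plat;

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return NULL;

	/* Defaults formerly open-coded by each PCI glue driver */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;
	plat->unicast_filter_entries = 1;
	plat->maxmtu = JUMBO_LEN;
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;
	plat->phy_addr = -1;		/* PHY auto-detection */

	return plat;
}

The use_prio and pkt_route defaults are already zero after kzalloc(), which is why those assignments simply disappear rather than moving.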
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index dd2fc39ec3e2..107a7c84ace8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
#include "dwmac_dma.h"
#include "dwmac1000.h"
@@ -95,28 +96,12 @@ static void loongson_default_data(struct pci_dev *pdev,
plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
- /* Set default value for multicast hash bins */
+ /* Increase the default value for multicast hash bins */
plat->multicast_filter_bins = 256;
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
-
plat->clk_ref_rate = 125000000;
plat->clk_ptp_rate = 125000000;
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
-
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
@@ -140,8 +125,6 @@ static void loongson_default_data(struct pci_dev *pdev,
break;
default:
ld->multichan = 0;
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
break;
}
}
@@ -520,37 +503,6 @@ static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioad
10000, 2000000);
}
-static int loongson_dwmac_suspend(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int loongson_dwmac_resume(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -559,7 +511,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
struct loongson_data *ld;
int ret;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -595,8 +547,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->mac_setup = loongson_dwmac_setup;
plat->fix_soc_reset = loongson_dwmac_fix_reset;
- plat->suspend = loongson_dwmac_suspend;
- plat->resume = loongson_dwmac_resume;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
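The stmmac_pci_plat_suspend()/stmmac_pci_plat_resume() helpers come from the new STMMAC_LIBPCI object (see the Kconfig and Makefile hunks at the top of this patch). Their bodies are not in this excerpt; assuming they simply generalise the loongson_dwmac_suspend()/resume() code removed above, they would look roughly like:

/* Sketch based on the removed Loongson callbacks; not the literal
 * stmmac_libpci.c source.
 */
int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}

int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);
	return 0;
}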
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 894ee66f5c9b..de9aba756aac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -48,7 +48,7 @@ struct ls1x_dwmac {
struct ls1x_data {
int (*setup)(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat);
- int (*init)(struct platform_device *pdev, void *bsp_priv);
+ int (*init)(struct device *dev, void *bsp_priv);
};
static int ls1b_dwmac_setup(struct platform_device *pdev,
@@ -79,7 +79,7 @@ static int ls1b_dwmac_setup(struct platform_device *pdev,
return 0;
}
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1b_dwmac_syscon_init(struct device *dev, void *priv)
{
struct ls1x_dwmac *dwmac = priv;
struct plat_stmmacenet_data *plat = dwmac->plat_dat;
@@ -98,7 +98,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
@@ -122,7 +122,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
@@ -133,7 +133,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
return 0;
}
-static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1c_dwmac_syscon_init(struct device *dev, void *priv)
{
struct ls1x_dwmac *dwmac = priv;
struct plat_stmmacenet_data *plat = dwmac->plat_dat;
@@ -143,7 +143,7 @@ static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_interface);
if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
phy_intf_sel != PHY_INTF_SEL_RMII) {
- dev_err(&pdev->dev, "Unsupported PHY-mode %u\n",
+ dev_err(dev, "Unsupported PHY-mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 1a616a71c36a..0826a7bd32ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -116,27 +116,39 @@ struct qcom_ethqos {
bool needs_sgmii_loopback;
};
-static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+static u32 rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
return readl(ethqos->rgmii_base + offset);
}
-static void rgmii_writel(struct qcom_ethqos *ethqos,
- int value, unsigned int offset)
+static void rgmii_writel(struct qcom_ethqos *ethqos, u32 value,
+ unsigned int offset)
{
writel(value, ethqos->rgmii_base + offset);
}
-static void rgmii_updatel(struct qcom_ethqos *ethqos,
- int mask, int val, unsigned int offset)
+static void rgmii_updatel(struct qcom_ethqos *ethqos, u32 mask, u32 val,
+ unsigned int offset)
{
- unsigned int temp;
+ u32 temp;
temp = rgmii_readl(ethqos, offset);
temp = (temp & ~(mask)) | val;
rgmii_writel(ethqos, temp, offset);
}
+static void rgmii_setmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, mask, offset);
+}
+
+static void rgmii_clrmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, 0, offset);
+}
+
static void rgmii_dump(void *priv)
{
struct qcom_ethqos *ethqos = priv;
@@ -194,8 +206,7 @@ qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
qcom_ethqos_set_sgmii_loopback(ethqos, true);
- rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
- RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
static const struct ethqos_emac_por emac_v2_3_0_por[] = {
@@ -300,69 +311,55 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
struct device *dev = &ethqos->pdev->dev;
- unsigned int val;
- int retry = 1000;
+ u32 val;
/* Set CDR_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
- SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
/* Set CDR_EXT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
- SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Clear CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ SDCC_HC_REG_DLL_CONFIG);
}
/* Wait for CK_OUT_EN clear */
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (!val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Clear CK_OUT_EN timedout\n");
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Wait for CK_OUT_EN set */
- retry = 1000;
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDCC_DLL_CONFIG_CK_OUT_EN,
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Set CK_OUT_EN timedout\n");
/* Set DDR_CAL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
- SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_HC_REG_DLL_CONFIG2);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
- 0, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ SDCC_HC_REG_DLL_CONFIG2);
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
@@ -370,8 +367,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
BIT(2), SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
SDCC_HC_REG_DLL_CONFIG2);
}
@@ -392,8 +388,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
phase_shift = RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN;
/* Disable loopback mode */
- rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG2);
/* Determine if this platform wants loopback enabled after programming */
if (ethqos->rgmii_config_loopback_en)
@@ -402,29 +398,26 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
loopback = 0;
/* Select RGMII, write 0 to interface select */
- rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_INTF_SEL, RGMII_IO_MACRO_CONFIG);
switch (speed) {
case SPEED_1000:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- RGMII_CONFIG_POS_NEG_DATA_SEL,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
/* PRG_RCLK_DLY = TCXO period * TCXO_CYCLES_CNT / 2 * RX delay ns,
@@ -439,87 +432,78 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
57, SDCC_HC_REG_DDR_CONFIG);
}
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
- SDCC_DDR_CONFIG_PRG_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
BIT(6), RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_10:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
BIT(12) | GENMASK(9, 8),
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
@@ -535,8 +519,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
- volatile unsigned int dll_lock;
- unsigned int i, retry = 1000;
+ unsigned int i;
+ u32 val;
/* Reset to POR values and enable clk */
for (i = 0; i < ethqos->num_por; i++)
@@ -547,12 +531,12 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
/* Initialize the DLL first */
/* Set DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
- SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
- SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
if (speed == SPEED_1000) {
@@ -566,21 +550,18 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
}
/* Clear DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
/* Clear PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
SDCC_HC_REG_DLL_CONFIG);
/* Set USR_CTL bit 26 with mask of 3 bits */
@@ -589,14 +570,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
SDCC_USR_CTL);
/* wait for DLL LOCK */
- do {
- mdelay(1);
- dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
- if (dll_lock & SDC4_STATUS_DLL_LOCK)
- break;
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDC4_STATUS_DLL_LOCK,
+ 1000, 1000000, true,
+ ethqos, SDC4_STATUS))
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
@@ -631,15 +608,13 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
switch (speed) {
case SPEED_2500:
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_2500);
ethqos_pcs_set_inband(priv, false);
break;
case SPEED_1000:
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
ethqos_pcs_set_inband(priv, true);
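Two mechanical conversions run through the ethqos hunks above: single-bit rgmii_updatel() calls become rgmii_setmask()/rgmii_clrmask(), and the hand-rolled mdelay() polling loops become read_poll_timeout_atomic() from <linux/iopoll.h>, which returns 0 on success and -ETIMEDOUT otherwise. For the DLL-lock wait, the helper is roughly equivalent to the loop it replaces (conceptual expansion, not the literal macro):

u32 val;
unsigned long budget_us = 1000000;	/* same budget as the old 1000 * mdelay(1) */

do {
	udelay(1000);			/* delay_before_read == true for this call */
	val = rgmii_readl(ethqos, SDC4_STATUS);
	if (val & SDC4_STATUS_DLL_LOCK)
		break;
	budget_us -= 1000;
} while (budget_us);

if (!(val & SDC4_STATUS_DLL_LOCK))
	dev_err(dev, "Timeout while waiting for DLL lock\n");

The CK_OUT_EN waits pass false as delay_before_read, matching the old loops that read the register before sleeping.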
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index bc7bb975803c..be7f5eb2cdcf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -91,7 +91,7 @@ static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
-static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
+static int renesas_gbeth_init(struct device *dev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
struct renesas_gbeth *gbeth = priv;
@@ -113,7 +113,7 @@ static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
return ret;
}
-static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
+static void renesas_gbeth_exit(struct device *dev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
struct renesas_gbeth *gbeth = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index a5c7e03ebc63..0a95f54e725e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -149,11 +149,13 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
return clk_set_rate(clk_mac_speed, rate);
}
-#define HIWORD_UPDATE(val, mask, shift) \
- (FIELD_PREP_WM16((mask) << (shift), (val)))
+#define GRF_FIELD(hi, lo, val) \
+ FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
+#define GRF_FIELD_CONST(hi, lo, val) \
+ FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
-#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
-#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
#define DELAY_ENABLE(soc, tx, rx) \
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
@@ -167,9 +169,9 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
#define RK_MACPHY_ENABLE GRF_BIT(0)
#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+#define RK_GMAC2PHY_RMII_MODE GRF_FIELD(7, 6, 1)
+#define RK_GRF_CON2_MACPHY_ID GRF_FIELD(15, 0, 0x1234)
+#define RK_GRF_CON3_MACPHY_ID GRF_FIELD(5, 0, 0x35)
static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
{
@@ -203,7 +205,7 @@ static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
-#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_24M_CLK_SEL GRF_FIELD(9, 8, 3)
#define RK_FEPHY_PHY_ID GRF_BIT(11)
static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
@@ -232,15 +234,14 @@ static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define PX30_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define PX30_GMAC_SPEED_100M GRF_BIT(2)
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_PHY_INTF_SEL_RMII);
+ PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static int px30_set_speed(struct rk_priv_data *bsp_priv,
@@ -285,23 +286,20 @@ static const struct rk_gmac_ops px30_ops = {
#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define RK3128_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3128_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3128_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
@@ -309,7 +307,7 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3128_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
@@ -320,7 +318,8 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3128_GMAC_RMII_MODE);
}
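The GRF_FIELD()/GRF_FIELD_CONST() helpers defined near the top of this file express each GRF field as (hi, lo, value) instead of a hand-built set of GRF_BIT()/GRF_CLR_BIT() terms; FIELD_PREP_WM16() supplies the write-enable mask that Rockchip GRF registers expect in the upper 16 bits. A worked check against the rk3128 RMII case above (the phy_intf_sel encoding, RGMII = 1 and RMII = 4, is implied by comparing the old and new definitions in this patch):

/*
 *   old: GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8)
 *           = BIT(22) | BIT(23) | BIT(24) | BIT(8)	= 0x01c00100
 *
 *   new: RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII)
 *           = GRF_FIELD(8, 6, 4)
 *           = (GENMASK(8, 6) << 16) | (4 << 6)		= 0x01c00100
 */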
static const struct rk_reg_speed_data rk3128_reg_speed_data = {
@@ -350,23 +349,20 @@ static const struct rk_gmac_ops rk3128_ops = {
#define RK3228_GRF_CON_MUX 0x50
/* RK3228_GRF_MAC_CON0 */
-#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3228_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
-#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
-#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_125M GRF_FIELD_CONST(9, 8, 0)
+#define RK3228_GMAC_CLK_25M GRF_FIELD_CONST(9, 8, 3)
+#define RK3228_GMAC_CLK_2_5M GRF_FIELD_CONST(9, 8, 2)
#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
@@ -381,7 +377,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3228_GMAC_RMII_MODE_CLR |
DELAY_ENABLE(RK3228, tx_delay, rx_delay));
@@ -393,7 +389,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3228_GMAC_RMII_MODE);
/* set MAC to RMII mode */
@@ -435,19 +431,16 @@ static const struct rk_gmac_ops rk3228_ops = {
#define RK3288_GRF_SOC_CON3 0x0250
/*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_CLR_BIT(8))
-#define RK3288_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3288_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3288_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3288_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3288_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
@@ -456,14 +449,14 @@ static const struct rk_gmac_ops rk3228_ops = {
#define RK3288_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3288_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3288_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3288_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3288_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RGMII |
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
@@ -474,7 +467,8 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3288_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3288_reg_speed_data = {
@@ -501,8 +495,7 @@ static const struct rk_gmac_ops rk3288_ops = {
#define RK3308_GRF_MAC_CON0 0x04a0
/* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
- GRF_BIT(4))
+#define RK3308_GMAC_PHY_INTF_SEL(val) GRF_FIELD(4, 2, val)
#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
@@ -511,7 +504,7 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_PHY_INTF_SEL_RMII);
+ RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_reg_speed_data rk3308_reg_speed_data = {
@@ -537,23 +530,20 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GRF_MACPHY_CON1 0xb04
/* RK3328_GRF_MAC_CON0 */
-#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3328_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3328_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
-#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12))
-#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_125M GRF_FIELD_CONST(12, 11, 0)
+#define RK3328_GMAC_CLK_25M GRF_FIELD_CONST(12, 11, 3)
+#define RK3328_GMAC_CLK_2_5M GRF_FIELD_CONST(12, 11, 2)
#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
@@ -566,7 +556,7 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_PHY_INTF_SEL_RGMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3328_GMAC_RMII_MODE_CLR |
RK3328_GMAC_RXCLK_DLY_ENABLE |
RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -584,7 +574,7 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3328_GRF_MAC_CON1;
regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_PHY_INTF_SEL_RMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3328_GMAC_RMII_MODE);
}
@@ -630,19 +620,16 @@ static const struct rk_gmac_ops rk3328_ops = {
#define RK3366_GRF_SOC_CON7 0x041c
/* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3366_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3366_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -651,14 +638,14 @@ static const struct rk_gmac_ops rk3328_ops = {
#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
@@ -669,7 +656,8 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3366_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3366_reg_speed_data = {
@@ -697,19 +685,16 @@ static const struct rk_gmac_ops rk3366_ops = {
#define RK3368_GRF_SOC_CON16 0x0440
/* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3368_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3368_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3368_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3368_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3368_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -718,14 +703,14 @@ static const struct rk_gmac_ops rk3366_ops = {
#define RK3368_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3368_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3368_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3368_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3368_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RGMII |
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
@@ -736,7 +721,8 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3368_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3368_reg_speed_data = {
@@ -764,19 +750,16 @@ static const struct rk_gmac_ops rk3368_ops = {
#define RK3399_GRF_SOC_CON6 0xc218
/* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3399_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3399_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -785,14 +768,14 @@ static const struct rk_gmac_ops rk3368_ops = {
#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
@@ -803,7 +786,8 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3399_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3399_reg_speed_data = {
@@ -901,8 +885,8 @@ static const struct rk_gmac_ops rk3506_ops = {
#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
-#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
-#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
@@ -916,9 +900,9 @@ static const struct rk_gmac_ops rk3506_ops = {
#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
-#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
-#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
-#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0)
+#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3)
+#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2)
#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
@@ -1029,10 +1013,7 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GRF_GMAC1_CON1 0x038c
/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
-#define RK3568_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3568_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1041,8 +1022,8 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
-#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
@@ -1059,7 +1040,7 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL_RGMII |
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3568_GMAC_RXCLK_DLY_ENABLE |
RK3568_GMAC_TXCLK_DLY_ENABLE);
}
@@ -1070,7 +1051,8 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
- regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
+ regmap_write(bsp_priv->grf, con1,
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rk3568_ops = {
@@ -1096,8 +1078,8 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
-#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* SDGMAC_GRF */
#define RK3576_GRF_GMAC_CON0 0X0020
@@ -1112,12 +1094,9 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
-#define RK3576_GMAC_CLK_RGMII_DIV1 \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
-#define RK3576_GMAC_CLK_RGMII_DIV5 \
- (GRF_BIT(6) | GRF_BIT(5))
-#define RK3576_GMAC_CLK_RGMII_DIV50 \
- (GRF_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0)
+#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3)
+#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2)
#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
@@ -1220,17 +1199,15 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
-#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
-#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
/* php_grf */
#define RK3588_GRF_GMAC_CON0 0X0008
#define RK3588_GRF_CLK_CON1 0X0070
-#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
- (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
-#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
- (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL(id, val) \
+ (GRF_FIELD(5, 3, val) << ((id) * 6))
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
@@ -1242,11 +1219,11 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
- (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5))
#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
@@ -1260,7 +1237,7 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3588_GRF_GMAC_CON8;
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+ RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RGMII_MODE(id));
@@ -1277,7 +1254,7 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+ RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
@@ -1347,8 +1324,7 @@ static const struct rk_gmac_ops rk3588_ops = {
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
-#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
@@ -1359,7 +1335,7 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_PHY_INTF_SEL_RMII);
+ RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_reg_speed_data rv1108_reg_speed_data = {
@@ -1384,10 +1360,7 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GRF_GMAC_CON2 0X0078
/* RV1126_GRF_GMAC_CON0 */
-#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RV1126_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1400,17 +1373,17 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
/* RV1126_GRF_GMAC_CON1 */
-#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RV1126_GRF_GMAC_CON2 */
-#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
@@ -1428,7 +1401,7 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RMII);
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rv1126_ops = {
@@ -1762,8 +1735,7 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
struct rk_priv_data *bsp_priv = bsp_priv_;
if (bsp_priv->ops->set_speed)
- return bsp_priv->ops->set_speed(bsp_priv, bsp_priv->phy_iface,
- speed);
+ return bsp_priv->ops->set_speed(bsp_priv, interface, speed);
return -EINVAL;
}
@@ -1790,6 +1762,22 @@ static int rk_gmac_resume(struct device *dev, void *bsp_priv_)
return 0;
}
+static int rk_gmac_init(struct device *dev, void *bsp_priv)
+{
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct device *dev, void *bsp_priv_)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ rk_gmac_powerdown(bsp_priv);
+
+ if (priv->plat->phy_node && bsp_priv->integrated_phy)
+ clk_put(bsp_priv->clk_phy);
+}
+
static int rk_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -1822,6 +1810,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->get_interfaces = rk_get_interfaces;
plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
plat_dat->suspend = rk_gmac_suspend;
plat_dat->resume = rk_gmac_resume;
@@ -1833,33 +1823,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = rk_gmac_powerup(plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_powerdown;
-
- return 0;
-
-err_gmac_powerdown:
- rk_gmac_powerdown(plat_dat->bsp_priv);
-
- return ret;
-}
-
-static void rk_gmac_remove(struct platform_device *pdev)
-{
- struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
- struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
-
- stmmac_dvr_remove(&pdev->dev);
-
- rk_gmac_powerdown(bsp_priv);
-
- if (priv->plat->phy_node && bsp_priv->integrated_phy)
- clk_put(bsp_priv->clk_phy);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id rk_gmac_dwmac_match[] = {
@@ -1885,7 +1849,6 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
.probe = rk_gmac_probe,
- .remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
.pm = &stmmac_simple_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 2b7ad64bfdf7..5a485ee98fa7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -47,7 +47,7 @@ static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
return 0;
}
-static int s32_gmac_init(struct platform_device *pdev, void *priv)
+static int s32_gmac_init(struct device *dev, void *priv)
{
struct s32_priv_data *gmac = priv;
int ret;
@@ -55,31 +55,31 @@ static int s32_gmac_init(struct platform_device *pdev, void *priv)
/* Set initial TX interface clock */
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- dev_err(&pdev->dev, "Can't enable tx clock\n");
+ dev_err(dev, "Can't enable tx clock\n");
return ret;
}
ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
if (ret) {
- dev_err(&pdev->dev, "Can't set tx clock\n");
+ dev_err(dev, "Can't set tx clock\n");
goto err_tx_disable;
}
/* Set initial RX interface clock */
ret = clk_prepare_enable(gmac->rx_clk);
if (ret) {
- dev_err(&pdev->dev, "Can't enable rx clock\n");
+ dev_err(dev, "Can't enable rx clock\n");
goto err_tx_disable;
}
ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
if (ret) {
- dev_err(&pdev->dev, "Can't set rx clock\n");
+ dev_err(dev, "Can't set rx clock\n");
goto err_txrx_disable;
}
/* Set interface mode */
ret = s32_gmac_write_phy_intf_select(gmac);
if (ret) {
- dev_err(&pdev->dev, "Can't set PHY interface mode\n");
+ dev_err(dev, "Can't set PHY interface mode\n");
goto err_txrx_disable;
}
@@ -92,7 +92,7 @@ err_tx_disable:
return ret;
}
-static void s32_gmac_exit(struct platform_device *pdev, void *priv)
+static void s32_gmac_exit(struct device *dev, void *priv)
{
struct s32_priv_data *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 49d651948e2b..a2b52d2c4eb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -551,7 +551,7 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
-static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+static int socfpga_dwmac_init(struct device *dev, void *bsp_priv)
{
struct socfpga_dwmac *dwmac = bsp_priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
index 3b7947a7a7ba..44d4ceb8415f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -7,11 +7,16 @@
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "stmmac_platform.h"
+struct sophgo_dwmac_data {
+ bool has_internal_rx_delay;
+};
+
static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
@@ -24,7 +29,6 @@ static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
- plat_dat->unicast_filter_entries = 1;
return 0;
}
@@ -32,6 +36,7 @@ static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
static int sophgo_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
+ const struct sophgo_dwmac_data *data;
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
int ret;
@@ -50,11 +55,23 @@ static int sophgo_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
+ data = device_get_match_data(&pdev->dev);
+ if (data && data->has_internal_rx_delay) {
+ plat_dat->phy_interface = phy_fix_phy_mode_for_mac_delays(plat_dat->phy_interface,
+ false, true);
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_NA)
+ return -EINVAL;
+ }
+
return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
}
+static const struct sophgo_dwmac_data sg2042_dwmac_data = {
+ .has_internal_rx_delay = true,
+};
+
static const struct of_device_id sophgo_dwmac_match[] = {
- { .compatible = "sophgo,sg2042-dwmac" },
+ { .compatible = "sophgo,sg2042-dwmac", .data = &sg2042_dwmac_data },
{ .compatible = "sophgo,sg2044-dwmac" },
{ /* sentinel */ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index b0509ab6b31c..f50547b67fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -229,14 +229,14 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
-static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+static int sti_dwmac_init(struct device *dev, void *bsp_priv)
{
struct sti_dwmac *dwmac = bsp_priv;
return clk_prepare_enable(dwmac->clk);
}
-static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv)
+static void sti_dwmac_exit(struct device *dev, void *bsp_priv)
{
struct sti_dwmac *dwmac = bsp_priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 7434d4bbb526..8aa496ac85cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -571,16 +571,16 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
-static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+static int sun8i_dwmac_init(struct device *dev, void *priv)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct sunxi_priv_data *gmac = priv;
int ret;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
if (ret) {
- dev_err(&pdev->dev, "Fail to enable regulator\n");
+ dev_err(dev, "Fail to enable regulator\n");
return ret;
}
}
@@ -1005,7 +1005,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
(H3_EPHY_SHUTDOWN | H3_EPHY_SELECT));
}
-static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+static void sun8i_dwmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
@@ -1265,7 +1265,7 @@ static void sun8i_dwmac_shutdown(struct platform_device *pdev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- sun8i_dwmac_exit(pdev, gmac);
+ sun8i_dwmac_exit(&pdev->dev, gmac);
}
static const struct of_device_id sun8i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 7f560d78209d..52593ba3a3a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -27,7 +27,7 @@ struct sunxi_priv_data {
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
-static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+static int sun7i_gmac_init(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
int ret = 0;
@@ -58,7 +58,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+static void sun7i_gmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index a3378046b061..e291028ba56e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -186,7 +186,7 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
return 0;
}
-static int thead_dwmac_init(struct platform_device *pdev, void *priv)
+static int thead_dwmac_init(struct device *dev, void *priv)
{
struct thead_dwmac *dwmac = priv;
unsigned int reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 118a22406a2e..5877fec9f6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -19,7 +19,6 @@
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
- int i;
pr_info("dwmac1000: Master AXI performs %s burst length\n",
!(value & DMA_AXI_UNDEF) ? "fixed" : "any");
@@ -39,33 +38,10 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
@@ -159,10 +135,10 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
+ csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
- csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF);
csr6 &= DMA_CONTROL_TC_RX_MASK;
if (mode <= 32)
csr6 |= DMA_CONTROL_RTC_32;
@@ -286,6 +262,7 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_reception = dwmac_enable_dma_reception,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
.start_tx = dwmac_dma_start_tx,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d87a8b595e6a..7b513324cfb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -18,7 +18,6 @@
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
- int i;
pr_info("dwmac4: Master AXI performs %s burst length\n",
(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
@@ -38,33 +37,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 4f980dcd3958..f27126f05551 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -69,15 +69,8 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
-#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_AAL DMA_AXI_AAL
#define DMA_SYS_BUS_EAME BIT(11)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_SYS_BUS_FB BIT(0)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
@@ -85,8 +78,6 @@
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
-
/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 5d9c18f5bbf5..054ecb20ce3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -68,23 +68,14 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_AXI_OSR_MAX 0xf
#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
(DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
-#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_AAL BIT(12)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_UNDEF BIT(0)
+#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+#define DMA_AXI_UNDEF BIT(0)
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
@@ -178,6 +169,7 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 467f1a05747e..97a803d68e3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -33,6 +33,11 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan)
+{
+ writel(1, ioaddr + DMA_CHAN_RCV_POLL_DEMAND(chan));
+}
+
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index e48cfa05000c..fecda3034d36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -338,16 +338,9 @@
#define XGMAC_RD_OSR_LMT_SHIFT 16
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
-#define XGMAC_AAL BIT(12)
+#define XGMAC_AAL DMA_AXI_AAL
#define XGMAC_EAME BIT(11)
-#define XGMAC_BLEN GENMASK(7, 1)
-#define XGMAC_BLEN256 BIT(7)
-#define XGMAC_BLEN128 BIT(6)
-#define XGMAC_BLEN64 BIT(5)
-#define XGMAC_BLEN32 BIT(4)
-#define XGMAC_BLEN16 BIT(3)
-#define XGMAC_BLEN8 BIT(2)
-#define XGMAC_BLEN4 BIT(1)
+/* XGMAC_BLEN* are now defined as DMA_AXI_BLEN* in common.h */
#define XGMAC_UNDEF BIT(0)
#define XGMAC_TX_EDMA_CTRL 0x00003040
#define XGMAC_TDPS GENMASK(29, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 4d6bb995d8d8..cc1bdc0975d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -84,7 +84,6 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
- int i;
if (axi->axi_lpi_en)
value |= XGMAC_EN_LPI;
@@ -102,32 +101,12 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (!axi->axi_fb)
value |= XGMAC_UNDEF;
- value &= ~XGMAC_BLEN;
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= XGMAC_BLEN256;
- break;
- case 128:
- value |= XGMAC_BLEN128;
- break;
- case 64:
- value |= XGMAC_BLEN64;
- break;
- case 32:
- value |= XGMAC_BLEN32;
- break;
- case 16:
- value |= XGMAC_BLEN16;
- break;
- case 8:
- value |= XGMAC_BLEN8;
- break;
- case 4:
- value |= XGMAC_BLEN4;
- break;
- }
- }
+ /* Depending on the UNDEF bit the Master AXI will perform any burst
+ * length according to the BLEN programmed (by default all BLEN are
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
+ */
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index d359722100fa..df6e8a567b1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -201,6 +201,7 @@ struct stmmac_dma_ops {
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_reception)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -261,6 +262,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
#define stmmac_enable_dma_transmission(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
+#define stmmac_enable_dma_reception(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_reception, __args)
#define stmmac_enable_dma_irq(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args)
#define stmmac_disable_dma_irq(__priv, __args...) \
@@ -541,7 +544,7 @@ struct stmmac_rx_queue;
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ bool (*is_jumbo_frm)(unsigned int len, bool enh_desc);
int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
int csum);
int (*set_16kib_bfsize)(int mtu);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d218412ca832..382d94a3b972 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -91,14 +91,9 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
-
- if (len >= BUF_SIZE_4KiB)
- ret = 1;
-
- return ret;
+ return len >= BUF_SIZE_4KiB;
}
static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 0ea74c88a779..012b0a477255 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -254,8 +254,8 @@ struct stmmac_priv {
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
- int sph;
- int sph_cap;
+ bool sph_active;
+ bool sph_capable;
u32 sarc_type;
u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
@@ -408,6 +408,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev);
+
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
return !!priv->xdp_prog;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
new file mode 100644
index 000000000000..5c5dd502f79a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI bus helpers for STMMAC driver
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "stmmac_libpci.h"
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_suspend);
+
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_resume);
+
+MODULE_DESCRIPTION("STMMAC PCI helper library");
+MODULE_AUTHOR("Yao Zi <ziyao@disroot.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
new file mode 100644
index 000000000000..71553184f982
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#ifndef __STMMAC_LIBPCI_H__
+#define __STMMAC_LIBPCI_H__
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv);
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv);
+
+#endif /* __STMMAC_LIBPCI_H__ */
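
A brief sketch of how a PCI glue driver is expected to consume these helpers, mirroring the stmmac_pci.c hunk later in this diff. The probe function name and the elided setup steps are illustrative only; the helpers and the plat->suspend/resume hooks are the ones added in this series.

/* Sketch: wire the shared PCI PM helpers into the platform data. */
#include <linux/pci.h>

#include "stmmac.h"
#include "stmmac_libpci.h"

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res = {};

	plat = stmmac_plat_dat_alloc(&pdev->dev);
	if (!plat)
		return -ENOMEM;

	/* ... BAR mapping, clocks and further plat setup elided ... */

	plat->suspend = stmmac_pci_plat_suspend;
	plat->resume = stmmac_pci_plat_resume;

	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}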
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d202f604161e..da206b24aaed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -190,6 +190,44 @@ int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
/**
+ * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
+ * @regval: pointer to a u32 for the resulting register value
+ * @blen: pointer to an array of u32 containing the burst length values in bytes
+ * @len: the number of entries in the @blen array
+ */
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
+{
+ size_t i;
+ u32 val;
+
+ for (val = i = 0; i < len; i++) {
+ u32 burst = blen[i];
+
+ /* Burst values of zero must be skipped. */
+ if (!burst)
+ continue;
+
+ /* The valid range for the burst length is 4 to 256 inclusive,
+ * and it must be a power of two.
+ */
+ if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
+ pr_err("stmmac: invalid burst length %u at index %zu\n",
+ burst, i);
+ continue;
+ }
+
+ /* Since burst is a power of two, and the register field starts
+ * with burst = 4, shift right by two bits so bit 0 of the field
+ * corresponds with the minimum value.
+ */
+ val |= burst >> 2;
+ }
+
+ *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
+}
+EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
+
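
A quick userspace cross-check of the conversion above, assuming the new DMA_AXI_BLEN_MASK is GENMASK(7, 1) so that the burst-enable bits occupy bits 1..7, consistent with the DMA_AXI_BLEN4..DMA_AXI_BLEN256 BIT(1)..BIT(7) definitions removed from dwmac4_dma.h and dwmac_dma.h earlier in this diff.

/* Userspace sketch of stmmac_axi_blen_to_mask(): power-of-two burst
 * lengths 4..256 map to bits 1..7 of the AXI bus mode register.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t axi_blen_to_regval(const uint32_t *blen, size_t len)
{
	uint32_t val = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		uint32_t burst = blen[i];

		/* skip zero entries and anything outside 4..256 or not a power of two */
		if (burst < 4 || burst > 256 || (burst & (burst - 1)))
			continue;

		val |= burst >> 2;	/* 4 -> bit 0 of the field, 256 -> bit 6 */
	}

	return val << 1;		/* FIELD_PREP(GENMASK(7, 1), val) */
}

int main(void)
{
	const uint32_t blen[] = { 4, 8, 16, 32 };

	/* Expect 0x1e: DMA_AXI_BLEN4 | BLEN8 | BLEN16 | BLEN32 = BIT(1)..BIT(4) */
	printf("0x%02x\n",
	       (unsigned int)axi_blen_to_regval(blen, sizeof(blen) / sizeof(blen[0])));
	return 0;
}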
+/**
* stmmac_verify_args - verify the driver parameters.
* Description: it checks the driver parameters and set a default in case of
* errors.
@@ -1245,7 +1283,11 @@ static int stmmac_phylink_setup(struct stmmac_priv *priv)
/* Stmmac always requires an RX clock for hardware initialization */
config->mac_requires_rxc = true;
- if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
+ /* Disable EEE RX clock stop to ensure VLAN register access works
+ * correctly.
+ */
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
+ !(priv->dev->features & NETIF_F_VLAN_FEATURES))
config->eee_rx_clk_stop_enable = true;
/* Set the default transmit clock stop bit based on the platform glue */
@@ -1523,7 +1565,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
buf->page_offset = stmmac_rx_offset(priv);
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -2109,7 +2151,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
pp_params.offset = stmmac_rx_offset(priv);
pp_params.max_len = dma_conf->dma_buf_sz;
- if (priv->sph) {
+ if (priv->sph_active) {
pp_params.offset = 0;
pp_params.max_len += stmmac_rx_offset(priv);
}
@@ -3603,7 +3645,7 @@ static int stmmac_hw_setup(struct net_device *dev)
}
/* Enable Split Header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
for (chan = 0; chan < rx_cnt; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
@@ -4579,18 +4621,18 @@ static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int first_entry, tx_packets, enh_desc;
+ bool enh_desc, has_vlan, set_ic, is_jumbo = false;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
- int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, tx_packets;
int gso = skb_shinfo(skb)->gso_type;
struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ int i, csum_insertion = 0;
int entry, first_tx;
dma_addr_t des;
u32 sdu_len;
@@ -4895,7 +4937,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4906,7 +4948,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
+ if (priv->sph_active)
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
else
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
@@ -4931,6 +4973,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
(rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ /* Wake up Rx DMA from the suspend state if required */
+ stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
@@ -4941,12 +4985,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
- if (priv->sph && len)
+ if (priv->sph_active && len)
return 0;
/* First descriptor, get split header length */
stmmac_get_rx_header_len(priv, p, &hlen);
- if (priv->sph && hlen) {
+ if (priv->sph_active && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
}
@@ -4969,7 +5013,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
unsigned int plen = 0;
/* Not split header, buffer is not available */
- if (!priv->sph)
+ if (!priv->sph_active)
return 0;
/* Not last descriptor */
@@ -5352,10 +5396,10 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -6037,8 +6081,8 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- if (priv->sph_cap) {
- bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_capable) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
u32 chan;
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
@@ -6987,7 +7031,7 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* Adjust Split header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
@@ -7489,7 +7533,8 @@ static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
}
static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
struct stmmac_priv *priv = dl_priv->stmmac_priv;
@@ -7555,19 +7600,43 @@ static void stmmac_unregister_devlink(struct stmmac_priv *priv)
devlink_free(priv->devlink);
}
-/**
- * stmmac_dvr_probe
- * @device: device pointer
- * @plat_dat: platform data pointer
- * @res: stmmac resource pointer
- * Description: this is the main probe function used to
- * call the alloc_etherdev, allocate the priv structure.
- * Return:
- * returns 0 on success, otherwise errno.
- */
-int stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *res)
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ int i;
+
+ plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
+ if (!plat_dat)
+ return NULL;
+
+ /* Set the defaults:
+ * - phy autodetection
+ * - determine GMII_Address CR field from CSR clock
+ * - allow MTU up to JUMBO_LEN
+ * - hash table size
+ * - one unicast filter entry
+ */
+ plat_dat->phy_addr = -1;
+ plat_dat->clk_csr = -1;
+ plat_dat->maxmtu = JUMBO_LEN;
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat_dat->unicast_filter_entries = 1;
+
+ /* Set the mtl defaults */
+ plat_dat->tx_queues_to_use = 1;
+ plat_dat->rx_queues_to_use = 1;
+
+ /* Setup the default RX queue channel map */
+ for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
+ plat_dat->rx_queues_cfg[i].chan = i;
+
+ return plat_dat;
+}
+EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
+
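
For glue drivers, the practical effect of the new helper is that only values differing from these defaults need to be set; the per-driver assignments removed from stmmac_pci.c and stmmac_platform.c below simply restated them. A short illustrative sketch follows (the function name is hypothetical; the overridden fields mirror snps_gmac5_default_data()).

/* Sketch of a glue setup callback relying on the centralised defaults. */
#include <linux/phy.h>
#include <linux/stmmac.h>

static int example_glue_setup(struct plat_stmmacenet_data *plat)
{
	/* Inherited from stmmac_plat_dat_alloc(): phy_addr = -1, clk_csr = -1,
	 * maxmtu = JUMBO_LEN, multicast_filter_bins = HASH_TABLE_SIZE,
	 * unicast_filter_entries = 1, one RX/TX queue each, and
	 * rx_queues_cfg[i].chan = i.
	 */
	plat->tx_queues_to_use = 4;
	plat->rx_queues_to_use = 4;
	plat->phy_interface = PHY_INTERFACE_MODE_GMII;

	return 0;
}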
+static int __stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
@@ -7702,8 +7771,8 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.sphen &&
!(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
- priv->sph_cap = true;
- priv->sph = priv->sph_cap;
+ priv->sph_capable = true;
+ priv->sph_active = priv->sph_capable;
dev_info(priv->device, "SPH feature enabled\n");
}
@@ -7868,6 +7937,34 @@ error_wq_init:
return ret;
}
+
+/**
+ * stmmac_dvr_probe
+ * @dev: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * returns 0 on success, otherwise errno.
+ */
+int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ if (plat_dat->init) {
+ ret = plat_dat->init(dev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = __stmmac_dvr_probe(dev, plat_dat, res);
+ if (ret && plat_dat->exit)
+ plat_dat->exit(dev, plat_dat->bsp_priv);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
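
With ->init/->exit now invoked from the core (here on probe, and from stmmac_dvr_remove() below), glue callbacks take a struct device * rather than a struct platform_device *. A sketch of the new callback shape, modelled on the dwmac-sti conversion earlier in this diff; the example_dwmac type and its clk member are illustrative stand-ins.

/* Sketch: device-based init/exit callbacks for a clock-gated glue layer. */
#include <linux/clk.h>
#include <linux/device.h>

struct example_dwmac {
	struct clk *clk;
};

static int example_dwmac_init(struct device *dev, void *bsp_priv)
{
	struct example_dwmac *dwmac = bsp_priv;

	return clk_prepare_enable(dwmac->clk);
}

static void example_dwmac_exit(struct device *dev, void *bsp_priv)
{
	struct example_dwmac *dwmac = bsp_priv;

	clk_disable_unprepare(dwmac->clk);
}

/* In probe: plat_dat->init = example_dwmac_init;
 *           plat_dat->exit = example_dwmac_exit;
 * stmmac_dvr_probe() runs ->init before the core probe and ->exit on
 * failure; stmmac_dvr_remove() runs ->exit after teardown.
 */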
/**
@@ -7906,6 +8003,9 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 94b3a3b27270..270ad066ced3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
@@ -27,26 +28,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static int stmmac_default_data(struct pci_dev *pdev,
@@ -81,22 +62,12 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
/* Set default number of RX and TX queues to use */
plat->tx_queues_to_use = 4;
plat->rx_queues_to_use = 4;
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
for (i = 0; i < plat->tx_queues_to_use; i++) {
- plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
if (i > 0)
@@ -104,15 +75,10 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- plat->rx_queues_cfg[i].use_prio = false;
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- plat->rx_queues_cfg[i].chan = i;
- }
plat->bus_id = 1;
- plat->phy_addr = -1;
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
@@ -127,10 +93,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->axi->axi_rd_osr_lmt = 31;
plat->axi->axi_fb = false;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
- plat->axi->axi_blen[3] = 32;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN32;
return 0;
}
@@ -139,37 +103,6 @@ static const struct stmmac_pci_info snps_gmac5_pci_info = {
.setup = snps_gmac5_default_data,
};
-static int stmmac_pci_suspend(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int stmmac_pci_resume(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
/**
* stmmac_pci_probe
*
@@ -191,7 +124,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
int ret;
int i;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -249,8 +182,8 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 1;
plat->safety_feat_cfg->tmouten = 1;
- plat->suspend = stmmac_pci_suspend;
- plat->resume = stmmac_pci_resume;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6483d52b4c0f..8979a50b5507 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -95,6 +95,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
struct device_node *np;
struct stmmac_axi *axi;
+ u32 axi_blen[AXI_BLEN];
np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
if (!np)
@@ -117,7 +118,8 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_wr_osr_lmt = 1;
if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
axi->axi_rd_osr_lmt = 1;
- of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_property_read_u32_array(np, "snps,blen", axi_blen, AXI_BLEN);
+ stmmac_axi_blen_to_mask(&axi->axi_blen_regval, axi_blen, AXI_BLEN);
of_node_put(np);
return axi;
@@ -137,13 +139,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
u8 queue = 0;
int ret = 0;
- /* For backwards-compatibility with device trees that don't have any
- * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
- * to one RX and TX queues each.
- */
- plat->rx_queues_to_use = 1;
- plat->tx_queues_to_use = 1;
-
/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
* to always set this, otherwise Queue will be classified as AVB
* (because MTL_QUEUE_AVB = 0).
@@ -162,9 +157,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing RX queues common config */
- if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
- &plat->rx_queues_to_use))
- plat->rx_queues_to_use = 1;
+ of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use);
if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@@ -185,18 +179,13 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
else
plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
- if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
- &plat->rx_queues_cfg[queue].chan))
- plat->rx_queues_cfg[queue].chan = queue;
+ of_property_read_u32(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan);
/* TODO: Dynamic mapping to be included in the future */
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->rx_queues_cfg[queue].prio)) {
- plat->rx_queues_cfg[queue].prio = 0;
- plat->rx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio))
plat->rx_queues_cfg[queue].use_prio = true;
- }
/* RX queue specific packet type routing */
if (of_property_read_bool(q_node, "snps,route-avcp"))
@@ -209,8 +198,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
- else
- plat->rx_queues_cfg[queue].pkt_route = 0x0;
queue++;
}
@@ -221,9 +208,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing TX queues common config */
- if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
- &plat->tx_queues_to_use))
- plat->tx_queues_to_use = 1;
+ of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use);
if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
@@ -268,13 +254,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->tx_queues_cfg[queue].prio)) {
- plat->tx_queues_cfg[queue].prio = 0;
- plat->tx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio))
plat->tx_queues_cfg[queue].use_prio = true;
- }
plat->tx_queues_cfg[queue].coe_unsupported =
of_property_read_bool(q_node, "snps,coe-unsupported");
@@ -436,7 +418,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
void *ret;
int rc;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return ERR_PTR(-ENOMEM);
@@ -480,13 +462,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->bus_id = ++bus_id;
}
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
-
- /* Default to get clk_csr from stmmac_clk_csr_set(),
- * or get clk_csr from device tree.
- */
- plat->clk_csr = -1;
if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
of_property_read_u32(np, "clk_csr", &plat->clk_csr);
@@ -515,17 +490,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
}
- /* Set the maxmtu to a default of JUMBO_LEN in case the
- * parameter is not present in the device tree.
- */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -785,40 +749,40 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_init
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's init callback (if any) and propagate
* the return value.
*/
-static int stmmac_pltfr_init(struct platform_device *pdev,
+static int stmmac_pltfr_init(struct device *dev,
struct plat_stmmacenet_data *plat)
{
int ret = 0;
if (plat->init)
- ret = plat->init(pdev, plat->bsp_priv);
+ ret = plat->init(dev, plat->bsp_priv);
return ret;
}
/**
* stmmac_pltfr_exit
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's exit callback (if any).
*/
-static void stmmac_pltfr_exit(struct platform_device *pdev,
+static void stmmac_pltfr_exit(struct device *dev,
struct plat_stmmacenet_data *plat)
{
if (plat->exit)
- plat->exit(pdev, plat->bsp_priv);
+ plat->exit(dev, plat->bsp_priv);
}
static int stmmac_plat_suspend(struct device *dev, void *bsp_priv)
{
struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
- stmmac_pltfr_exit(to_platform_device(dev), priv->plat);
+ stmmac_pltfr_exit(dev, priv->plat);
return 0;
}
@@ -827,7 +791,7 @@ static int stmmac_plat_resume(struct device *dev, void *bsp_priv)
{
struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
- return stmmac_pltfr_init(to_platform_device(dev), priv->plat);
+ return stmmac_pltfr_init(dev, priv->plat);
}
/**
@@ -842,24 +806,12 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int ret;
-
if (!plat->suspend && plat->exit)
plat->suspend = stmmac_plat_suspend;
if (!plat->resume && plat->init)
plat->resume = stmmac_plat_resume;
- ret = stmmac_pltfr_init(pdev, plat);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat, res);
- if (ret) {
- stmmac_pltfr_exit(pdev, plat);
- return ret;
- }
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat, res);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
@@ -901,12 +853,7 @@ EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe);
*/
void stmmac_pltfr_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct plat_stmmacenet_data *plat = priv->plat;
-
stmmac_dvr_remove(&pdev->dev);
- stmmac_pltfr_exit(pdev, plat);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index a01bc394d1ac..e90a2c469b9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1721,7 +1721,7 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
struct stmmac_packet_attrs attr = { };
int ret;
- if (!priv->sph)
+ if (!priv->sph_active)
return -EOPNOTSUPP;
/* Check for UDP first */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
index ff02a79c00d4..b18404dd5a8b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -122,7 +122,8 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
/* Extended Rx VLAN Filter Enable */
for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) &&
+ ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) {
ret = vlan_write_filter(dev, hw, i, 0);
if (!ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index aa6f16d3df64..d7e4db7224b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
bpf_prog_put(old_prog);
/* Disable RX SPH for XDP operation */
- priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
+ priv->sph_active = priv->sph_capable && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
stmmac_xdp_open(dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d5f358ec9820..5924db6be3fe 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -3068,7 +3068,8 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
}
static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *common = dl_priv->common;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 8b9e2078c602..ab88d4c02cbd 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1618,7 +1618,8 @@ static const struct devlink_ops cpsw_devlink_ops = {
};
static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1753,7 +1754,8 @@ exit:
}
static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 0eed29d6187a..090aa74d3ce7 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -93,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
+static int emac_xsk_xmit_zc(struct prueth_emac *emac,
+ unsigned int q_idx)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
+ struct xsk_buff_pool *pool = tx_chn->xsk_pool;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *host_desc;
+ dma_addr_t dma_desc, dma_buf;
+ struct prueth_swdata *swdata;
+ struct xdp_desc xdp_desc;
+ int num_tx = 0, pkt_len;
+ int descs_avail, ret;
+ u32 *epib;
+ int i;
+
+ descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
+ /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+ * will be available for normal TX path and queue is stopped there if
+ * necessary
+ */
+ if (descs_avail <= MAX_SKB_FRAGS)
+ return 0;
+
+ descs_avail -= MAX_SKB_FRAGS;
+
+ for (i = 0; i < descs_avail; i++) {
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ pkt_len = xdp_desc.len;
+ xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc))
+ break;
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, 0);
+ epib = host_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
+ (emac->port_id | (q_idx << 8)));
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
+ pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ swdata->type = PRUETH_SWDATA_XSK;
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ host_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
+ host_desc, dma_desc);
+
+ if (ret) {
+ ndev->stats.tx_errors++;
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ break;
+ }
+
+ num_tx++;
+ }
+
+ xsk_tx_release(tx_chn->xsk_pool);
+ return num_tx;
+}
+
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (swdata->type == PRUETH_SWDATA_XSK)
+ goto free_pool;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -126,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_pool:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -139,7 +216,9 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ int xsk_frames_done = 0;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -176,6 +255,11 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
total_bytes += xdpf->len;
xdp_return_frame(xdpf);
break;
+ case PRUETH_SWDATA_XSK:
+ pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ xsk_frames_done++;
+ break;
default:
prueth_xmit_free(tx_chn, desc_tx);
ndev->stats.tx_dropped++;
@@ -204,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
__netif_tx_unlock(netif_txq);
}
+ if (tx_chn->xsk_pool) {
+ if (xsk_frames_done)
+ xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ txq_trans_cond_update(netif_txq);
+ emac_xsk_xmit_zc(emac, chn);
+ }
+
return num_tx;
}
@@ -212,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
struct prueth_tx_chn *tx_chns =
container_of(timer, struct prueth_tx_chn, tx_hrtimer);
- enable_irq(tx_chns->irq);
+ if (tx_chns->irq_disabled) {
+ tx_chns->irq_disabled = false;
+ enable_irq(tx_chns->irq);
+ }
return HRTIMER_NORESTART;
}
@@ -235,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
ns_to_ktime(tx_chn->tx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(tx_chn->irq);
+ if (tx_chn->irq_disabled) {
+ tx_chn->irq_disabled = false;
+ enable_irq(tx_chn->irq);
+ }
}
}
@@ -246,6 +348,7 @@ static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
struct prueth_tx_chn *tx_chn = dev_id;
+ tx_chn->irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&tx_chn->napi_tx);
@@ -362,6 +465,29 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
@@ -371,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct device *dev = emac->prueth->dev;
struct net_device *ndev = emac->ndev;
u32 fdqring_id, hdesc_size;
+ struct page_pool *pool;
int i, ret = 0, slice;
int flow_id_base;
@@ -413,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
goto fail;
}
+ pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto fail;
+ }
+
+ rx_chn->pg_pool = pool;
+
flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
emac->rx_mgm_flow_id_base = flow_id_base;
@@ -544,15 +679,15 @@ void emac_rx_timestamp(struct prueth_emac *emac,
* emac_xmit_xdp_frame - transmits an XDP frame
* @emac: emac device
* @xdpf: data to transmit
- * @page: page from page pool if already DMA mapped
* @q_idx: queue id
+ * @buff_type: Type of buffer to be transmitted
*
* Return: XDP state
*/
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
- struct page *page,
- unsigned int q_idx)
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type)
{
struct cppi5_host_desc_t *first_desc;
struct net_device *ndev = emac->ndev;
@@ -560,6 +695,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
struct prueth_swdata *swdata;
+ struct page *page;
u32 *epib;
int ret;
@@ -576,7 +712,12 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
return ICSSG_XDP_CONSUMED; /* drop */
}
- if (page) { /* already DMA mapped by page_pool */
+ if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
+ page = virt_to_head_page(xdpf->data);
+ if (unlikely(!page)) {
+ netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
+ goto drop_free_descs;
+ }
buf_dma = page_pool_get_dma_addr(page);
buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
} else { /* Map the linear buffer */
@@ -631,13 +772,11 @@ EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
* emac_run_xdp - run an XDP program
* @emac: emac device
* @xdp: XDP buffer containing the frame
- * @page: page with RX data if already DMA mapped
* @len: Rx descriptor packet length
*
* Return: XDP state
*/
-static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
- struct page *page, u32 *len)
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
{
struct net_device *ndev = emac->ndev;
struct netdev_queue *netif_txq;
@@ -664,7 +803,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
q_idx = cpu % emac->tx_ch_num;
netif_txq = netdev_get_tx_queue(ndev, q_idx);
__netif_tx_lock(netif_txq, cpu);
- result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_TX);
__netif_tx_unlock(netif_txq);
if (result == ICSSG_XDP_CONSUMED) {
ndev->stats.tx_dropped++;
@@ -689,11 +829,188 @@ drop:
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
ndev->stats.rx_dropped++;
- page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
return ICSSG_XDP_CONSUMED;
}
}
+static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct xdp_buff *xdp)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ int buf_len;
+
+ buf_dma = xsk_buff_xdp_get_dma(xdp);
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ swdata->type = PRUETH_SWDATA_XSK;
+ swdata->data.xdp = xdp;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
+ desc_rx, desc_dma);
+}
+
+static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ struct xdp_buff *xdp;
+ int i, ret;
+
+ for (i = 0; i < budget; i++) {
+ xdp = xsk_buff_alloc(rx_chn->xsk_pool);
+ if (!xdp)
+ break;
+
+ ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
+ if (ret) {
+ netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
+ xsk_buff_free(xdp);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
+{
+ unsigned int headroom = xdp->data - xdp->data_hard_start;
+ unsigned int pkt_len = xdp->data_end - xdp->data;
+ struct net_device *ndev = emac->ndev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+}
+
+static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
+ int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma, buf_dma;
+ struct xdp_buff *xdp;
+ int xdp_status = 0;
+ int count = 0;
+ u32 *psdata;
+ int ret;
+
+ while (count < budget) {
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ break;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
+ break;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type != PRUETH_SWDATA_XSK) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ break;
+ }
+
+ xdp = swdata->data.xdp;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ count++;
+ xsk_buff_set_size(xdp, pkt_len);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (prueth_xdp_is_enabled(emac)) {
+ ret = emac_run_xdp(emac, xdp, &pkt_len);
+ switch (ret) {
+ case ICSSG_XDP_PASS:
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_CONSUMED:
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_TX:
+ case ICSSG_XDP_REDIR:
+ xdp_status |= ret;
+ break;
+ }
+ } else {
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ }
+ }
+
+ if (xdp_status & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
+ /* Refill the RX ring with xsk buffers from the pool for the
+ * "count" packets just processed so that more packets can be
+ * received.
+ */
+ ret = prueth_rx_alloc_zc(emac, count);
+
+ if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
+ /* If user space didn't provide enough buffers, it must
+ * explicitly wake up the kernel once new buffers are available.
+ */
+ if (ret < count)
+ xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
+ }
+
+ return count;
+}
+
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
@@ -719,8 +1036,10 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
return ret;
}
- if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
return 0;
+ }
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
@@ -738,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
/* firmware adds 4 CRC bytes, strip them */
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
/* if allocation fails we drop the packet but push the
@@ -752,11 +1070,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
}
pa = page_address(page);
- if (emac->xdp_prog) {
+ if (prueth_xdp_is_enabled(emac)) {
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
- *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
if (*xdp_state != ICSSG_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
@@ -804,24 +1122,29 @@ requeue:
return ret;
}
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
struct prueth_swdata *swdata;
struct page_pool *pool;
+ struct xdp_buff *xdp;
struct page *page;
pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- if (swdata->type == PRUETH_SWDATA_PAGE) {
+ if (rx_chn->xsk_pool) {
+ xdp = swdata->data.xdp;
+ xsk_buff_free(xdp);
+ } else {
page = swdata->data.page;
page_pool_recycle_direct(pool, page);
}
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}
+EXPORT_SYMBOL_GPL(prueth_rx_cleanup);
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
@@ -1025,10 +1348,11 @@ drop_stop_q_busy:
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct xsk_buff_pool *xsk_pool;
struct prueth_swdata *swdata;
struct xdp_frame *xdpf;
struct sk_buff *skb;
@@ -1045,17 +1369,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
xdpf = swdata->data.xdpf;
xdp_return_frame(xdpf);
break;
+ case PRUETH_SWDATA_XSK:
+ xsk_pool = tx_chn->xsk_pool;
+ xsk_tx_completed(xsk_pool, 1);
+ break;
default:
break;
}
prueth_xmit_free(tx_chn, desc_tx);
}
+EXPORT_SYMBOL_GPL(prueth_tx_cleanup);
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
+ emac->rx_chns.irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&emac->napi_rx);
@@ -1083,6 +1413,7 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
@@ -1090,14 +1421,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
int ret;
while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = emac_rx_packet(emac, flow, &xdp_state);
- xdp_state_or |= xdp_state;
- if (ret)
- break;
- num_rx++;
+ if (rx_chn->xsk_pool) {
+ num_rx = emac_rx_packet_zc(emac, flow, budget);
+ } else {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
+ break;
+ num_rx++;
+ }
}
if (num_rx >= budget)
@@ -1113,7 +1448,11 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
ns_to_ktime(emac->rx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
}
}
@@ -1121,62 +1460,48 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
-static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
- struct device *dma_dev,
- int size)
-{
- struct page_pool_params pp_params = { 0 };
- struct page_pool *pool;
-
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = size;
- pp_params.nid = dev_to_node(emac->prueth->dev);
- pp_params.dma_dir = DMA_BIDIRECTIONAL;
- pp_params.dev = dma_dev;
- pp_params.napi = &emac->napi_rx;
- pp_params.max_len = PAGE_SIZE;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- netdev_err(emac->ndev, "cannot create rx page pool\n");
-
- return pool;
-}
-
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct page_pool *pool;
struct page *page;
+ int desc_avail;
int i, ret;
- pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
-
- chn->pg_pool = pool;
+ desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
+ if (desc_avail < chn->descs_num)
+ netdev_warn(emac->ndev,
+ "not enough RX descriptors available %d < %d\n",
+ desc_avail, chn->descs_num);
- for (i = 0; i < chn->descs_num; i++) {
- /* NOTE: we're not using memory efficiently here.
- * 1 full page (4KB?) used here instead of
- * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+ if (chn->xsk_pool) {
+ /* get xsk buffers from the pool and push as many as
+ * possible to the RX ring
*/
- page = page_pool_dev_alloc_pages(pool);
- if (!page) {
- netdev_err(emac->ndev, "couldn't allocate rx page\n");
- ret = -ENOMEM;
+ ret = prueth_rx_alloc_zc(emac, desc_avail);
+ if (!ret)
goto recycle_alloc_pg;
- }
+ } else {
+ for (i = 0; i < desc_avail; i++) {
+ /* NOTE: we're not using memory efficiently here.
+ * 1 full page (4KB?) used here instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+ */
+ page = page_pool_dev_alloc_pages(chn->pg_pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
- ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
- if (ret < 0) {
- netdev_err(emac->ndev,
- "cannot submit page for rx chan %s ret %d\n",
- chn->name, ret);
- page_pool_recycle_direct(pool, page);
- goto recycle_alloc_pg;
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit page for rx chan %s ret %d\n",
+ chn->name, ret);
+ page_pool_recycle_direct(chn->pg_pool, page);
+ goto recycle_alloc_pg;
+ }
}
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 57a7d1ceab08..f65041662173 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -47,6 +47,9 @@
NETIF_F_HW_HSR_TAG_INS | \
NETIF_F_HW_HSR_TAG_RM)
+#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
+ DMA_ATTR_WEAK_ORDERING)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -392,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
return HRTIMER_NORESTART;
}
@@ -566,31 +573,41 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+}
+
static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
struct page_pool *pool = emac->rx_chns.pg_pool;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int ret;
ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
if (ret)
return ret;
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- xdp_rxq_info_unreg(rxq);
-
- return ret;
-}
-
-static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
-{
- struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ if (rx_chn->xsk_pool) {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (ret)
+ goto xdp_unreg;
+ xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
+ } else {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto xdp_unreg;
+ }
- if (!xdp_rxq_info_is_reg(rxq))
- return;
+ return 0;
- xdp_rxq_info_unreg(rxq);
+xdp_unreg:
+ prueth_destroy_xdp_rxqs(emac);
+ return ret;
}
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
@@ -735,6 +752,128 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
return 0;
}
+static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != queue_id) {
+ rx_chn->xsk_pool = NULL;
+ tx_chn->xsk_pool = NULL;
+ } else {
+ rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ }
+}
+
+static void prueth_destroy_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "tx teardown timeout\n");
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
+static void prueth_destroy_rxq(struct prueth_emac *emac)
+{
+ int i, ret;
+
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ /* When RX DMA channel teardown is initiated, it raises an
+ * interrupt and queues a Teardown Completion Marker (TDCM) into
+ * the RX completion queue. The interrupt is acknowledged by
+ * popping the TDCM descriptor from the RX completion queue in
+ * the RX NAPI handler. To avoid timing out while waiting for the
+ * TDCM to be popped, schedule the RX NAPI handler to run
+ * immediately.
+ */
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (napi_schedule_prep(&emac->napi_rx))
+ __napi_schedule(&emac->napi_rx);
+ }
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "rx teardown timeout\n");
+
+ for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
+ napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
+ &emac->rx_chns,
+ prueth_rx_cleanup);
+ }
+
+ prueth_destroy_xdp_rxqs(emac);
+ k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
+}
+
+static int prueth_create_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ }
+ return 0;
+
+reset_tx_chan:
+ /* Since the interface is not yet up, there wouldn't be
+ * any SKB for completion. So pass false for free_skb.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+ return ret;
+}
+
+static int prueth_create_rxq(struct prueth_emac *emac)
+{
+ int ret;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ return ret;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = prueth_create_xdp_rxqs(emac);
+ if (ret)
+ goto reset_rx_chn;
+
+ napi_enable(&emac->napi_rx);
+ return 0;
+
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+ return ret;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -746,7 +885,7 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- int ret, i, num_data_chn = emac->tx_ch_num;
+ int ret, num_data_chn = emac->tx_ch_num;
struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
@@ -767,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
+ emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -819,28 +959,13 @@ static int emac_ndo_open(struct net_device *ndev)
goto stop;
/* Prepare RX */
- ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ ret = prueth_create_rxq(emac);
if (ret)
goto free_tx_ts_irq;
- ret = prueth_create_xdp_rxqs(emac);
- if (ret)
- goto reset_rx_chn;
-
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_txq(emac);
if (ret)
- goto destroy_xdp_rxqs;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
- if (ret)
- goto reset_tx_chan;
- }
-
- /* Enable NAPI in Tx and Rx direction */
- for (i = 0; i < emac->tx_ch_num; i++)
- napi_enable(&emac->tx_chns[i].napi_tx);
- napi_enable(&emac->napi_rx);
+ goto destroy_rxq;
/* start PHY */
phy_start(ndev->phydev);
@@ -851,15 +976,8 @@ static int emac_ndo_open(struct net_device *ndev)
return 0;
-reset_tx_chan:
- /* Since interface is not yet up, there is wouldn't be
- * any SKB for completion. So set false to free_skb
- */
- prueth_reset_tx_chan(emac, i, false);
-destroy_xdp_rxqs:
- prueth_destroy_xdp_rxqs(emac);
-reset_rx_chn:
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+destroy_rxq:
+ prueth_destroy_rxq(emac);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
@@ -889,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
- int rx_flow = PRUETH_RX_FLOW_DATA;
- int max_rx_flows;
- int ret, i;
/* inform the upper layers. */
netif_tx_stop_all_queues(ndev);
@@ -905,32 +1020,8 @@ static int emac_ndo_stop(struct net_device *ndev)
else
__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
- atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- /* tear down and disable UDMA channels */
- reinit_completion(&emac->tdown_complete);
- for (i = 0; i < emac->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
-
- ret = wait_for_completion_timeout(&emac->tdown_complete,
- msecs_to_jiffies(1000));
- if (!ret)
- netdev_err(ndev, "tx teardown timeout\n");
-
- prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++) {
- napi_disable(&emac->tx_chns[i].napi_tx);
- hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
- }
-
- max_rx_flows = PRUETH_MAX_RX_FLOWS;
- k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
-
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
- prueth_destroy_xdp_rxqs(emac);
- napi_disable(&emac->napi_rx);
- hrtimer_cancel(&emac->rx_hrtimer);
+ prueth_destroy_txq(emac);
+ prueth_destroy_rxq(emac);
cancel_work_sync(&emac->rx_mode_work);
@@ -943,10 +1034,10 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->tx_ts_irq, emac);
- free_irq(emac->rx_chns.irq[rx_flow], emac);
+ free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
prueth_cleanup_tx_chns(emac);
prueth->emacs_initialized--;
@@ -1108,7 +1199,8 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
__netif_tx_lock(netif_txq, cpu);
for (i = 0; i < n; i++) {
xdpf = frames[i];
- err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ err = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO);
if (err != ICSSG_XDP_TX) {
ndev->stats.tx_dropped++;
break;
@@ -1141,6 +1233,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
return 0;
}
+static int prueth_xsk_pool_enable(struct prueth_emac *emac,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 frame_size;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < PRUETH_MAX_PKT_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
+ return ret;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ emac->xsk_qid = queue_id;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (emac->xsk_qid != queue_id) {
+ netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
+ return -EINVAL;
+ }
+
+ pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ if (!pool) {
+ netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ emac->xsk_qid = -EINVAL;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* emac_ndo_bpf - implements ndo_bpf for icssg_prueth
* @ndev: network adapter device
@@ -1155,11 +1350,58 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return emac_xdp_setup(emac, bpf);
+ case XDP_SETUP_XSK_POOL:
+ return bpf->xsk.pool ?
+ prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
+ prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
default:
return -EINVAL;
}
}
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != qid) {
+ netdev_err(ndev, "XSK queue %d not registered\n", qid);
+ return -EINVAL;
+ }
+
+ if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
+ netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!tx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!rx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (flags & XDP_WAKEUP_TX) {
+ if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) {
+ if (likely(napi_schedule_prep(&tx_chn->napi_tx)))
+ __napi_schedule(&tx_chn->napi_tx);
+ }
+ }
+
+ if (flags & XDP_WAKEUP_RX) {
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (likely(napi_schedule_prep(&emac->napi_rx)))
+ __napi_schedule(&emac->napi_rx);
+ }
+ }
+
+ return 0;
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1178,6 +1420,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_xdp_xmit = emac_xdp_xmit,
.ndo_hwtstamp_get = icssg_ndo_get_ts_config,
.ndo_hwtstamp_set = icssg_ndo_set_ts_config,
+ .ndo_xsk_wakeup = prueth_xsk_wakeup,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1311,7 +1554,8 @@ static int prueth_netdev_init(struct prueth *prueth,
xdp_set_features_flag(ndev,
NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT);
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f0fa9688d9a0..10eadd356650 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -38,6 +38,8 @@
#include <net/devlink.h>
#include <net/xdp.h>
#include <net/page_pool/helpers.h>
+#include <net/xsk_buff_pool.h>
+#include <net/xdp_sock_drv.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -126,6 +128,8 @@ struct prueth_tx_chn {
char name[32];
struct hrtimer tx_hrtimer;
unsigned long tx_pace_timeout_ns;
+ struct xsk_buff_pool *xsk_pool;
+ bool irq_disabled;
};
struct prueth_rx_chn {
@@ -138,6 +142,8 @@ struct prueth_rx_chn {
char name[32];
struct page_pool *pg_pool;
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
+ bool irq_disabled;
};
enum prueth_swdata_type {
@@ -146,6 +152,12 @@ enum prueth_swdata_type {
PRUETH_SWDATA_PAGE,
PRUETH_SWDATA_CMD,
PRUETH_SWDATA_XDPF,
+ PRUETH_SWDATA_XSK,
+};
+
+enum prueth_tx_buff_type {
+ PRUETH_TX_BUFF_TYPE_XDP_TX,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO,
};
struct prueth_swdata {
@@ -155,6 +167,7 @@ struct prueth_swdata {
struct page *page;
u32 cmd;
struct xdp_frame *xdpf;
+ struct xdp_buff *xdp;
} data;
};
@@ -241,6 +254,7 @@ struct prueth_emac {
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
struct bpf_prog *xdp_prog;
struct xdp_attachment_info xdpi;
+ int xsk_qid;
};
/* The buf includes headroom compatible with both skb and xdpf */
@@ -499,7 +513,14 @@ void prueth_put_cores(struct prueth *prueth, int slice);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
- struct page *page,
- unsigned int q_idx);
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type);
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma);
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma);
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
+static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac)
+{
+ return !!READ_ONCE(emac->xdp_prog);
+}
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 4f6cc6cd1f03..8f46e9be76b1 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2657,7 +2657,7 @@ static int gbe_hwtstamp_set(void *intf_priv, struct kernel_hwtstamp_config *cfg,
phy = gbe_intf->slave->phy;
if (phy_has_hwtstamp(phy))
- return phy->mii_ts->hwtstamp(phy->mii_ts, cfg, extack);
+ return phy->mii_ts->hwtstamp_set(phy->mii_ts, cfg, extack);
switch (cfg->tx_type) {
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 5ee8e8980393..591866fc9055 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -260,6 +260,7 @@ void gelic_card_down(struct gelic_card *card)
if (atomic_dec_if_positive(&card->users) == 0) {
pr_debug("%s: real do\n", __func__);
napi_disable(&card->napi);
+ timer_delete_sync(&card->rx_oom_timer);
/*
* Disable irq. Wireless interrupts will
* be disabled later if any
@@ -970,7 +971,8 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
* gelic_card_decode_one_descr - processes an rx descriptor
* @card: card structure
*
- * returns 1 if a packet has been sent to the stack, otherwise 0
+ * returns 1 if a packet has been sent to the stack, -ENOMEM on skb alloc
+ * failure, otherwise 0
*
* processes an rx descriptor by iommu-unmapping the data buffer and passing
* the packet up to the stack
@@ -981,16 +983,18 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
struct gelic_descr_chain *chain = &card->rx_chain;
struct gelic_descr *descr = chain->head;
struct net_device *netdev = NULL;
- int dmac_chain_ended;
+ int dmac_chain_ended = 0;
+ int prepare_rx_ret;
status = gelic_descr_get_status(descr);
if (status == GELIC_DESCR_DMA_CARDOWNED)
return 0;
- if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
+ if (status == GELIC_DESCR_DMA_NOT_IN_USE || !descr->skb) {
dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
- return 0;
+ dmac_chain_ended = 1;
+ goto refill;
}
/* netdevice select */
@@ -1048,9 +1052,10 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
refill:
/* is the current descriptor terminated with next_descr == NULL? */
- dmac_chain_ended =
- be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
- GELIC_DESCR_RX_DMA_CHAIN_END;
+ if (!dmac_chain_ended)
+ dmac_chain_ended =
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
+ GELIC_DESCR_RX_DMA_CHAIN_END;
/*
* So that always DMAC can see the end
* of the descriptor chain to avoid
@@ -1062,10 +1067,11 @@ refill:
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
/*
- * this call can fail, but for now, just leave this
- * descriptor without skb
+ * this call can fail, propagate the error
*/
- gelic_descr_prepare_rx(card, descr);
+ prepare_rx_ret = gelic_descr_prepare_rx(card, descr);
+ if (prepare_rx_ret)
+ return prepare_rx_ret;
chain->tail = descr;
chain->head = descr->next;
@@ -1087,6 +1093,13 @@ refill:
return 1;
}
+static void gelic_rx_oom_timer(struct timer_list *t)
+{
+ struct gelic_card *card = timer_container_of(card, t, rx_oom_timer);
+
+ napi_schedule(&card->napi);
+}
+
/**
* gelic_net_poll - NAPI poll function called by the stack to return packets
* @napi: napi structure
@@ -1099,14 +1112,22 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
{
struct gelic_card *card = container_of(napi, struct gelic_card, napi);
int packets_done = 0;
+ int work_result = 0;
while (packets_done < budget) {
- if (!gelic_card_decode_one_descr(card))
+ work_result = gelic_card_decode_one_descr(card);
+ if (work_result != 1)
break;
packets_done++;
}
+ if (work_result == -ENOMEM) {
+ napi_complete_done(napi, packets_done);
+ mod_timer(&card->rx_oom_timer, jiffies + 1);
+ return packets_done;
+ }
+
if (packets_done < budget) {
napi_complete_done(napi, packets_done);
gelic_card_rx_irq_on(card);
@@ -1576,6 +1597,8 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
mutex_init(&card->updown_lock);
atomic_set(&card->users, 0);
+ timer_setup(&card->rx_oom_timer, gelic_rx_oom_timer, 0);
+
return card;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index f7d7931e51b7..c10f1984a5a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -268,6 +268,7 @@ struct gelic_vlan_id {
struct gelic_card {
struct napi_struct napi;
struct net_device *netdev[GELIC_PORT_MAX];
+ struct timer_list rx_oom_timer;
/*
* hypervisor requires irq_status should be
* 8 bytes aligned, but u64 member is
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index 9aa3964187e1..f362e51c73ee 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -240,9 +240,6 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
@@ -261,9 +258,6 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
@@ -273,9 +267,6 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return;
-
phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
@@ -285,9 +276,6 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b1a6ef5709a9..29e5c5470c94 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1249,7 +1249,7 @@ enum wx_pf_flags {
WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
WX_FLAG_PTP_PPS_ENABLED,
WX_FLAG_NEED_LINK_CONFIG,
- WX_FLAG_NEED_SFP_RESET,
+ WX_FLAG_NEED_MODULE_RESET,
WX_FLAG_NEED_UPDATE_LINK,
WX_FLAG_NEED_DO_RESET,
WX_FLAG_RX_MERGE_ENABLED,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
index 35eebdb07761..62d7f47d4f8d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -17,10 +17,15 @@
void txgbe_gpio_init_aml(struct wx *wx)
{
- u32 status;
+ u32 status, mod_rst;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_rst = TXGBE_GPIOBIT_4;
+ else
+ mod_rst = TXGBE_GPIOBIT_2;
- wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2);
- wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2);
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, mod_rst);
+ wr32(wx, WX_GPIO_INTEN, mod_rst);
status = rd32(wx, WX_GPIO_INTSTATUS);
for (int i = 0; i < 6; i++) {
@@ -33,13 +38,18 @@ irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
{
struct txgbe *txgbe = data;
struct wx *wx = txgbe->wx;
- u32 status;
+ u32 status, mod_rst;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_rst = TXGBE_GPIOBIT_4;
+ else
+ mod_rst = TXGBE_GPIOBIT_2;
wr32(wx, WX_GPIO_INTMASK, 0xFF);
status = rd32(wx, WX_GPIO_INTSTATUS);
- if (status & TXGBE_GPIOBIT_2) {
- set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
- wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2);
+ if (status & mod_rst) {
+ set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+ wr32(wx, WX_GPIO_EOI, mod_rst);
wx_service_event_schedule(wx);
}
@@ -51,7 +61,7 @@ int txgbe_test_hostif(struct wx *wx)
{
struct txgbe_hic_ephy_getlink buffer;
- if (wx->mac.type != wx_mac_aml)
+ if (wx->mac.type == wx_mac_sp)
return 0;
buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
@@ -63,15 +73,49 @@ int txgbe_test_hostif(struct wx *wx)
WX_HI_COMMAND_TIMEOUT, true);
}
-static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer)
+int txgbe_read_eeprom_hostif(struct wx *wx,
+ struct txgbe_hic_i2c_read *buffer,
+ u32 length, u8 *data)
{
- buffer->hdr.cmd = FW_READ_SFP_INFO_CMD;
+ u32 dword_len, offset, value, i;
+ int err;
+
+ buffer->hdr.cmd = FW_READ_EEPROM_CMD;
buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
sizeof(struct wx_hic_hdr);
buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ err = wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_i2c_read),
+ WX_HI_COMMAND_TIMEOUT, false);
+ if (err != 0)
+ return err;
+
+ /* buffer length offset to read return data */
+ offset = sizeof(struct txgbe_hic_i2c_read) >> 2;
+ dword_len = round_up(length, 4) >> 2;
+
+ for (i = 0; i < dword_len; i++) {
+ value = rd32a(wx, WX_FW2SW_MBOX, i + offset);
+ le32_to_cpus(&value);
+
+ memcpy(data, &value, 4);
+ data += 4;
+ }
+
+ return 0;
+}
+
+static int txgbe_identify_module_hostif(struct wx *wx,
+ struct txgbe_hic_get_module_info *buffer)
+{
+ buffer->hdr.cmd = FW_GET_MODULE_INFO_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_get_module_info) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
return wx_host_interface_command(wx, (u32 *)buffer,
- sizeof(struct txgbe_hic_i2c_read),
+ sizeof(struct txgbe_hic_get_module_info),
WX_HI_COMMAND_TIMEOUT, true);
}
@@ -85,6 +129,9 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
switch (speed) {
+ case SPEED_40000:
+ buffer.speed = TXGBE_LINK_SPEED_40GB_FULL;
+ break;
case SPEED_25000:
buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
break;
@@ -104,17 +151,21 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
WX_HI_COMMAND_TIMEOUT, true);
}
-static void txgbe_get_link_capabilities(struct wx *wx, int *speed, int *duplex)
+static void txgbe_get_link_capabilities(struct wx *wx, int *speed,
+ int *autoneg, int *duplex)
{
struct txgbe *txgbe = wx->priv;
- if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces))
+ if (test_bit(PHY_INTERFACE_MODE_XLGMII, txgbe->link_interfaces))
+ *speed = SPEED_40000;
+ else if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->link_interfaces))
*speed = SPEED_25000;
- else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces))
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->link_interfaces))
*speed = SPEED_10000;
else
*speed = SPEED_UNKNOWN;
+ *autoneg = phylink_test(txgbe->advertising, Autoneg);
*duplex = *speed == SPEED_UNKNOWN ? DUPLEX_HALF : DUPLEX_FULL;
}
@@ -125,6 +176,8 @@ static void txgbe_get_mac_link(struct wx *wx, int *speed)
status = rd32(wx, TXGBE_CFG_PORT_ST);
if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
*speed = SPEED_UNKNOWN;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_40G)
+ *speed = SPEED_40000;
else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
*speed = SPEED_25000;
else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
@@ -135,11 +188,11 @@ static void txgbe_get_mac_link(struct wx *wx, int *speed)
int txgbe_set_phy_link(struct wx *wx)
{
- int speed, duplex, err;
+ int speed, autoneg, duplex, err;
- txgbe_get_link_capabilities(wx, &speed, &duplex);
+ txgbe_get_link_capabilities(wx, &speed, &autoneg, &duplex);
- err = txgbe_set_phy_link_hostif(wx, speed, 0, duplex);
+ err = txgbe_set_phy_link_hostif(wx, speed, autoneg, duplex);
if (err) {
wx_err(wx, "Failed to setup link\n");
return err;
@@ -148,40 +201,128 @@ int txgbe_set_phy_link(struct wx *wx)
return 0;
}
-static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
DECLARE_PHY_INTERFACE_MASK(interfaces);
struct txgbe *txgbe = wx->priv;
- if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE |
- TXGBE_SFF_25GBASEER_CAPABLE |
- TXGBE_SFF_25GBASELR_CAPABLE)) {
- phylink_set(modes, 25000baseSR_Full);
+ if (id->cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ if (id->com_25g_code == TXGBE_SFF_25GBASECR_91FEC ||
+ id->com_25g_code == TXGBE_SFF_25GBASECR_74FEC ||
+ id->com_25g_code == TXGBE_SFF_25GBASECR_NOFEC) {
+ phylink_set(modes, 25000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ } else {
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ } else if (id->cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 25000baseCR_Full);
__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ } else {
+ if (id->com_25g_code == TXGBE_SFF_25GBASESR_CAPABLE ||
+ id->com_25g_code == TXGBE_SFF_25GBASEER_CAPABLE ||
+ id->com_25g_code == TXGBE_SFF_25GBASELR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 10000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 10000baseLR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
}
- if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
- phylink_set(modes, 10000baseSR_Full);
- __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported SFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+
+ if (!linkmode_equal(txgbe->link_support, modes)) {
+ linkmode_copy(txgbe->link_support, modes);
+ phy_interface_and(txgbe->link_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
}
- if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
- phylink_set(modes, 10000baseLR_Full);
+
+ return 0;
+}
+
+static int txgbe_qsfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
}
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 40000baseSR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 40000baseLR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_RSRVD) {
+ if (id->sff_opt1 & TXGBE_SFF_ETHERNET_100G_CR4) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ phylink_set(modes, 25000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ }
if (phy_interface_empty(interfaces)) {
- wx_err(wx, "unsupported SFP module\n");
+ wx_err(wx, "unsupported QSFP module\n");
return -EINVAL;
}
phylink_set(modes, Pause);
phylink_set(modes, Asym_Pause);
phylink_set(modes, FIBRE);
- txgbe->link_port = PORT_FIBRE;
- if (!linkmode_equal(txgbe->sfp_support, modes)) {
- linkmode_copy(txgbe->sfp_support, modes);
- phy_interface_and(txgbe->sfp_interfaces,
+ if (!linkmode_equal(txgbe->link_support, modes)) {
+ linkmode_copy(txgbe->link_support, modes);
+ phy_interface_and(txgbe->link_interfaces,
wx->phylink_config.supported_interfaces,
interfaces);
linkmode_copy(txgbe->advertising, modes);
@@ -192,40 +333,53 @@ static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
return 0;
}
-int txgbe_identify_sfp(struct wx *wx)
+int txgbe_identify_module(struct wx *wx)
{
- struct txgbe_hic_i2c_read buffer;
- struct txgbe_sfp_id *id;
+ struct txgbe_hic_get_module_info buffer;
+ struct txgbe_sff_id *id;
int err = 0;
+ u32 mod_abs;
u32 gpio;
+ if (wx->mac.type == wx_mac_aml40)
+ mod_abs = TXGBE_GPIOBIT_4;
+ else
+ mod_abs = TXGBE_GPIOBIT_2;
+
gpio = rd32(wx, WX_GPIO_EXT);
- if (gpio & TXGBE_GPIOBIT_2)
+ if (gpio & mod_abs)
return -ENODEV;
- err = txgbe_identify_sfp_hostif(wx, &buffer);
+ err = txgbe_identify_module_hostif(wx, &buffer);
if (err) {
- wx_err(wx, "Failed to identify SFP module\n");
+ wx_err(wx, "Failed to identify module\n");
return err;
}
id = &buffer.id;
- if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) {
- wx_err(wx, "Invalid SFP module\n");
+ if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP_PLUS &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP28) {
+ wx_err(wx, "Invalid module\n");
return -ENODEV;
}
- return txgbe_sfp_to_linkmodes(wx, id);
+ if (id->transceiver_type == 0xFF)
+ return txgbe_sfp_to_linkmodes(wx, id);
+
+ return txgbe_qsfp_to_linkmodes(wx, id);
}
void txgbe_setup_link(struct wx *wx)
{
struct txgbe *txgbe = wx->priv;
- phy_interface_zero(txgbe->sfp_interfaces);
- linkmode_zero(txgbe->sfp_support);
+ phy_interface_zero(txgbe->link_interfaces);
+ linkmode_zero(txgbe->link_support);
- txgbe_identify_sfp(wx);
+ set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+ wx_service_event_schedule(wx);
}
static void txgbe_get_link_state(struct phylink_config *config,
@@ -278,6 +432,9 @@ static void txgbe_mac_link_up_aml(struct phylink_config *config,
txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
switch (speed) {
+ case SPEED_40000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
+ break;
case SPEED_25000:
txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
break;
@@ -342,7 +499,18 @@ int txgbe_phylink_init_aml(struct txgbe *txgbe)
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
config->get_fixed_state = txgbe_get_link_state;
- phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ if (wx->mac.type == wx_mac_aml40) {
+ config->mac_capabilities |= MAC_40000FD;
+ phy_mode = PHY_INTERFACE_MODE_XLGMII;
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, config->supported_interfaces);
+ state.speed = SPEED_40000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ state.speed = SPEED_25000;
+ state.duplex = DUPLEX_FULL;
+ }
+
__set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
@@ -350,8 +518,6 @@ int txgbe_phylink_init_aml(struct txgbe *txgbe)
if (IS_ERR(phylink))
return PTR_ERR(phylink);
- state.speed = SPEED_25000;
- state.duplex = DUPLEX_FULL;
err = phylink_set_fixed_link(phylink, &state);
if (err) {
wx_err(wx, "Failed to set fixed link\n");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
index 25d4971ca0d9..4f6df0ee860b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -7,8 +7,11 @@
void txgbe_gpio_init_aml(struct wx *wx);
irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
int txgbe_test_hostif(struct wx *wx);
+int txgbe_read_eeprom_hostif(struct wx *wx,
+ struct txgbe_hic_i2c_read *buffer,
+ u32 length, u8 *data);
int txgbe_set_phy_link(struct wx *wx);
-int txgbe_identify_sfp(struct wx *wx);
+int txgbe_identify_module(struct wx *wx);
void txgbe_setup_link(struct wx *wx);
int txgbe_phylink_init_aml(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index e285b088c7b2..f3cb00109529 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -10,6 +10,7 @@
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
+#include "txgbe_aml.h"
#include "txgbe_ethtool.h"
int txgbe_get_link_ksettings(struct net_device *netdev,
@@ -19,9 +20,6 @@ int txgbe_get_link_ksettings(struct net_device *netdev,
struct txgbe *txgbe = wx->priv;
int err;
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
err = wx_get_link_ksettings(netdev, cmd);
if (err)
return err;
@@ -30,8 +28,9 @@ int txgbe_get_link_ksettings(struct net_device *netdev,
return 0;
cmd->base.port = txgbe->link_port;
- cmd->base.autoneg = AUTONEG_DISABLE;
- linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
+ cmd->base.autoneg = phylink_test(txgbe->advertising, Autoneg) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ linkmode_copy(cmd->link_modes.supported, txgbe->link_support);
linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
return 0;
@@ -536,6 +535,34 @@ static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int
+txgbe_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe_hic_i2c_read buffer;
+ int err;
+
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return -EOPNOTSUPP;
+
+ buffer.length = cpu_to_be32(page_data->length);
+ buffer.offset = cpu_to_be32(page_data->offset);
+ buffer.page = page_data->page;
+ buffer.bank = page_data->bank;
+ buffer.i2c_address = page_data->i2c_address;
+
+ err = txgbe_read_eeprom_hostif(wx, &buffer, page_data->length,
+ page_data->data);
+ if (err) {
+ wx_err(wx, "Failed to read module EEPROM\n");
+ return err;
+ }
+
+ return page_data->length;
+}
+
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
@@ -570,6 +597,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_msglevel = wx_set_msglevel,
.get_ts_info = wx_get_ts_info,
.get_ts_stats = wx_get_ptp_stats,
+ .get_module_eeprom_by_page = txgbe_get_module_eeprom_by_page,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 3885283681ec..aa14958d439a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -23,7 +23,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
{
u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
- if (wx->mac.type == wx_mac_aml) {
+ if (wx->mac.type != wx_mac_sp) {
misc_ien |= TXGBE_PX_MISC_GPIO;
txgbe_gpio_init_aml(wx);
}
@@ -201,10 +201,7 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
- if (txgbe->wx->mac.type == wx_mac_aml40)
- return;
-
- if (txgbe->wx->mac.type == wx_mac_aml)
+ if (txgbe->wx->mac.type != wx_mac_sp)
free_irq(txgbe->gpio_irq, txgbe);
free_irq(txgbe->link_irq, txgbe);
@@ -219,9 +216,6 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- if (wx->mac.type == wx_mac_aml40)
- goto skip_sp_irq;
-
txgbe->misc.nirqs = TXGBE_IRQ_MAX;
txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index daa761e48f9d..0de051450a82 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -89,21 +89,21 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-static void txgbe_sfp_detection_subtask(struct wx *wx)
+static void txgbe_module_detection_subtask(struct wx *wx)
{
int err;
- if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags))
+ if (!test_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags))
return;
- /* wait for SFP module ready */
+ /* wait for SFF module ready */
msleep(200);
- err = txgbe_identify_sfp(wx);
+ err = txgbe_identify_module(wx);
if (err)
return;
- clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+ clear_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
}
static void txgbe_link_config_subtask(struct wx *wx)
@@ -128,7 +128,7 @@ static void txgbe_service_task(struct work_struct *work)
{
struct wx *wx = container_of(work, struct wx, service_task);
- txgbe_sfp_detection_subtask(wx);
+ txgbe_module_detection_subtask(wx);
txgbe_link_config_subtask(wx);
wx_service_event_complete(wx);
@@ -144,7 +144,6 @@ static void txgbe_init_service(struct wx *wx)
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
- u32 reg;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -155,12 +154,8 @@ static void txgbe_up_complete(struct wx *wx)
switch (wx->mac.type) {
case wx_mac_aml40:
- reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
- reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
- reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
- wr32(wx, WX_MAC_TX_CFG, reg);
- txgbe_enable_sec_tx_path(wx);
- netif_carrier_on(wx->netdev);
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
break;
case wx_mac_aml:
/* Enable TX laser */
@@ -276,7 +271,7 @@ void txgbe_down(struct wx *wx)
switch (wx->mac.type) {
case wx_mac_aml40:
- netif_carrier_off(wx->netdev);
+ phylink_stop(wx->phylink);
break;
case wx_mac_aml:
phylink_stop(wx->phylink);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 03f1b9bc604d..8ea7aa07ae4e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -579,7 +579,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
switch (wx->mac.type) {
case wx_mac_aml40:
- return 0;
case wx_mac_aml:
return txgbe_phylink_init_aml(txgbe);
case wx_mac_sp:
@@ -653,7 +652,6 @@ void txgbe_remove_phy(struct txgbe *txgbe)
{
switch (txgbe->wx->mac.type) {
case wx_mac_aml40:
- return;
case wx_mac_aml:
phylink_destroy(txgbe->wx->phylink);
return;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index b9a4ba48f5b9..82433e9cb0e3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -98,6 +98,7 @@
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_40G BIT(2)
#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3)
#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4)
#define TXGBE_CFG_VXLAN 0x14410
@@ -317,8 +318,12 @@ void txgbe_do_reset(struct net_device *netdev);
#define TXGBE_LINK_SPEED_UNKNOWN 0
#define TXGBE_LINK_SPEED_10GB_FULL 4
#define TXGBE_LINK_SPEED_25GB_FULL 0x10
+#define TXGBE_LINK_SPEED_40GB_FULL 0x20
#define TXGBE_SFF_IDENTIFIER_SFP 0x3
+#define TXGBE_SFF_IDENTIFIER_QSFP 0xC
+#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define TXGBE_SFF_IDENTIFIER_QSFP28 0x11
#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4
@@ -331,6 +336,12 @@ void txgbe_do_reset(struct net_device *netdev);
#define TXGBE_SFF_25GBASECR_91FEC 0xB
#define TXGBE_SFF_25GBASECR_74FEC 0xC
#define TXGBE_SFF_25GBASECR_NOFEC 0xD
+#define TXGBE_SFF_ETHERNET_RSRVD BIT(7)
+#define TXGBE_SFF_ETHERNET_40G_CR4 BIT(3)
+#define TXGBE_SFF_ETHERNET_40G_SR4 BIT(2)
+#define TXGBE_SFF_ETHERNET_40G_LR4 BIT(1)
+#define TXGBE_SFF_ETHERNET_40G_ACTIVE BIT(0)
+#define TXGBE_SFF_ETHERNET_100G_CR4 0xB
#define TXGBE_PHY_FEC_RS BIT(0)
#define TXGBE_PHY_FEC_BASER BIT(1)
@@ -341,9 +352,10 @@ void txgbe_do_reset(struct net_device *netdev);
#define FW_PHY_GET_LINK_CMD 0xC0
#define FW_PHY_SET_LINK_CMD 0xC1
-#define FW_READ_SFP_INFO_CMD 0xC5
+#define FW_GET_MODULE_INFO_CMD 0xC5
+#define FW_READ_EEPROM_CMD 0xC6
-struct txgbe_sfp_id {
+struct txgbe_sff_id {
u8 identifier; /* A0H 0x00 */
u8 com_1g_code; /* A0H 0x06 */
u8 com_10g_code; /* A0H 0x03 */
@@ -358,9 +370,9 @@ struct txgbe_sfp_id {
u8 reserved[5];
};
-struct txgbe_hic_i2c_read {
+struct txgbe_hic_get_module_info {
struct wx_hic_hdr hdr;
- struct txgbe_sfp_id id;
+ struct txgbe_sff_id id;
};
struct txgbe_hic_ephy_setlink {
@@ -383,6 +395,16 @@ struct txgbe_hic_ephy_getlink {
u8 resv[6];
};
+struct txgbe_hic_i2c_read {
+ struct wx_hic_hdr hdr;
+ __be32 offset;
+ __be32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 resv;
+};
+
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
.name = _NAME, \
@@ -451,8 +473,8 @@ struct txgbe {
int fdir_filter_count;
spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
- DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(link_interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 link_port;
};
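[Editor's sketch, not part of the patch: the new SFF identifier defines follow SFF-8024 values, so a driver could in principle classify QSFP-class modules with a small helper. The function below is hypothetical and only reuses the defines added above.]

static bool txgbe_module_is_qsfp(u8 identifier)
{
	/* SFF-8024 identifier byte (A0h offset 0) for QSFP-class modules */
	return identifier == TXGBE_SFF_IDENTIFIER_QSFP ||
	       identifier == TXGBE_SFF_IDENTIFIER_QSFP_PLUS ||
	       identifier == TXGBE_SFF_IDENTIFIER_QSFP28;
}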
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 39c892e46cb0..3d47d749ef9f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1624,22 +1624,15 @@ netvsc_get_rxfh_fields(struct net_device *ndev,
return 0;
}
-static int
-netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules)
+static u32 netvsc_get_rx_ring_count(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev)
- return -ENODEV;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nvdev->num_chn;
return 0;
- }
- return -EOPNOTSUPP;
+
+ return nvdev->num_chn;
}
static int
@@ -1969,7 +1962,7 @@ static const struct ethtool_ops ethtool_ops = {
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_rxnfc = netvsc_get_rxnfc,
+ .get_rx_ring_count = netvsc_get_rx_ring_count,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
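[Editor's sketch, not part of the patch: a driver whose get_rxnfc() handler only answered ETHTOOL_GRXRINGS can switch to the dedicated callback, as netvsc does above. The foo_* names and the num_rx_rings field are illustrative.]

static u32 foo_get_rx_ring_count(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Report the number of RX rings currently exposed by the device */
	return priv->num_rx_rings;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rx_ring_count	= foo_get_rx_ring_count,
};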
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d7e3ddbcab6f..dea411e132db 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -52,8 +52,8 @@ static u8 ipvlan_get_v4_hash(const void *iaddr)
{
const struct in_addr *ip4_addr = iaddr;
- return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
- IPVLAN_HASH_MASK;
+ return jhash_1word((__force u32)ip4_addr->s_addr, ipvlan_jhash_secret) &
+ IPVLAN_HASH_MASK;
}
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
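[Editor's note, not part of the patch: the (__force u32) cast only tells sparse that reinterpreting the __be32 address as a plain u32 for hashing is intentional; the generated code is unchanged. Minimal sketch with a hypothetical helper:]

#include <linux/jhash.h>
#include <linux/types.h>

static u32 hash_v4_addr(__be32 addr, u32 seed, u32 mask)
{
	/* jhash_1word() takes a cpu-order u32; the cast silences the
	 * endianness warning without altering the hash result.
	 */
	return jhash_1word((__force u32)addr, seed) & mask;
}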
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 9b41d4697a40..ba7091518265 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -92,11 +92,6 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
if (fwnode_property_read_bool(child, "broken-turn-around"))
mdio->phy_ignore_ta_mask |= 1 << addr;
- fwnode_property_read_u32(child, "reset-assert-us",
- &phy->mdio.reset_assert_delay);
- fwnode_property_read_u32(child, "reset-deassert-us",
- &phy->mdio.reset_deassert_delay);
-
/* Associate the fwnode with the device structure so it
* can be looked up later
*/
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 1357348e01d5..b8d298c04d3f 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -63,14 +63,11 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
/* Associate the OF node with the device structure so it
* can be looked up later.
*/
- fwnode_handle_get(fwnode);
- device_set_node(&mdiodev->dev, fwnode);
+ device_set_node(&mdiodev->dev, fwnode_handle_get(fwnode));
/* All data is now stored in the mdiodev struct; register it. */
rc = mdio_device_register(mdiodev);
if (rc) {
- device_set_node(&mdiodev->dev, NULL);
- fwnode_handle_put(fwnode);
mdio_device_free(mdiodev);
return rc;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index bb6e03a92956..9cb4dfc242f5 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
#define MAX_EXTRADATA_NAME_LEN (MAX_EXTRADATA_ENTRY_LEN - \
MAX_EXTRADATA_VALUE_LEN - 3)
-#define MAX_EXTRADATA_ITEMS 16
+#define MAX_USERDATA_ITEMS 256
#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
@@ -115,6 +115,8 @@ enum sysdata_feature {
SYSDATA_RELEASE = BIT(2),
/* Include a per-target message ID as part of sysdata */
SYSDATA_MSGID = BIT(3),
+ /* Sentinel: number of sysdata features above */
+ MAX_SYSDATA_ITEMS = 4,
};
/**
@@ -122,8 +124,9 @@ enum sysdata_feature {
* @list: Links this target into the target_list.
* @group: Links us into the configfs subsystem hierarchy.
* @userdata_group: Links to the userdata configfs hierarchy
- * @extradata_complete: Cached, formatted string of append
- * @userdata_length: String length of usedata in extradata_complete.
+ * @userdata: Cached, formatted string of userdata to append
+ * @userdata_length: String length of userdata.
+ * @sysdata: Cached, formatted string of sysdata to append
* @sysdata_fields: Sysdata features enabled.
* @msgcounter: Message sent counter.
* @stats: Packet send stats for the target. Used for debugging.
@@ -152,8 +155,10 @@ struct netconsole_target {
#ifdef CONFIG_NETCONSOLE_DYNAMIC
struct config_group group;
struct config_group userdata_group;
- char extradata_complete[MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS];
+ char *userdata;
size_t userdata_length;
+ char sysdata[MAX_EXTRADATA_ENTRY_LEN * MAX_SYSDATA_ITEMS];
+
/* bit-wise with sysdata_feature bits */
u32 sysdata_fields;
/* protected by target_list_lock */
@@ -802,28 +807,14 @@ out_unlock:
return ret;
}
-/* Count number of entries we have in extradata.
- * This is important because the extradata_complete only supports
- * MAX_EXTRADATA_ITEMS entries. Before enabling any new {user,sys}data
- * feature, number of entries needs to checked for available space.
+/* Count number of entries we have in userdata.
+ * This is important because userdata only supports MAX_USERDATA_ITEMS
+ * entries. Before enabling any new userdata feature, the number of entries
+ * needs to be checked for available space.
*/
-static size_t count_extradata_entries(struct netconsole_target *nt)
+static size_t count_userdata_entries(struct netconsole_target *nt)
{
- size_t entries;
-
- /* Userdata entries */
- entries = list_count_nodes(&nt->userdata_group.cg_children);
- /* Plus sysdata entries */
- if (nt->sysdata_fields & SYSDATA_CPU_NR)
- entries += 1;
- if (nt->sysdata_fields & SYSDATA_TASKNAME)
- entries += 1;
- if (nt->sysdata_fields & SYSDATA_RELEASE)
- entries += 1;
- if (nt->sysdata_fields & SYSDATA_MSGID)
- entries += 1;
-
- return entries;
+ return list_count_nodes(&nt->userdata_group.cg_children);
}
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
@@ -884,45 +875,77 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0]));
}
-static void update_userdata(struct netconsole_target *nt)
+/* Navigate configfs and calculate the length of the formatted string
+ * representing userdata.
+ * Must be called holding netconsole_subsys.su_mutex
+ */
+static int calc_userdata_len(struct netconsole_target *nt)
{
+ struct userdatum *udm_item;
+ struct config_item *item;
struct list_head *entry;
- int child_count = 0;
- unsigned long flags;
+ int len = 0;
- spin_lock_irqsave(&target_list_lock, flags);
+ list_for_each(entry, &nt->userdata_group.cg_children) {
+ item = container_of(entry, struct config_item, ci_entry);
+ udm_item = to_userdatum(item);
+ /* Skip userdata with no value set */
+ if (udm_item->value[0]) {
+ len += snprintf(NULL, 0, " %s=%s\n", item->ci_name,
+ udm_item->value);
+ }
+ }
+ return len;
+}
- /* Clear the current string in case the last userdatum was deleted */
- nt->userdata_length = 0;
- nt->extradata_complete[0] = 0;
+static int update_userdata(struct netconsole_target *nt)
+{
+ struct userdatum *udm_item;
+ struct config_item *item;
+ struct list_head *entry;
+ char *old_buf = NULL;
+ char *new_buf = NULL;
+ unsigned long flags;
+ int offset = 0;
+ int len;
- list_for_each(entry, &nt->userdata_group.cg_children) {
- struct userdatum *udm_item;
- struct config_item *item;
+ /* Calculate required buffer size */
+ len = calc_userdata_len(nt);
- if (child_count >= MAX_EXTRADATA_ITEMS) {
- spin_unlock_irqrestore(&target_list_lock, flags);
- WARN_ON_ONCE(1);
- return;
- }
- child_count++;
+ if (WARN_ON_ONCE(len > MAX_EXTRADATA_ENTRY_LEN * MAX_USERDATA_ITEMS))
+ return -ENOSPC;
+
+ /* Allocate new buffer */
+ if (len) {
+ new_buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!new_buf)
+ return -ENOMEM;
+ }
+ /* Write userdata to new buffer */
+ list_for_each(entry, &nt->userdata_group.cg_children) {
item = container_of(entry, struct config_item, ci_entry);
udm_item = to_userdatum(item);
-
/* Skip userdata with no value set */
- if (strnlen(udm_item->value, MAX_EXTRADATA_VALUE_LEN) == 0)
- continue;
-
- /* This doesn't overflow extradata_complete since it will write
- * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is
- * checked to not exceed MAX items with child_count above
- */
- nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length],
- MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
- item->ci_name, udm_item->value);
+ if (udm_item->value[0]) {
+ offset += scnprintf(&new_buf[offset], len + 1 - offset,
+ " %s=%s\n", item->ci_name,
+ udm_item->value);
+ }
}
+
+ WARN_ON_ONCE(offset != len);
+
+ /* Switch to new buffer and free old buffer */
+ spin_lock_irqsave(&target_list_lock, flags);
+ old_buf = nt->userdata;
+ nt->userdata = new_buf;
+ nt->userdata_length = offset;
spin_unlock_irqrestore(&target_list_lock, flags);
+
+ kfree(old_buf);
+
+ return 0;
}
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
@@ -946,7 +969,9 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ud = to_userdata(item->ci_parent);
nt = userdata_to_target(ud);
- update_userdata(nt);
+ ret = update_userdata(nt);
+ if (ret < 0)
+ goto out_unlock;
ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
@@ -962,7 +987,7 @@ static void disable_sysdata_feature(struct netconsole_target *nt,
enum sysdata_feature feature)
{
nt->sysdata_fields &= ~feature;
- nt->extradata_complete[nt->userdata_length] = 0;
+ nt->sysdata[0] = 0;
}
static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
@@ -982,12 +1007,6 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
if (msgid_enabled == curr)
goto unlock_ok;
- if (msgid_enabled &&
- count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
- ret = -ENOSPC;
- goto unlock;
- }
-
if (msgid_enabled)
nt->sysdata_fields |= SYSDATA_MSGID;
else
@@ -995,7 +1014,6 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
-unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
@@ -1018,12 +1036,6 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
if (release_enabled == curr)
goto unlock_ok;
- if (release_enabled &&
- count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
- ret = -ENOSPC;
- goto unlock;
- }
-
if (release_enabled)
nt->sysdata_fields |= SYSDATA_RELEASE;
else
@@ -1031,7 +1043,6 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
-unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
@@ -1054,12 +1065,6 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
if (taskname_enabled == curr)
goto unlock_ok;
- if (taskname_enabled &&
- count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
- ret = -ENOSPC;
- goto unlock;
- }
-
if (taskname_enabled)
nt->sysdata_fields |= SYSDATA_TASKNAME;
else
@@ -1067,7 +1072,6 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
unlock_ok:
ret = strnlen(buf, count);
-unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
@@ -1092,27 +1096,16 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
/* no change requested */
goto unlock_ok;
- if (cpu_nr_enabled &&
- count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
- /* user wants the new feature, but there is no space in the
- * buffer.
- */
- ret = -ENOSPC;
- goto unlock;
- }
-
if (cpu_nr_enabled)
nt->sysdata_fields |= SYSDATA_CPU_NR;
else
- /* This is special because extradata_complete might have
- * remaining data from previous sysdata, and it needs to be
- * cleaned.
+ /* This is special because sysdata might have remaining data
+ * from previous sysdata, and it needs to be cleaned.
*/
disable_sysdata_feature(nt, SYSDATA_CPU_NR);
unlock_ok:
ret = strnlen(buf, count);
-unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
@@ -1156,7 +1149,7 @@ static struct config_item *userdatum_make_item(struct config_group *group,
ud = to_userdata(&group->cg_item);
nt = userdata_to_target(ud);
- if (count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS)
+ if (count_userdata_entries(nt) >= MAX_USERDATA_ITEMS)
return ERR_PTR(-ENOSPC);
udm = kzalloc(sizeof(*udm), GFP_KERNEL);
@@ -1234,7 +1227,10 @@ static struct configfs_attribute *netconsole_target_attrs[] = {
static void netconsole_target_release(struct config_item *item)
{
- kfree(to_target(item));
+ struct netconsole_target *nt = to_target(item);
+
+ kfree(nt->userdata);
+ kfree(nt);
}
static struct configfs_item_operations netconsole_target_item_ops = {
@@ -1363,22 +1359,21 @@ static void populate_configfs_item(struct netconsole_target *nt,
static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset)
{
- /* Append cpu=%d at extradata_complete after userdata str */
- return scnprintf(&nt->extradata_complete[offset],
+ return scnprintf(&nt->sysdata[offset],
MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n",
raw_smp_processor_id());
}
static int sysdata_append_taskname(struct netconsole_target *nt, int offset)
{
- return scnprintf(&nt->extradata_complete[offset],
+ return scnprintf(&nt->sysdata[offset],
MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n",
current->comm);
}
static int sysdata_append_release(struct netconsole_target *nt, int offset)
{
- return scnprintf(&nt->extradata_complete[offset],
+ return scnprintf(&nt->sysdata[offset],
MAX_EXTRADATA_ENTRY_LEN, " release=%s\n",
init_utsname()->release);
}
@@ -1386,46 +1381,36 @@ static int sysdata_append_release(struct netconsole_target *nt, int offset)
static int sysdata_append_msgid(struct netconsole_target *nt, int offset)
{
wrapping_assign_add(nt->msgcounter, 1);
- return scnprintf(&nt->extradata_complete[offset],
+ return scnprintf(&nt->sysdata[offset],
MAX_EXTRADATA_ENTRY_LEN, " msgid=%u\n",
nt->msgcounter);
}
/*
- * prepare_extradata - append sysdata at extradata_complete in runtime
+ * prepare_sysdata - append sysdata in runtime
* @nt: target to send message to
*/
-static int prepare_extradata(struct netconsole_target *nt)
+static int prepare_sysdata(struct netconsole_target *nt)
{
- int extradata_len;
-
- /* userdata was appended when configfs write helper was called
- * by update_userdata().
- */
- extradata_len = nt->userdata_length;
+ int sysdata_len = 0;
if (!nt->sysdata_fields)
goto out;
if (nt->sysdata_fields & SYSDATA_CPU_NR)
- extradata_len += sysdata_append_cpu_nr(nt, extradata_len);
+ sysdata_len += sysdata_append_cpu_nr(nt, sysdata_len);
if (nt->sysdata_fields & SYSDATA_TASKNAME)
- extradata_len += sysdata_append_taskname(nt, extradata_len);
+ sysdata_len += sysdata_append_taskname(nt, sysdata_len);
if (nt->sysdata_fields & SYSDATA_RELEASE)
- extradata_len += sysdata_append_release(nt, extradata_len);
+ sysdata_len += sysdata_append_release(nt, sysdata_len);
if (nt->sysdata_fields & SYSDATA_MSGID)
- extradata_len += sysdata_append_msgid(nt, extradata_len);
+ sysdata_len += sysdata_append_msgid(nt, sysdata_len);
- WARN_ON_ONCE(extradata_len >
- MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS);
+ WARN_ON_ONCE(sysdata_len >
+ MAX_EXTRADATA_ENTRY_LEN * MAX_SYSDATA_ITEMS);
out:
- return extradata_len;
-}
-#else /* CONFIG_NETCONSOLE_DYNAMIC not set */
-static int prepare_extradata(struct netconsole_target *nt)
-{
- return 0;
+ return sysdata_len;
}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
@@ -1527,11 +1512,13 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
int msg_len,
int release_len)
{
- const char *extradata = NULL;
+ const char *userdata = NULL;
+ const char *sysdata = NULL;
const char *release;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- extradata = nt->extradata_complete;
+ userdata = nt->userdata;
+ sysdata = nt->sysdata;
#endif
if (release_len) {
@@ -1543,10 +1530,15 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
memcpy(nt->buf, msg, msg_len);
}
- if (extradata)
+ if (userdata)
msg_len += scnprintf(&nt->buf[msg_len],
- MAX_PRINT_CHUNK - msg_len,
- "%s", extradata);
+ MAX_PRINT_CHUNK - msg_len, "%s",
+ userdata);
+
+ if (sysdata)
+ msg_len += scnprintf(&nt->buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len, "%s",
+ sysdata);
send_udp(nt, nt->buf, msg_len);
}
@@ -1560,89 +1552,93 @@ static void append_release(char *buf)
}
static void send_fragmented_body(struct netconsole_target *nt,
- const char *msgbody, int header_len,
- int msgbody_len, int extradata_len)
+ const char *msgbody_ptr, int header_len,
+ int msgbody_len, int sysdata_len)
{
- int sent_extradata, preceding_bytes;
- const char *extradata = NULL;
- int body_len, offset = 0;
+ const char *userdata_ptr = NULL;
+ const char *sysdata_ptr = NULL;
+ int data_len, data_sent = 0;
+ int userdata_offset = 0;
+ int sysdata_offset = 0;
+ int msgbody_offset = 0;
+ int userdata_len = 0;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- extradata = nt->extradata_complete;
+ userdata_ptr = nt->userdata;
+ sysdata_ptr = nt->sysdata;
+ userdata_len = nt->userdata_length;
#endif
+ if (WARN_ON_ONCE(!userdata_ptr && userdata_len != 0))
+ return;
- /* body_len represents the number of bytes that will be sent. This is
+ if (WARN_ON_ONCE(!sysdata_ptr && sysdata_len != 0))
+ return;
+
+ /* data_len represents the number of bytes that will be sent. This is
* bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple
* packets
*/
- body_len = msgbody_len + extradata_len;
+ data_len = msgbody_len + userdata_len + sysdata_len;
/* In each iteration of the while loop below, we send a packet
- * containing the header and a portion of the body. The body is
- * composed of two parts: msgbody and extradata. We keep track of how
- * many bytes have been sent so far using the offset variable, which
- * ranges from 0 to the total length of the body.
+ * containing the header and a portion of the data. The data is
+ * composed of three parts: msgbody, userdata, and sysdata.
+ * We keep track of how many bytes have been sent from each part using
+ * the *_offset variables.
+ * We keep track of how many bytes have been sent overall using the
+ * data_sent variable, which ranges from 0 to the total bytes to be
+ * sent.
*/
- while (offset < body_len) {
- int this_header = header_len;
- bool msgbody_written = false;
- int this_offset = 0;
+ while (data_sent < data_len) {
+ int userdata_left = userdata_len - userdata_offset;
+ int sysdata_left = sysdata_len - sysdata_offset;
+ int msgbody_left = msgbody_len - msgbody_offset;
+ int buf_offset = 0;
int this_chunk = 0;
- this_header += scnprintf(nt->buf + this_header,
- MAX_PRINT_CHUNK - this_header,
- ",ncfrag=%d/%d;", offset,
- body_len);
-
- /* Not all msgbody data has been written yet */
- if (offset < msgbody_len) {
- this_chunk = min(msgbody_len - offset,
- MAX_PRINT_CHUNK - this_header);
- if (WARN_ON_ONCE(this_chunk <= 0))
- return;
- memcpy(nt->buf + this_header, msgbody + offset,
- this_chunk);
- this_offset += this_chunk;
+ /* header is already populated in nt->buf, just append to it */
+ buf_offset = header_len;
+
+ buf_offset += scnprintf(nt->buf + buf_offset,
+ MAX_PRINT_CHUNK - buf_offset,
+ ",ncfrag=%d/%d;", data_sent,
+ data_len);
+
+ /* append msgbody first */
+ this_chunk = min(msgbody_left, MAX_PRINT_CHUNK - buf_offset);
+ memcpy(nt->buf + buf_offset, msgbody_ptr + msgbody_offset,
+ this_chunk);
+ msgbody_offset += this_chunk;
+ buf_offset += this_chunk;
+ data_sent += this_chunk;
+
+ /* after msgbody, append userdata */
+ if (userdata_ptr && userdata_left) {
+ this_chunk = min(userdata_left,
+ MAX_PRINT_CHUNK - buf_offset);
+ memcpy(nt->buf + buf_offset,
+ userdata_ptr + userdata_offset, this_chunk);
+ userdata_offset += this_chunk;
+ buf_offset += this_chunk;
+ data_sent += this_chunk;
}
- /* msgbody was finally written, either in the previous
- * messages and/or in the current buf. Time to write
- * the extradata.
- */
- msgbody_written |= offset + this_offset >= msgbody_len;
-
- /* Msg body is fully written and there is pending extradata to
- * write, append extradata in this chunk
- */
- if (msgbody_written && offset + this_offset < body_len) {
- /* Track how much user data was already sent. First
- * time here, sent_userdata is zero
- */
- sent_extradata = (offset + this_offset) - msgbody_len;
- /* offset of bytes used in current buf */
- preceding_bytes = this_chunk + this_header;
-
- if (WARN_ON_ONCE(sent_extradata < 0))
- return;
-
- this_chunk = min(extradata_len - sent_extradata,
- MAX_PRINT_CHUNK - preceding_bytes);
- if (WARN_ON_ONCE(this_chunk < 0))
- /* this_chunk could be zero if all the previous
- * message used all the buffer. This is not a
- * problem, extradata will be sent in the next
- * iteration
- */
- return;
-
- memcpy(nt->buf + this_header + this_offset,
- extradata + sent_extradata,
- this_chunk);
- this_offset += this_chunk;
+ /* after userdata, append sysdata */
+ if (sysdata_ptr && sysdata_left) {
+ this_chunk = min(sysdata_left,
+ MAX_PRINT_CHUNK - buf_offset);
+ memcpy(nt->buf + buf_offset,
+ sysdata_ptr + sysdata_offset, this_chunk);
+ sysdata_offset += this_chunk;
+ buf_offset += this_chunk;
+ data_sent += this_chunk;
}
- send_udp(nt, nt->buf, this_header + this_offset);
- offset += this_offset;
+ /* if all is good, send the packet out */
+ if (WARN_ON_ONCE(data_sent > data_len))
+ return;
+
+ send_udp(nt, nt->buf, buf_offset);
}
}
@@ -1650,7 +1646,7 @@ static void send_msg_fragmented(struct netconsole_target *nt,
const char *msg,
int msg_len,
int release_len,
- int extradata_len)
+ int sysdata_len)
{
int header_len, msgbody_len;
const char *msgbody;
@@ -1679,7 +1675,7 @@ static void send_msg_fragmented(struct netconsole_target *nt,
* will be replaced
*/
send_fragmented_body(nt, msgbody, header_len, msgbody_len,
- extradata_len);
+ sysdata_len);
}
/**
@@ -1695,19 +1691,22 @@ static void send_msg_fragmented(struct netconsole_target *nt,
static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
int msg_len)
{
+ int userdata_len = 0;
int release_len = 0;
- int extradata_len;
-
- extradata_len = prepare_extradata(nt);
+ int sysdata_len = 0;
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ sysdata_len = prepare_sysdata(nt);
+ userdata_len = nt->userdata_length;
+#endif
if (nt->release)
release_len = strlen(init_utsname()->release) + 1;
- if (msg_len + release_len + extradata_len <= MAX_PRINT_CHUNK)
+ if (msg_len + release_len + sysdata_len + userdata_len <= MAX_PRINT_CHUNK)
return send_msg_no_fragmentation(nt, msg, msg_len, release_len);
return send_msg_fragmented(nt, msg, msg_len, release_len,
- extradata_len);
+ sysdata_len);
}
static void write_ext_msg(struct console *con, const char *msg,
@@ -1912,6 +1911,9 @@ fail:
static void free_param_target(struct netconsole_target *nt)
{
netpoll_cleanup(&nt->np);
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ kfree(nt->userdata);
+#endif
kfree(nt);
}
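[Editor's sketch, not part of the patch: update_userdata() above follows a build-outside-the-lock, swap-under-the-lock, free-afterwards pattern so the send paths, which read nt->userdata while holding target_list_lock, never observe a half-written buffer. A generic form of that pattern, with hypothetical names (struct my_target, publish_string) and assuming readers hold the same lock for the whole time they use the buffer:]

static int publish_string(struct my_target *t, const char *src, size_t len,
			  spinlock_t *lock)
{
	char *new_buf, *old_buf;
	unsigned long flags;

	/* Build the new buffer without holding the lock (may sleep) */
	new_buf = kmalloc(len + 1, GFP_KERNEL);
	if (!new_buf)
		return -ENOMEM;
	memcpy(new_buf, src, len);
	new_buf[len] = '\0';

	/* Swap the published pointer while readers are excluded */
	spin_lock_irqsave(lock, flags);
	old_buf = t->buf;
	t->buf = new_buf;
	t->buf_len = len;
	spin_unlock_irqrestore(lock, flags);

	/* No reader can still hold the old pointer once the lock is dropped */
	kfree(old_buf);
	return 0;
}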
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 95f66c1f59db..2683a989873e 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -320,6 +320,8 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
&nsim_dev->test1);
+ debugfs_create_u32("test2", 0600, nsim_dev->ddir,
+ &nsim_dev->test2);
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
@@ -521,8 +523,53 @@ err_out:
enum nsim_devlink_param_id {
NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
NSIM_DEVLINK_PARAM_ID_TEST1,
+ NSIM_DEVLINK_PARAM_ID_TEST2,
};
+static int
+nsim_devlink_param_test2_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ ctx->val.vu32 = nsim_dev->test2;
+ return 0;
+}
+
+static int
+nsim_devlink_param_test2_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ nsim_dev->test2 = ctx->val.vu32;
+ return 0;
+}
+
+#define NSIM_DEV_TEST2_DEFAULT 1234
+
+static int
+nsim_devlink_param_test2_get_default(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ ctx->val.vu32 = NSIM_DEV_TEST2_DEFAULT;
+ return 0;
+}
+
+static int
+nsim_devlink_param_test2_reset_default(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
+ return 0;
+}
+
static const struct devlink_param nsim_devlink_params[] = {
DEVLINK_PARAM_GENERIC(MAX_MACS,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
@@ -531,6 +578,14 @@ static const struct devlink_param nsim_devlink_params[] = {
"test1", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(NSIM_DEVLINK_PARAM_ID_TEST2,
+ "test2", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ nsim_devlink_param_test2_get,
+ nsim_devlink_param_test2_set,
+ NULL,
+ nsim_devlink_param_test2_get_default,
+ nsim_devlink_param_test2_reset_default),
};
static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
@@ -1590,6 +1645,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->fw_update_flash_chunk_time_ms = NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+ nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index fa1d97885caa..6927c1962277 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -133,15 +133,21 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!nsim_ipsec_tx(ns, skb))
goto out_drop_any;
- peer_ns = rcu_dereference(ns->peer);
- if (!peer_ns)
- goto out_drop_any;
+ /* Check if loopback mode is enabled */
+ if (dev->features & NETIF_F_LOOPBACK) {
+ peer_ns = ns;
+ peer_dev = dev;
+ } else {
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out_drop_any;
+ peer_dev = peer_ns->netdev;
+ }
dr = nsim_do_psp(skb, ns, peer_ns, &psp_ext);
if (dr)
goto out_drop_free;
- peer_dev = peer_ns->netdev;
rxq = skb_get_queue_mapping(skb);
if (rxq >= peer_dev->num_rx_queues)
rxq = rxq % peer_dev->num_rx_queues;
@@ -433,13 +439,8 @@ static int nsim_rcv(struct nsim_rq *rq, int budget)
}
/* skb might be discard at netif_receive_skb, save the len */
- skblen = skb->len;
- skb_mark_napi_id(skb, &rq->napi);
- ret = netif_receive_skb(skb);
- if (ret == NET_RX_SUCCESS)
- dev_dstats_rx_add(dev, skblen);
- else
- dev_dstats_rx_dropped(dev);
+ dev_dstats_rx_add(dev, skb->len);
+ napi_gro_receive(&rq->napi, skb);
}
nsim_start_peer_tx_queue(dev, rq);
@@ -981,7 +982,8 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
NETIF_F_LRO |
- NETIF_F_TSO;
+ NETIF_F_TSO |
+ NETIF_F_LOOPBACK;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
dev->max_mtu = ETH_MAX_MTU;
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_HW_OFFLOAD;
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index af6fcfcda8ba..d1a941e2b18f 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -331,6 +331,7 @@ struct nsim_dev {
u32 fw_update_flash_chunk_time_ms;
u32 max_macs;
bool test1;
+ u32 test2;
bool dont_allow_reload;
bool fail_reload;
struct devlink_region *dummy_region;
diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c
index 14298188c5f1..ecbe9dcf4f7d 100644
--- a/drivers/net/ovpn/netlink-gen.c
+++ b/drivers/net/ovpn/netlink-gen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/ovpn.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h
index 220b5b2fdd4f..b2301580770f 100644
--- a/drivers/net/ovpn/netlink-gen.h
+++ b/drivers/net/ovpn/netlink-gen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/ovpn.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_OVPN_GEN_H
#define _LINUX_OVPN_GEN_H
diff --git a/drivers/net/pcs/pcs-xpcs-plat.c b/drivers/net/pcs/pcs-xpcs-plat.c
index 9e1ccc319a1d..b8c48f9effbf 100644
--- a/drivers/net/pcs/pcs-xpcs-plat.c
+++ b/drivers/net/pcs/pcs-xpcs-plat.c
@@ -365,9 +365,6 @@ static int xpcs_plat_init_dev(struct dw_xpcs_plat *pxpcs)
err_clean_data:
mdiodev->dev.platform_data = NULL;
- fwnode_handle_put(dev_fwnode(&mdiodev->dev));
- device_set_node(&mdiodev->dev, NULL);
-
mdio_device_free(mdiodev);
return ret;
@@ -456,5 +453,5 @@ static struct platform_driver xpcs_plat_driver = {
module_platform_driver(xpcs_plat_driver);
MODULE_DESCRIPTION("Synopsys DesignWare XPCS platform device driver");
-MODULE_AUTHOR("Signed-off-by: Serge Semin <fancer.lancer@gmail.com>");
+MODULE_AUTHOR("Serge Semin <fancer.lancer@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 3d1bd5aac093..9679f2b35a44 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -37,6 +37,16 @@ static const int xpcs_10gkr_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_25gbaser_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_xlgmii_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -67,6 +77,40 @@ static const int xpcs_xlgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_50gbaser_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_50gbaser2_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_100gbasep_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_10gbaser_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -523,9 +567,57 @@ static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs,
return speed;
}
-static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
- struct phylink_link_state *state)
+static int xpcs_c45_read_pcs_speed(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state)
{
+ int pcs_ctrl1;
+
+ pcs_ctrl1 = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (pcs_ctrl1 < 0)
+ return pcs_ctrl1;
+
+ switch (pcs_ctrl1 & MDIO_CTRL1_SPEEDSEL) {
+ case MDIO_PCS_CTRL1_SPEED25G:
+ state->speed = SPEED_25000;
+ break;
+ case MDIO_PCS_CTRL1_SPEED50G:
+ state->speed = SPEED_50000;
+ break;
+ case MDIO_PCS_CTRL1_SPEED100G:
+ state->speed = SPEED_100000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int xpcs_resolve_pma(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state)
+{
+ int pmd_rxdet, err = 0;
+
+ /* The Meta Platforms FBNIC PMD will go into a training state for
+ * about 4 seconds when the link first comes up. During this time the
+ * PCS link will bounce. To avoid reporting link up too soon we include
+ * the PMD state provided by the driver.
+ */
+ if (xpcs->info.pma == MP_FBNIC_XPCS_PMA_100G_ID) {
+ pmd_rxdet = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_PMA_RXDET);
+ if (pmd_rxdet < 0) {
+ state->link = false;
+ return pmd_rxdet;
+ }
+
+ /* Verify Rx lanes are trained before reporting link up */
+ if (!(pmd_rxdet & MDIO_PMD_RXDET_GLOBAL)) {
+ state->link = false;
+ return 0;
+ }
+ }
+
state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
state->duplex = DUPLEX_FULL;
@@ -536,10 +628,18 @@ static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
case PHY_INTERFACE_MODE_XLGMII:
state->speed = xpcs_get_max_xlgmii_speed(xpcs, state);
break;
+ case PHY_INTERFACE_MODE_100GBASEP:
+ case PHY_INTERFACE_MODE_LAUI:
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ err = xpcs_c45_read_pcs_speed(xpcs, state);
+ break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
+
+ return err;
}
static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
@@ -945,10 +1045,10 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
phylink_resolve_c73(state);
} else {
- xpcs_resolve_pma(xpcs, state);
+ ret = xpcs_resolve_pma(xpcs, state);
}
- return 0;
+ return ret;
}
static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
@@ -1284,17 +1384,16 @@ static int xpcs_read_ids(struct dw_xpcs *xpcs)
if (ret < 0)
return ret;
- id = ret;
+ id = ret << 16;
ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2);
if (ret < 0)
return ret;
- /* Note the inverted dword order and masked out Model/Revision numbers
- * with respect to what is done with the PCS ID...
+ /* For now we only record the OUI for the PMAPMD; we may want to
+ * add the model number at some point in the future.
*/
- ret = (ret >> 10) & 0x3F;
- id |= ret << 16;
+ id |= ret & MDIO_DEVID2_OUI;
/* Set the PMA ID if it hasn't been pre-initialized */
if (xpcs->info.pma == DW_XPCS_PMA_ID_NATIVE)
@@ -1313,10 +1412,26 @@ static const struct dw_xpcs_compat synopsys_xpcs_compat[] = {
.supported = xpcs_10gkr_features,
.an_mode = DW_AN_C73,
}, {
+ .interface = PHY_INTERFACE_MODE_25GBASER,
+ .supported = xpcs_25gbaser_features,
+ .an_mode = DW_AN_C73,
+ }, {
.interface = PHY_INTERFACE_MODE_XLGMII,
.supported = xpcs_xlgmii_features,
.an_mode = DW_AN_C73,
}, {
+ .interface = PHY_INTERFACE_MODE_50GBASER,
+ .supported = xpcs_50gbaser_features,
+ .an_mode = DW_AN_C73,
+ }, {
+ .interface = PHY_INTERFACE_MODE_LAUI,
+ .supported = xpcs_50gbaser2_features,
+ .an_mode = DW_AN_C73,
+ }, {
+ .interface = PHY_INTERFACE_MODE_100GBASEP,
+ .supported = xpcs_100gbasep_features,
+ .an_mode = DW_AN_C73,
+ }, {
.interface = PHY_INTERFACE_MODE_10GBASER,
.supported = xpcs_10gbaser_features,
.an_mode = DW_10GBASER,
@@ -1495,7 +1610,8 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev)
xpcs_get_interfaces(xpcs, xpcs->pcs.supported_interfaces);
- if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID ||
+ xpcs->info.pma == MP_FBNIC_XPCS_PMA_100G_ID)
xpcs->pcs.poll = false;
else
xpcs->need_reset = true;
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index bd7a47a903ac..8f9753d4318c 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -192,16 +192,15 @@ static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
- int val;
- val = en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- ADIN_CRSM_SFT_PD_CNTRL, val);
+ ADIN_CRSM_SFT_PD_CNTRL,
+ en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0);
if (ret < 0)
return ret;
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
- (ret & ADIN_CRSM_SFT_PD_RDY) == val,
+ !!(ret & ADIN_CRSM_SFT_PD_RDY) == en,
1000, 30000, true);
}
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index d3501f8487d9..65d609ed69fb 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -780,9 +780,21 @@ out:
kfree_skb(skb);
}
-static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int bcm_ptp_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+
+ cfg->rx_filter = priv->hwts_rx ? HWTSTAMP_FILTER_PTP_V2_EVENT
+ : HWTSTAMP_FILTER_NONE;
+ cfg->tx_type = priv->tx_type;
+
+ return 0;
+}
+
+static int bcm_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
u16 mode, ctrl;
@@ -898,7 +910,8 @@ static void bcm_ptp_init(struct bcm_ptp_private *priv)
priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
- priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
+ priv->mii_ts.hwtstamp_set = bcm_ptp_hwtstamp_set;
+ priv->mii_ts.hwtstamp_get = bcm_ptp_hwtstamp_get;
priv->mii_ts.ts_info = bcm_ptp_ts_info;
priv->phydev->mii_ts = &priv->mii_ts;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 74396453f5bb..b950acc9c49b 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1176,9 +1176,21 @@ static irqreturn_t dp83640_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int dp83640_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
+
+ cfg->rx_filter = dp83640->hwts_rx_en;
+ cfg->tx_type = dp83640->hwts_tx_en;
+
+ return 0;
+}
+
+static int dp83640_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct dp83640_private *dp83640 =
container_of(mii_ts, struct dp83640_private, mii_ts);
@@ -1198,7 +1210,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
@@ -1206,7 +1218,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
@@ -1214,7 +1226,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
@@ -1222,7 +1234,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V2_EVENT;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
@@ -1407,7 +1419,8 @@ static int dp83640_probe(struct phy_device *phydev)
dp83640->phydev = phydev;
dp83640->mii_ts.rxtstamp = dp83640_rxtstamp;
dp83640->mii_ts.txtstamp = dp83640_txtstamp;
- dp83640->mii_ts.hwtstamp = dp83640_hwtstamp;
+ dp83640->mii_ts.hwtstamp_set = dp83640_hwtstamp_set;
+ dp83640->mii_ts.hwtstamp_get = dp83640_hwtstamp_get;
dp83640->mii_ts.ts_info = dp83640_ts_info;
INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 36a0c1b7f59c..5f5de01c41e1 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -937,15 +937,15 @@ static void dp83867_link_change_notify(struct phy_device *phydev)
* whenever there is a link change.
*/
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- int val = 0;
+ int val;
- val = phy_clear_bits(phydev, DP83867_CFG2,
- DP83867_SGMII_AUTONEG_EN);
- if (val < 0)
- return;
+ val = phy_modify_changed(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN, 0);
- phy_set_bits(phydev, DP83867_CFG2,
- DP83867_SGMII_AUTONEG_EN);
+ /* Keep the in-band setting made by dp83867_config_inband() */
+ if (val != 0)
+ phy_set_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
}
}
@@ -1116,6 +1116,25 @@ static int dp83867_led_polarity_set(struct phy_device *phydev, int index,
DP83867_LED_POLARITY(index), polarity);
}
+static unsigned int dp83867_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ return LINK_INBAND_ENABLE | LINK_INBAND_DISABLE;
+
+ return 0;
+}
+
+static int dp83867_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int val = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ val = DP83867_SGMII_AUTONEG_EN;
+
+ return phy_modify(phydev, DP83867_CFG2, DP83867_SGMII_AUTONEG_EN, val);
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -1149,6 +1168,9 @@ static struct phy_driver dp83867_driver[] = {
.led_hw_control_set = dp83867_led_hw_control_set,
.led_hw_control_get = dp83867_led_hw_control_get,
.led_polarity_set = dp83867_led_polarity_set,
+
+ .inband_caps = dp83867_inband_caps,
+ .config_inband = dp83867_config_inband,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 715f0356f895..50684271f81a 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -19,7 +19,6 @@
#include <linux/of.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
-#include <linux/linkmode.h>
#include "swphy.h"
@@ -125,6 +124,7 @@ static int __fixed_phy_add(int phy_addr,
fp->addr = phy_addr;
fp->status = *status;
+ fp->status.link = true;
list_add_tail(&fp->node, &fmb_phys);
@@ -173,40 +173,10 @@ struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
return ERR_PTR(-EINVAL);
}
- /* propagate the fixed link values to struct phy_device */
- phy->link = 1;
- phy->speed = status->speed;
- phy->duplex = status->duplex;
- phy->pause = status->pause;
- phy->asym_pause = status->asym_pause;
-
of_node_get(np);
phy->mdio.dev.of_node = np;
phy->is_pseudo_fixed_link = true;
- switch (status->speed) {
- case SPEED_1000:
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phy->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phy->supported);
- fallthrough;
- case SPEED_100:
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- phy->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- phy->supported);
- fallthrough;
- case SPEED_10:
- default:
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- phy->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- phy->supported);
- }
-
- phy_advertise_supported(phy);
-
ret = phy_device_register(phy);
if (ret) {
phy_device_free(phy);
diff --git a/drivers/net/phy/mdio-private.h b/drivers/net/phy/mdio-private.h
new file mode 100644
index 000000000000..8bc6d9088af1
--- /dev/null
+++ b/drivers/net/phy/mdio-private.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __MDIO_PRIVATE_H
+#define __MDIO_PRIVATE_H
+
+/* MDIO internal helpers
+ */
+
+int mdio_device_register_reset(struct mdio_device *mdiodev);
+void mdio_device_unregister_reset(struct mdio_device *mdiodev);
+
+#endif /* __MDIO_PRIVATE_H */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4354241137d5..afdf1ad6c0e6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -29,37 +29,11 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include "mdio-private.h"
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
-static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
-{
- /* Deassert the optional reset signal */
- mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev,
- "reset", GPIOD_OUT_LOW);
- if (IS_ERR(mdiodev->reset_gpio))
- return PTR_ERR(mdiodev->reset_gpio);
-
- if (mdiodev->reset_gpio)
- gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");
-
- return 0;
-}
-
-static int mdiobus_register_reset(struct mdio_device *mdiodev)
-{
- struct reset_control *reset;
-
- reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
- if (IS_ERR(reset))
- return PTR_ERR(reset);
-
- mdiodev->reset_ctrl = reset;
-
- return 0;
-}
-
int mdiobus_register_device(struct mdio_device *mdiodev)
{
int err;
@@ -68,17 +42,10 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
return -EBUSY;
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
- err = mdiobus_register_gpiod(mdiodev);
+ err = mdio_device_register_reset(mdiodev);
if (err)
return err;
- err = mdiobus_register_reset(mdiodev);
- if (err) {
- gpiod_put(mdiodev->reset_gpio);
- mdiodev->reset_gpio = NULL;
- return err;
- }
-
/* Assert the reset signal */
mdio_device_reset(mdiodev, 1);
}
@@ -94,8 +61,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
- gpiod_put(mdiodev->reset_gpio);
- reset_control_put(mdiodev->reset_ctrl);
+ mdio_device_unregister_reset(mdiodev);
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
@@ -373,7 +339,7 @@ EXPORT_SYMBOL_GPL(mdio_bus_class);
* mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
* @mdio_name: The name of a mdiobus.
*
- * Returns a reference to the mii_bus, or NULL if none found. The
+ * Return: a reference to the mii_bus, or NULL if none found. The
* embedded struct device will have its reference count incremented,
* and this must be put_deviced'ed once the bus is finished with.
*/
@@ -391,7 +357,7 @@ EXPORT_SYMBOL(mdio_find_bus);
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
* @mdio_bus_np: Pointer to the mii_bus.
*
- * Returns a reference to the mii_bus, or NULL if none found. The
+ * Return: a reference to the mii_bus, or NULL if none found. The
* embedded struct device will have its reference count incremented,
* and this must be put once the bus is finished with.
*
@@ -439,6 +405,8 @@ out:
* @addr: the phy address
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* Read a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -471,6 +439,8 @@ EXPORT_SYMBOL(__mdiobus_read);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* Write a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -504,8 +474,11 @@ EXPORT_SYMBOL(__mdiobus_write);
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
+ *
* Read, modify, and if any change, write the register value back to the
- * device. Any error returns a negative number.
+ * device.
*
* NOTE: MUST NOT be called from interrupt context.
*/
@@ -535,6 +508,8 @@ EXPORT_SYMBOL_GPL(__mdiobus_modify_changed);
* @devad: device address to read
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* Read a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -568,6 +543,8 @@ EXPORT_SYMBOL(__mdiobus_c45_read);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* Write a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -603,6 +580,9 @@ EXPORT_SYMBOL(__mdiobus_c45_write);
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
+ *
* Read, modify, and if any change, write the register value back to the
* device. Any error returns a negative number.
*
@@ -633,6 +613,8 @@ static int __mdiobus_c45_modify_changed(struct mii_bus *bus, int addr,
* @addr: the phy address
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -658,6 +640,8 @@ EXPORT_SYMBOL(mdiobus_read_nested);
* @addr: the phy address
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -681,6 +665,8 @@ EXPORT_SYMBOL(mdiobus_read);
* @devad: device address to read
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -704,6 +690,8 @@ EXPORT_SYMBOL(mdiobus_c45_read);
* @devad: device address to read
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -731,6 +719,8 @@ EXPORT_SYMBOL(mdiobus_c45_read_nested);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -757,6 +747,8 @@ EXPORT_SYMBOL(mdiobus_write_nested);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -781,6 +773,8 @@ EXPORT_SYMBOL(mdiobus_write);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -806,6 +800,8 @@ EXPORT_SYMBOL(mdiobus_c45_write);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -834,6 +830,8 @@ EXPORT_SYMBOL(mdiobus_c45_write_nested);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 0 on success, negative on any error condition
*/
int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set)
@@ -854,6 +852,8 @@ EXPORT_SYMBOL_GPL(__mdiobus_modify);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 0 on success, negative on any error condition
*/
int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
{
@@ -876,6 +876,8 @@ EXPORT_SYMBOL_GPL(mdiobus_modify);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 0 on success, negative on any error condition
*/
int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
u16 mask, u16 set)
@@ -899,6 +901,9 @@ EXPORT_SYMBOL_GPL(mdiobus_c45_modify);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
*/
int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set)
@@ -922,6 +927,9 @@ EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
*/
int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
u32 regnum, u16 mask, u16 set)
@@ -942,10 +950,10 @@ EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed);
* @dev: target MDIO device
* @drv: given MDIO driver
*
- * Description: Given a MDIO device, and a MDIO driver, return 1 if
- * the driver supports the device. Otherwise, return 0. This may
- * require calling the devices own match function, since different classes
- * of MDIO devices have different match criteria.
+ * Return: 1 if the driver supports the device, 0 otherwise
+ *
+ * Description: This may require calling the devices own match function,
+ * since different classes of MDIO devices have different match criteria.
*/
static int mdio_bus_match(struct device *dev, const struct device_driver *drv)
{
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index f64176e0e197..6e90ed42cd98 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <linux/unistd.h>
#include <linux/property.h>
+#include "mdio-private.h"
void mdio_device_free(struct mdio_device *mdiodev)
{
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mdio_device_create);
/**
* mdio_device_register - Register the mdio device on the MDIO bus
* @mdiodev: mdio_device structure to be added to the MDIO bus
+ *
+ * Return: Zero if successful, negative error code on failure
*/
int mdio_device_register(struct mdio_device *mdiodev)
{
@@ -118,6 +121,59 @@ void mdio_device_remove(struct mdio_device *mdiodev)
}
EXPORT_SYMBOL(mdio_device_remove);
+/**
+ * mdio_device_register_reset - Read and initialize the reset properties of
+ * an mdio device
+ * @mdiodev: mdio_device structure
+ *
+ * Return: Zero if successful, negative error code on failure
+ */
+int mdio_device_register_reset(struct mdio_device *mdiodev)
+{
+ struct reset_control *reset;
+
+ /* Deassert the optional reset signal */
+ mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mdiodev->reset_gpio))
+ return PTR_ERR(mdiodev->reset_gpio);
+
+ if (mdiodev->reset_gpio)
+ gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");
+
+ reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
+ if (IS_ERR(reset)) {
+ gpiod_put(mdiodev->reset_gpio);
+ mdiodev->reset_gpio = NULL;
+ return PTR_ERR(reset);
+ }
+
+ mdiodev->reset_ctrl = reset;
+
+ /* Read optional firmware properties */
+ device_property_read_u32(&mdiodev->dev, "reset-assert-us",
+ &mdiodev->reset_assert_delay);
+ device_property_read_u32(&mdiodev->dev, "reset-deassert-us",
+ &mdiodev->reset_deassert_delay);
+
+ return 0;
+}
+
+/**
+ * mdio_device_unregister_reset - uninitialize the reset properties of
+ * an mdio device
+ * @mdiodev: mdio_device structure
+ */
+void mdio_device_unregister_reset(struct mdio_device *mdiodev)
+{
+ gpiod_put(mdiodev->reset_gpio);
+ mdiodev->reset_gpio = NULL;
+ reset_control_put(mdiodev->reset_ctrl);
+ mdiodev->reset_ctrl = NULL;
+ mdiodev->reset_assert_delay = 0;
+ mdiodev->reset_deassert_delay = 0;
+}
+
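
A hedged sketch of how a registration path might pair these new helpers with the existing mdio_device_reset(); the example_* names are hypothetical and error handling is kept minimal:

static int example_attach_mdiodev(struct mdio_device *mdiodev)
{
	int err;

	/* Pick up reset-gpios, the optional "phy" reset control and the DT delays */
	err = mdio_device_register_reset(mdiodev);
	if (err)
		return err;

	/* Deassert reset, honouring reset-assert-us / reset-deassert-us */
	mdio_device_reset(mdiodev, 0);
	return 0;
}

static void example_detach_mdiodev(struct mdio_device *mdiodev)
{
	mdio_device_reset(mdiodev, 1);
	mdio_device_unregister_reset(mdiodev);
}
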
void mdio_device_reset(struct mdio_device *mdiodev, int value)
{
unsigned int d;
@@ -152,6 +208,8 @@ EXPORT_SYMBOL(mdio_device_reset);
*
* Description: Take care of setting up the mdio_device structure
* and calling the driver to probe the device.
+ *
+ * Return: Zero if successful, negative error code on failure
*/
static int mdio_probe(struct device *dev)
{
@@ -202,6 +260,8 @@ static void mdio_shutdown(struct device *dev)
/**
* mdio_driver_register - register an mdio_driver with the MDIO layer
* @drv: new mdio_driver to register
+ *
+ * Return: Zero if successful, negative error code on failure
*/
int mdio_driver_register(struct mdio_driver *drv)
{
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 57ea947369fe..05de68b9f719 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2988,6 +2988,8 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
+#define LAN8814_QSGMII_TX_CONFIG 0x35
+#define LAN8814_QSGMII_TX_CONFIG_QSGMII BIT(3)
#define LAN8814_QSGMII_SOFT_RESET 0x43
#define LAN8814_QSGMII_SOFT_RESET_BIT BIT(0)
#define LAN8814_QSGMII_PCS1G_ANEG_CONFIG 0x13
@@ -3145,9 +3147,9 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS);
}
-static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static int lan8814_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct kszphy_ptp_priv *ptp_priv =
container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
@@ -4387,7 +4389,7 @@ static void lan8814_ptp_init(struct phy_device *phydev)
ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
- ptp_priv->mii_ts.hwtstamp = lan8814_hwtstamp;
+ ptp_priv->mii_ts.hwtstamp_set = lan8814_hwtstamp_set;
ptp_priv->mii_ts.ts_info = lan8814_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
@@ -4501,12 +4503,24 @@ static void lan8814_setup_led(struct phy_device *phydev, int val)
static int lan8814_config_init(struct phy_device *phydev)
{
struct kszphy_priv *lan8814 = phydev->priv;
+ int ret;
- /* Disable ANEG with QSGMII PCS Host side */
- lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
- LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
- LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
- 0);
+	/* Based on the interface type, select how the advertised ability is
+	 * encoded: as SGMII or as USGMII.
+	 */
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_TX_CONFIG,
+ LAN8814_QSGMII_TX_CONFIG_QSGMII,
+ LAN8814_QSGMII_TX_CONFIG_QSGMII);
+ else
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_TX_CONFIG,
+ LAN8814_QSGMII_TX_CONFIG_QSGMII,
+ 0);
+
+ if (ret < 0)
+ return ret;
/* MDI-X setting for swap A,B transmit */
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_ALIGN_SWAP,
@@ -5028,9 +5042,9 @@ static void lan8841_ptp_enable_processing(struct kszphy_ptp_priv *ptp_priv,
#define LAN8841_PTP_TX_TIMESTAMP_EN 443
#define LAN8841_PTP_TX_MOD 445
-static int lan8841_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static int lan8841_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct phy_device *phydev = ptp_priv->phydev;
@@ -5910,7 +5924,7 @@ static int lan8841_probe(struct phy_device *phydev)
ptp_priv->mii_ts.rxtstamp = lan8841_rxtstamp;
ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
- ptp_priv->mii_ts.hwtstamp = lan8841_hwtstamp;
+ ptp_priv->mii_ts.hwtstamp_set = lan8841_hwtstamp_set;
ptp_priv->mii_ts.ts_info = lan8841_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
@@ -6640,6 +6654,8 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
.config_intr = lan8814_config_intr,
+ .inband_caps = lan8842_inband_caps,
+ .config_inband = lan8842_config_inband,
.handle_interrupt = lan8814_handle_interrupt,
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
index e6514ce04c29..4c6326b0ceaf 100644
--- a/drivers/net/phy/microchip_rds_ptp.c
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -476,9 +476,9 @@ static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
return true;
}
-static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mchp_rds_ptp_clock *clock =
container_of(mii_ts, struct mchp_rds_ptp_clock,
@@ -1281,7 +1281,7 @@ struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
- clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
+ clock->mii_ts.hwtstamp_set = mchp_rds_ptp_hwtstamp_set;
clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;
phydev->mii_ts = &clock->mii_ts;
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 2eef5956b9cc..65c9d7bd9315 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -85,6 +85,10 @@ enum rgmii_clock_delay {
#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
+#define MSCC_PHY_LED_BEHAVIOR 30
+#define LED_COMBINE_DIS_MASK(x) BIT(x)
+#define LED_COMBINE_DIS(x, dis) (((dis) ? 1 : 0) << (x))
+
#define MSCC_EXT_PAGE_CSR_CNTL_17 17
#define MSCC_EXT_PAGE_CSR_CNTL_18 18
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 8678ebf89cca..2b9fb8a675a6 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -22,6 +22,24 @@
#include "mscc_serdes.h"
#include "mscc.h"
+struct vsc85xx_probe_config {
+ const struct vsc85xx_hw_stat *hw_stats;
+ size_t shared_size;
+ size_t nstats;
+ u16 supp_led_modes;
+ u8 nleds;
+ bool check_rate_magic;
+ bool use_package;
+ bool has_ptp;
+};
+
+static const u32 vsc85xx_default_led_modes_4[] = {
+ VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY,
+ VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION
+};
+
static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
{
.string = "phy_receive_errors",
@@ -177,17 +195,19 @@ static int vsc85xx_led_cntl_set(struct phy_device *phydev,
u8 led_num,
u8 mode)
{
- int rc;
- u16 reg_val;
+ u16 mask = LED_MODE_SEL_MASK(led_num);
+ u16 val = LED_MODE_SEL(led_num, mode);
- mutex_lock(&phydev->lock);
- reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
- reg_val &= ~LED_MODE_SEL_MASK(led_num);
- reg_val |= LED_MODE_SEL(led_num, (u16)mode);
- rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val);
- mutex_unlock(&phydev->lock);
+ return phy_modify(phydev, MSCC_PHY_LED_MODE_SEL, mask, val);
+}
- return rc;
+static int vsc85xx_led_combine_disable_set(struct phy_device *phydev,
+ u8 led_num, bool combine_disable)
+{
+ u16 val = LED_COMBINE_DIS(led_num, combine_disable);
+ u16 mask = LED_COMBINE_DIS_MASK(led_num);
+
+ return phy_modify(phydev, MSCC_PHY_LED_BEHAVIOR, mask, val);
}
static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix)
@@ -443,7 +463,7 @@ static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
#endif /* CONFIG_OF_MDIO */
static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
- u32 *default_mode)
+ const u32 *default_mode)
{
struct vsc8531_private *priv = phydev->priv;
char led_dt_prop[28];
@@ -2218,12 +2238,13 @@ static int vsc85xx_config_inband(struct phy_device *phydev, unsigned int modes)
reg_val);
}
-static int vsc8514_probe(struct phy_device *phydev)
+static int vsc85xx_probe_common(struct phy_device *phydev,
+ const struct vsc85xx_probe_config *cfg,
+ const u32 *default_led_mode)
{
struct vsc8531_private *vsc8531;
- u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
- VSC8531_DUPLEX_COLLISION};
+ struct device_node *np;
+ int ret;
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
@@ -2231,119 +2252,291 @@ static int vsc8514_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
- vsc8584_get_base_addr(phydev);
- devm_phy_package_join(&phydev->mdio.dev, phydev,
- vsc8531->base_addr, 0);
+ /* Check rate magic if needed (only for non-package PHYs) */
+ if (cfg->check_rate_magic) {
+ ret = vsc85xx_edge_rate_magic_get(phydev);
+ if (ret < 0)
+ return ret;
+
+ vsc8531->rate_magic = ret;
+ }
+
+ /* Set up package if needed */
+ if (cfg->use_package) {
+ vsc8584_get_base_addr(phydev);
+ ret = devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr,
+ cfg->shared_size);
+ if (ret)
+ return ret;
+ }
- vsc8531->nleds = 4;
- vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc85xx_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+ /* Configure LED settings */
+ vsc8531->nleds = cfg->nleds;
+ vsc8531->supp_led_modes = cfg->supp_led_modes;
+
+ /* Configure hardware stats */
+ vsc8531->hw_stats = cfg->hw_stats;
+ vsc8531->nstats = cfg->nstats;
vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ /* PTP setup for VSC8584 */
+ if (cfg->has_ptp) {
+ if (phy_package_probe_once(phydev)) {
+ ret = vsc8584_ptp_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = vsc8584_ptp_probe(phydev);
+ if (ret)
+ return ret;
+ }
+
+	/*
+	 * If a "leds" node is present, force the default LED modes and let
+	 * the LED framework handle the configuration; otherwise fall back
+	 * to the legacy `vsc8531,led-x-mode` DT properties.
+	 */
+ np = of_get_child_by_name(phydev->mdio.dev.of_node, "leds");
+ if (np) {
+ of_node_put(np);
+
+ /* Force to defaults */
+ for (unsigned int i = 0; i < vsc8531->nleds; i++)
+ vsc8531->leds_mode[i] = default_led_mode[i];
+
+ return 0;
+ }
+
+ /* Parse LED modes from device tree */
+ return vsc85xx_dt_led_modes_get(phydev, default_led_mode);
}
-static int vsc8574_probe(struct phy_device *phydev)
+static int vsc85xx_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
{
- struct vsc8531_private *vsc8531;
- u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
- VSC8531_DUPLEX_COLLISION};
+ struct vsc8531_private *vsc8531 = phydev->priv;
- vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
- if (!vsc8531)
- return -ENOMEM;
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
- phydev->priv = vsc8531;
+ return vsc85xx_led_cntl_set(phydev, index, value == LED_OFF ?
+ VSC8531_FORCE_LED_OFF : VSC8531_FORCE_LED_ON);
+}
- vsc8584_get_base_addr(phydev);
- devm_phy_package_join(&phydev->mdio.dev, phydev,
- vsc8531->base_addr, 0);
+static int vsc85xx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ static const unsigned long supported = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+ struct vsc8531_private *vsc8531 = phydev->priv;
- vsc8531->nleds = 4;
- vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc8584_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
- if (!vsc8531->stats)
- return -ENOMEM;
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
+
+ if (rules & ~supported)
+ return -EOPNOTSUPP;
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ return 0;
}
-static int vsc8584_probe(struct phy_device *phydev)
+static int vsc85xx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
{
- struct vsc8531_private *vsc8531;
- u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
- VSC8531_DUPLEX_COLLISION};
- int ret;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u8 mode, behavior;
+ int rc;
- vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
- if (!vsc8531)
- return -ENOMEM;
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
- phydev->priv = vsc8531;
+ rc = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
+ if (rc < 0)
+ return rc;
+ mode = (rc & LED_MODE_SEL_MASK(index)) >> LED_MODE_SEL_POS(index);
+
+ rc = phy_read(phydev, MSCC_PHY_LED_BEHAVIOR);
+ if (rc < 0)
+ return rc;
+ behavior = (rc & LED_COMBINE_DIS_MASK(index)) >> index;
- vsc8584_get_base_addr(phydev);
- devm_phy_package_join(&phydev->mdio.dev, phydev, vsc8531->base_addr,
- sizeof(struct vsc85xx_shared_private));
+ switch (mode) {
+ case VSC8531_LINK_ACTIVITY:
+ case VSC8531_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
- vsc8531->nleds = 4;
- vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc8584_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
- if (!vsc8531->stats)
- return -ENOMEM;
+ case VSC8531_LINK_1000_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
- if (phy_package_probe_once(phydev)) {
- ret = vsc8584_ptp_probe_once(phydev);
- if (ret)
- return ret;
+ case VSC8531_LINK_100_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_10_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_100_1000_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_10_1000_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_10_100_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ default:
+ *rules = 0;
+ break;
}
- ret = vsc8584_ptp_probe(phydev);
- if (ret)
+ if (!behavior && *rules)
+ *rules |= BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+}
+
+static int vsc85xx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u8 mode = VSC8531_FORCE_LED_ON;
+ bool combine_disable = false;
+ bool has_rx, has_tx;
+ int ret;
+
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK))
+ mode = VSC8531_LINK_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = VSC8531_LINK_10_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = VSC8531_LINK_100_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = VSC8531_LINK_1000_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100) &&
+ rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = VSC8531_LINK_100_1000_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10) &&
+ rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = VSC8531_LINK_10_1000_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10) &&
+ rules & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = VSC8531_LINK_10_100_ACTIVITY;
+
+	/*
+	 * The VSC85xx PHYs provide an option to control LED behavior. By
+	 * default, the LEDx combine function is enabled, meaning the LED
+	 * will be on when there is link/activity or duplex/collision. If
+	 * the combine function is disabled, the LED will be on only for
+	 * link or duplex.
+	 *
+	 * To control this behavior, check the selected rules: if neither
+	 * RX nor TX activity is selected, the LED combine function is
+	 * disabled; otherwise, it remains enabled.
+	 */
+ has_rx = !!(rules & BIT(TRIGGER_NETDEV_RX));
+ has_tx = !!(rules & BIT(TRIGGER_NETDEV_TX));
+ if (!has_rx && !has_tx)
+ combine_disable = true;
+
+ ret = vsc85xx_led_combine_disable_set(phydev, index, combine_disable);
+ if (ret < 0)
return ret;
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ return vsc85xx_led_cntl_set(phydev, index, mode);
}
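
As an example of the mapping implemented above: a rules value of BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX) selects VSC8531_LINK_1000_ACTIVITY and leaves the combine function enabled, so the LED also reflects activity; BIT(TRIGGER_NETDEV_LINK_1000) alone selects the same mode but disables the combine function, so the LED reflects link state only.
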
-static int vsc85xx_probe(struct phy_device *phydev)
+static int vsc8514_probe(struct phy_device *phydev)
{
- struct vsc8531_private *vsc8531;
- int rate_magic;
- u32 default_mode[2] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY};
+ static const struct vsc85xx_probe_config vsc8514_cfg = {
+ .nleds = 4,
+ .supp_led_modes = VSC85XX_SUPP_LED_MODES,
+ .hw_stats = vsc85xx_hw_stats,
+ .nstats = ARRAY_SIZE(vsc85xx_hw_stats),
+ .use_package = true,
+ .shared_size = 0,
+ .has_ptp = false,
+ .check_rate_magic = false,
+ };
- rate_magic = vsc85xx_edge_rate_magic_get(phydev);
- if (rate_magic < 0)
- return rate_magic;
+ return vsc85xx_probe_common(phydev, &vsc8514_cfg, vsc85xx_default_led_modes_4);
+}
- vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
- if (!vsc8531)
- return -ENOMEM;
+static int vsc8574_probe(struct phy_device *phydev)
+{
+ static const struct vsc85xx_probe_config vsc8574_cfg = {
+ .nleds = 4,
+ .supp_led_modes = VSC8584_SUPP_LED_MODES,
+ .hw_stats = vsc8584_hw_stats,
+ .nstats = ARRAY_SIZE(vsc8584_hw_stats),
+ .use_package = true,
+ .shared_size = 0,
+ .has_ptp = false,
+ .check_rate_magic = false,
+ };
- phydev->priv = vsc8531;
+ return vsc85xx_probe_common(phydev, &vsc8574_cfg, vsc85xx_default_led_modes_4);
+}
- vsc8531->rate_magic = rate_magic;
- vsc8531->nleds = 2;
- vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc85xx_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
- vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
- if (!vsc8531->stats)
- return -ENOMEM;
+static int vsc8584_probe(struct phy_device *phydev)
+{
+ static const struct vsc85xx_probe_config vsc8584_cfg = {
+ .nleds = 4,
+ .supp_led_modes = VSC8584_SUPP_LED_MODES,
+ .hw_stats = vsc8584_hw_stats,
+ .nstats = ARRAY_SIZE(vsc8584_hw_stats),
+ .use_package = true,
+ .shared_size = sizeof(struct vsc85xx_shared_private),
+ .has_ptp = true,
+ .check_rate_magic = false,
+ };
+
+ return vsc85xx_probe_common(phydev, &vsc8584_cfg, vsc85xx_default_led_modes_4);
+}
+
+static int vsc85xx_probe(struct phy_device *phydev)
+{
+ static const struct vsc85xx_probe_config vsc85xx_cfg = {
+ .nleds = 2,
+ .supp_led_modes = VSC85XX_SUPP_LED_MODES,
+ .hw_stats = vsc85xx_hw_stats,
+ .nstats = ARRAY_SIZE(vsc85xx_hw_stats),
+ .use_package = false,
+ .has_ptp = false,
+ .check_rate_magic = true,
+ };
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ return vsc85xx_probe_common(phydev, &vsc85xx_cfg, vsc85xx_default_led_modes_4);
}
static void vsc85xx_remove(struct phy_device *phydev)
@@ -2376,6 +2569,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8502,
@@ -2400,6 +2597,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8504,
@@ -2427,6 +2628,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8514,
@@ -2452,6 +2657,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8530,
@@ -2476,6 +2685,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8531,
@@ -2500,6 +2713,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8540,
@@ -2524,6 +2741,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -2548,6 +2769,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8552,
@@ -2574,6 +2799,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_VSC856X),
@@ -2597,6 +2826,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8572,
@@ -2625,6 +2858,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8574,
@@ -2653,6 +2890,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_VSC8575),
@@ -2678,6 +2919,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_VSC8582),
@@ -2703,6 +2948,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_VSC8584),
@@ -2729,6 +2978,10 @@ static struct phy_driver vsc85xx_driver[] = {
.link_change_notify = &vsc85xx_link_change_notify,
.inband_caps = vsc85xx_inband_caps,
.config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
}
};
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index d692df7d975c..4865eac74b0e 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1051,9 +1051,21 @@ static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
val);
}
-static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int vsc85xx_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+
+ cfg->tx_type = vsc8531->ptp->tx_type;
+ cfg->rx_filter = vsc8531->ptp->rx_filter;
+
+ return 0;
+}
+
+static int vsc85xx_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
@@ -1611,7 +1623,8 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
- vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
+ vsc8531->mii_ts.hwtstamp_set = vsc85xx_hwtstamp_set;
+ vsc8531->mii_ts.hwtstamp_get = vsc85xx_hwtstamp_get;
vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
phydev->mii_ts = &vsc8531->mii_ts;
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 0c8dc16ee7bd..8e2fd6b942b6 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -30,6 +30,9 @@
#define PHY_ID_GPY241B 0x67C9DE40
#define PHY_ID_GPY241BM 0x67C9DE80
#define PHY_ID_GPY245B 0x67C9DEC0
+#define PHY_ID_MXL86211C 0xC1335400
+#define PHY_ID_MXL86252 0xC1335520
+#define PHY_ID_MXL86282 0xC1335500
#define PHY_CTL1 0x13
#define PHY_CTL1_MDICD BIT(3)
@@ -199,6 +202,29 @@ static int gpy_hwmon_read(struct device *dev,
return 0;
}
+static int mxl862x2_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ long tmp;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_TEMP_STA);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENODATA;
+
+ tmp = (s16)ret;
+ tmp *= 78125;
+ tmp /= 10000;
+
+ *value = tmp;
+
+ return 0;
+}
+
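
The conversion above scales the raw signed register value to millidegrees Celsius. A standalone sketch of the same arithmetic, assuming the sensor LSB is 1/128 degC (7.8125 mdegC), which the 78125/10000 factor implies; the helper name is hypothetical:

/* Equivalent to the in-line conversion in mxl862x2_hwmon_read() above */
static long mxl_temp_raw_to_millicelsius(s16 raw)
{
	/* e.g. raw = 3200  ->  3200 * 78125 / 10000 = 25000 mdegC (25 degC) */
	return (long)raw * 78125 / 10000;
}
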
static umode_t gpy_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
u32 attr, int channel)
@@ -216,19 +242,35 @@ static const struct hwmon_ops gpy_hwmon_hwmon_ops = {
.read = gpy_hwmon_read,
};
+static const struct hwmon_ops mxl862x2_hwmon_hwmon_ops = {
+ .is_visible = gpy_hwmon_is_visible,
+ .read = mxl862x2_hwmon_read,
+};
+
static const struct hwmon_chip_info gpy_hwmon_chip_info = {
.ops = &gpy_hwmon_hwmon_ops,
.info = gpy_hwmon_info,
};
+static const struct hwmon_chip_info mxl862x2_hwmon_chip_info = {
+ .ops = &mxl862x2_hwmon_hwmon_ops,
+ .info = gpy_hwmon_info,
+};
+
static int gpy_hwmon_register(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
+ const struct hwmon_chip_info *info;
struct device *hwmon_dev;
+ if (phy_id_compare_model(phydev->phy_id, PHY_ID_MXL86252) ||
+ phy_id_compare_model(phydev->phy_id, PHY_ID_MXL86282))
+ info = &mxl862x2_hwmon_chip_info;
+ else
+ info = &gpy_hwmon_chip_info;
+
hwmon_dev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
- &gpy_hwmon_chip_info,
- NULL);
+ info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -540,7 +582,7 @@ static int gpy_update_interface(struct phy_device *phydev)
/* Interface mode is fixed for USXGMII and integrated PHY */
if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
+ return 0;
/* Automatically switch SERDES interface between SGMII and 2500-BaseX
* according to speed. Disable ANEG in 2500-BaseX mode.
@@ -578,13 +620,7 @@ static int gpy_update_interface(struct phy_device *phydev)
break;
}
- if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
- ret = genphy_read_master_slave(phydev);
- if (ret < 0)
- return ret;
- }
-
- return gpy_update_mdix(phydev);
+ return 0;
}
static int gpy_read_status(struct phy_device *phydev)
@@ -639,6 +675,16 @@ static int gpy_read_status(struct phy_device *phydev)
ret = gpy_update_interface(phydev);
if (ret < 0)
return ret;
+
+ if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = gpy_update_mdix(phydev);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -1268,6 +1314,72 @@ static struct phy_driver gpy_drivers[] = {
.get_wol = gpy_get_wol,
.set_loopback = gpy_loopback,
},
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_MXL86211C),
+ .name = "Maxlinear Ethernet MxL86211C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ .led_brightness_set = gpy_led_brightness_set,
+ .led_hw_is_supported = gpy_led_hw_is_supported,
+ .led_hw_control_get = gpy_led_hw_control_get,
+ .led_hw_control_set = gpy_led_hw_control_set,
+ .led_polarity_set = gpy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_MXL86252),
+ .name = "MaxLinear Ethernet MxL86252",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ .led_brightness_set = gpy_led_brightness_set,
+ .led_hw_is_supported = gpy_led_hw_is_supported,
+ .led_hw_control_get = gpy_led_hw_control_get,
+ .led_hw_control_set = gpy_led_hw_control_set,
+ .led_polarity_set = gpy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_MXL86282),
+ .name = "MaxLinear Ethernet MxL86282",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ .led_brightness_set = gpy_led_brightness_set,
+ .led_hw_is_supported = gpy_led_hw_is_supported,
+ .led_hw_control_get = gpy_led_hw_control_get,
+ .led_hw_control_set = gpy_led_hw_control_set,
+ .led_polarity_set = gpy_led_polarity_set,
+ },
};
module_phy_driver(gpy_drivers);
@@ -1284,6 +1396,9 @@ static const struct mdio_device_id __maybe_unused gpy_tbl[] = {
{PHY_ID_MATCH_MODEL(PHY_ID_GPY241B)},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM)},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY245B)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_MXL86211C)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_MXL86252)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_MXL86282)},
{ }
};
MODULE_DEVICE_TABLE(mdio, gpy_tbl);
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 87adb6508017..f526528d2e32 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1012,9 +1012,22 @@ static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
return true;
}
-static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int nxp_c45_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
+ mii_ts);
+
+ cfg->tx_type = priv->hwts_tx;
+ cfg->rx_filter = priv->hwts_rx ? HWTSTAMP_FILTER_PTP_V2_L2_EVENT
+ : HWTSTAMP_FILTER_NONE;
+
+ return 0;
+}
+
+static int nxp_c45_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
mii_ts);
@@ -1749,7 +1762,8 @@ static int nxp_c45_probe(struct phy_device *phydev)
IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
priv->mii_ts.txtstamp = nxp_c45_txtstamp;
- priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
+ priv->mii_ts.hwtstamp_set = nxp_c45_hwtstamp_set;
+ priv->mii_ts.hwtstamp_get = nxp_c45_hwtstamp_get;
priv->mii_ts.ts_info = nxp_c45_ts_info;
phydev->mii_ts = &priv->mii_ts;
ret = nxp_c45_init_ptp_clock(priv);
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index e8e5be4684ab..f5e23b53994f 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -148,12 +148,12 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
ctrl2 |= MDIO_PMA_CTRL2_1000BT;
break;
case SPEED_2500:
- ctrl1 |= MDIO_CTRL1_SPEED2_5G;
+ ctrl1 |= MDIO_PMA_CTRL1_SPEED2_5G;
/* Assume 2.5Gbase-T */
ctrl2 |= MDIO_PMA_CTRL2_2_5GBT;
break;
case SPEED_5000:
- ctrl1 |= MDIO_CTRL1_SPEED5G;
+ ctrl1 |= MDIO_PMA_CTRL1_SPEED5G;
/* Assume 5Gbase-T */
ctrl2 |= MDIO_PMA_CTRL2_5GBT;
break;
@@ -618,10 +618,10 @@ int genphy_c45_read_pma(struct phy_device *phydev)
case MDIO_PMA_CTRL1_SPEED1000:
phydev->speed = SPEED_1000;
break;
- case MDIO_CTRL1_SPEED2_5G:
+ case MDIO_PMA_CTRL1_SPEED2_5G:
phydev->speed = SPEED_2500;
break;
- case MDIO_CTRL1_SPEED5G:
+ case MDIO_PMA_CTRL1_SPEED5G:
phydev->speed = SPEED_5000;
break;
case MDIO_CTRL1_SPEED10G:
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
index b7f0c6a3037a..4951a39f3828 100644
--- a/drivers/net/phy/phy-caps.h
+++ b/drivers/net/phy/phy-caps.h
@@ -29,6 +29,7 @@ enum {
LINK_CAPA_200000FD,
LINK_CAPA_400000FD,
LINK_CAPA_800000FD,
+ LINK_CAPA_1600000FD,
__LINK_CAPA_MAX,
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 605ca20ae192..277c034bc32f 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -17,7 +17,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 121,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 125,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -55,6 +55,8 @@ const char *phy_speed_to_str(int speed)
return "400Gbps";
case SPEED_800000:
return "800Gbps";
+ case SPEED_1600000:
+ return "1600Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -102,6 +104,49 @@ const char *phy_rate_matching_to_str(int rate_matching)
EXPORT_SYMBOL_GPL(phy_rate_matching_to_str);
/**
+ * phy_fix_phy_mode_for_mac_delays - Convenience function for fixing the PHY
+ *				      mode based on whether the MAC adds an
+ *				      internal delay
+ *
+ * @interface: The current interface mode of the port
+ * @mac_txid: True if the MAC adds the internal TX delay
+ * @mac_rxid: True if the MAC adds the internal RX delay
+ *
+ * Return: the fixed-up PHY mode, or PHY_INTERFACE_MODE_NA if the interface
+ * mode cannot accommodate the MAC-side delays
+ */
+phy_interface_t phy_fix_phy_mode_for_mac_delays(phy_interface_t interface,
+ bool mac_txid, bool mac_rxid)
+{
+ if (!phy_interface_mode_is_rgmii(interface))
+ return interface;
+
+ if (mac_txid && mac_rxid) {
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_NA;
+ }
+
+ if (mac_txid) {
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_RXID;
+ if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_NA;
+ }
+
+ if (mac_rxid) {
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_TXID;
+ if (interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_NA;
+ }
+
+ return interface;
+}
+EXPORT_SYMBOL_GPL(phy_fix_phy_mode_for_mac_delays);
+
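
A hedged usage sketch for a MAC driver that inserts its own TX delay; struct foo_mac and the surrounding driver are hypothetical:

struct foo_mac {
	phy_interface_t phy_interface;
};

/* The MAC adds the TX delay itself, so e.g. "rgmii-id" from firmware must be
 * downgraded to "rgmii-rxid" before the PHY is configured.
 */
static int foo_mac_fixup_phy_mode(struct foo_mac *mac)
{
	phy_interface_t mode;

	mode = phy_fix_phy_mode_for_mac_delays(mac->phy_interface,
					       true,	/* MAC adds TX delay */
					       false);	/* MAC does not add RX delay */
	if (mode == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	mac->phy_interface = mode;
	return 0;
}
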
+/**
* phy_interface_num_ports - Return the number of links that can be carried by
* a given MAC-PHY physical link. Returns 0 if this is
* unknown, the number of links else.
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 02da4a203ddd..13dd1691886d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -405,12 +405,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->mii_ts && phydev->mii_ts->hwtstamp) {
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp_set) {
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
hwtstamp_config_to_kernel(&kernel_cfg, &cfg);
- ret = phydev->mii_ts->hwtstamp(phydev->mii_ts, &kernel_cfg, &extack);
+ ret = phydev->mii_ts->hwtstamp_set(phydev->mii_ts,
+ &kernel_cfg,
+ &extack);
if (ret)
return ret;
@@ -476,6 +478,9 @@ int __phy_hwtstamp_get(struct phy_device *phydev,
if (!phydev)
return -ENODEV;
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp_get)
+ return phydev->mii_ts->hwtstamp_get(phydev->mii_ts, config);
+
return -EOPNOTSUPP;
}
@@ -493,8 +498,9 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
if (!phydev)
return -ENODEV;
- if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, config, extack);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp_set)
+ return phydev->mii_ts->hwtstamp_set(phydev->mii_ts, config,
+ extack);
return -EOPNOTSUPP;
}
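
For reference, a minimal sketch (hypothetical driver, not part of this series) of the split callbacks the converted drivers above now implement: hwtstamp_set validates and caches the configuration, hwtstamp_get simply reports it back.

struct foo_phy_ts {
	struct mii_timestamper mii_ts;
	struct kernel_hwtstamp_config cfg;
};

static int foo_hwtstamp_set(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_phy_ts *priv = container_of(mii_ts, struct foo_phy_ts, mii_ts);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Program the hardware here, then remember what was accepted */
	priv->cfg = *config;
	return 0;
}

static int foo_hwtstamp_get(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_phy_ts *priv = container_of(mii_ts, struct foo_phy_ts, mii_ts);

	*config = priv->cfg;
	return 0;
}
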
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 23c808b59b6f..3a05982b39bf 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -25,6 +25,7 @@ static struct link_capabilities link_caps[__LINK_CAPA_MAX] __ro_after_init = {
{ SPEED_200000, DUPLEX_FULL, {0} }, /* LINK_CAPA_200000FD */
{ SPEED_400000, DUPLEX_FULL, {0} }, /* LINK_CAPA_400000FD */
{ SPEED_800000, DUPLEX_FULL, {0} }, /* LINK_CAPA_800000FD */
+ { SPEED_1600000, DUPLEX_FULL, {0} }, /* LINK_CAPA_1600000FD */
};
static int speed_duplex_to_capa(int speed, unsigned int duplex)
@@ -52,6 +53,7 @@ static int speed_duplex_to_capa(int speed, unsigned int duplex)
case SPEED_200000: return LINK_CAPA_200000FD;
case SPEED_400000: return LINK_CAPA_400000FD;
case SPEED_800000: return LINK_CAPA_800000FD;
+ case SPEED_1600000: return LINK_CAPA_1600000FD;
}
return -EINVAL;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6e1243bf68aa..43d8380aaefb 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -640,6 +640,9 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
static void phylink_fill_fixedlink_supported(unsigned long *supported)
{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 417f9a88aab6..67ecf3d4af2b 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -90,6 +90,14 @@
#define RTL8211F_LEDCR_MASK GENMASK(4, 0)
#define RTL8211F_LEDCR_SHIFT 5
+/* On the RTL8211F(D)(I)-VD-CG, CLKOUT is configured via magic values written
+ * to an undocumented register page; the names here do not come from the
+ * datasheet. Unlike other PHY models, CLKOUT configuration does not go
+ * through PHYCR2.
+ */
+#define RTL8211FVD_CLKOUT_PAGE 0xd05
+#define RTL8211FVD_CLKOUT_REG 0x11
+#define RTL8211FVD_CLKOUT_EN BIT(8)
+
/* RTL8211F RGMII configuration */
#define RTL8211F_RGMII_PAGE 0xd08
@@ -193,9 +201,8 @@ MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
struct rtl821x_priv {
- u16 phycr1;
- u16 phycr2;
- bool has_phycr2;
+ bool enable_aldps;
+ bool disable_clk_out;
struct clk *clk;
/* rtl8211f */
u16 iner;
@@ -245,8 +252,6 @@ static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct rtl821x_priv *priv;
- u32 phy_id = phydev->drv->phy_id;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -257,24 +262,10 @@ static int rtl821x_probe(struct phy_device *phydev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get phy clock\n");
- ret = phy_read_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1);
- if (ret < 0)
- return ret;
-
- priv->phycr1 = ret & (RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF);
- if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
- priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
-
- priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
- if (priv->has_phycr2) {
- ret = phy_read_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2);
- if (ret < 0)
- return ret;
-
- priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
- if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
- priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
- }
+ priv->enable_aldps = of_property_read_bool(dev->of_node,
+ "realtek,aldps-enable");
+ priv->disable_clk_out = of_property_read_bool(dev->of_node,
+ "realtek,clkout-disable");
phydev->priv = priv;
@@ -587,22 +578,11 @@ static int rtl8211c_config_init(struct phy_device *phydev)
CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
}
-static int rtl8211f_config_init(struct phy_device *phydev)
+static int rtl8211f_config_rgmii_delay(struct phy_device *phydev)
{
- struct rtl821x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
int ret;
- ret = phy_modify_paged_changed(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1,
- RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
- priv->phycr1);
- if (ret < 0) {
- dev_err(dev, "aldps mode configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
val_txdly = 0;
@@ -632,53 +612,118 @@ static int rtl8211f_config_init(struct phy_device *phydev)
RTL8211F_TXCR, RTL8211F_TX_DELAY,
val_txdly);
if (ret < 0) {
- dev_err(dev, "Failed to update the TX delay register\n");
+ phydev_err(phydev, "Failed to update the TX delay register: %pe\n",
+ ERR_PTR(ret));
return ret;
} else if (ret) {
- dev_dbg(dev,
- "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
- str_enable_disable(val_txdly));
+ phydev_dbg(phydev,
+ "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
+ str_enable_disable(val_txdly));
} else {
- dev_dbg(dev,
- "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
- str_enabled_disabled(val_txdly));
+ phydev_dbg(phydev,
+ "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
+ str_enabled_disabled(val_txdly));
}
ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
RTL8211F_RXCR, RTL8211F_RX_DELAY,
val_rxdly);
if (ret < 0) {
- dev_err(dev, "Failed to update the RX delay register\n");
+ phydev_err(phydev, "Failed to update the RX delay register: %pe\n",
+ ERR_PTR(ret));
return ret;
} else if (ret) {
- dev_dbg(dev,
- "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
- str_enable_disable(val_rxdly));
+ phydev_dbg(phydev,
+ "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
+ str_enable_disable(val_rxdly));
} else {
- dev_dbg(dev,
- "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
- str_enabled_disabled(val_rxdly));
+ phydev_dbg(phydev,
+ "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
+ str_enabled_disabled(val_rxdly));
}
- if (!priv->has_phycr2)
+ return 0;
+}
+
+static int rtl8211f_config_clk_out(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ int ret;
+
+ /* The value is preserved if the device tree property is absent */
+ if (!priv->disable_clk_out)
+ return 0;
+
+ if (phydev->drv->phy_id == RTL_8211FVD_PHYID)
+ ret = phy_modify_paged(phydev, RTL8211FVD_CLKOUT_PAGE,
+ RTL8211FVD_CLKOUT_REG,
+ RTL8211FVD_CLKOUT_EN, 0);
+ else
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
+ RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN, 0);
+ if (ret)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+/* Advanced Link Down Power Saving (ALDPS) mode changes crystal/clock behaviour,
+ * which causes the RXC clock signal to stop for tens to hundreds of
+ * milliseconds.
+ *
+ * Some MACs need the RXC clock to support their internal RX logic, so ALDPS is
+ * only enabled based on an opt-in device tree property.
+ */
+static int rtl8211f_config_aldps(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ u16 mask = RTL8211F_ALDPS_PLL_OFF |
+ RTL8211F_ALDPS_ENABLE |
+ RTL8211F_ALDPS_XTAL_OFF;
+
+ /* The value is preserved if the device tree property is absent */
+ if (!priv->enable_aldps)
+ return 0;
+
+ return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1,
+ mask, mask);
+}
+
+static int rtl8211f_config_phy_eee(struct phy_device *phydev)
+{
+ /* RTL8211FVD has no PHYCR2 register */
+ if (phydev->drv->phy_id == RTL_8211FVD_PHYID)
return 0;
/* Disable PHY-mode EEE so LPI is passed to the MAC */
- ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
- RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
+ return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
+ RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
+}
+
+static int rtl8211f_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+
+ ret = rtl8211f_config_aldps(phydev);
+ if (ret) {
+ dev_err(dev, "aldps mode configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = rtl8211f_config_rgmii_delay(phydev);
if (ret)
return ret;
- ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
- RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN,
- priv->phycr2);
- if (ret < 0) {
+ ret = rtl8211f_config_clk_out(phydev);
+ if (ret) {
dev_err(dev, "clkout configuration failed: %pe\n",
ERR_PTR(ret));
return ret;
}
- return genphy_soft_reset(phydev);
+ return rtl8211f_config_phy_eee(phydev);
}
static int rtl821x_suspend(struct phy_device *phydev)
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index 29dc04c299a3..0a41d2b45d8c 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -1134,10 +1134,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EPERM;
}
- err = team_dev_type_check_change(dev, port_dev);
- if (err)
- return err;
-
if (port_dev->flags & IFF_UP) {
NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
@@ -1155,10 +1151,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
INIT_LIST_HEAD(&port->qom_list);
port->orig.mtu = port_dev->mtu;
- err = dev_set_mtu(port_dev, dev->mtu);
- if (err) {
- netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
- goto err_set_mtu;
+	/* MTU assignment will be handled in team_dev_type_check_change()
+	 * if dev and port_dev are of different types.
+	 */
+ if (dev->type == port_dev->type) {
+ err = dev_set_mtu(port_dev, dev->mtu);
+ if (err) {
+ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+ goto err_set_mtu;
+ }
}
memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
@@ -1233,6 +1235,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
+ err = team_dev_type_check_change(dev, port_dev);
+ if (err)
+ goto err_set_dev_type;
+
if (dev->flags & IFF_UP) {
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
@@ -1251,6 +1257,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return 0;
+err_set_dev_type:
err_set_slave_promisc:
__team_option_inst_del_port(team, port);
diff --git a/drivers/net/team/team_nl.c b/drivers/net/team/team_nl.c
index 208424ab78f5..6db21725f9cc 100644
--- a/drivers/net/team/team_nl.c
+++ b/drivers/net/team/team_nl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/team.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/net/team/team_nl.h b/drivers/net/team/team_nl.h
index c9ec1b22ac4d..74816b193475 100644
--- a/drivers/net/team/team_nl.h
+++ b/drivers/net/team/team_nl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/team.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_TEAM_GEN_H
#define _LINUX_TEAM_GEN_H
diff --git a/drivers/net/tun_vnet.h b/drivers/net/tun_vnet.h
index 81662328b2c7..a5f93b6c4482 100644
--- a/drivers/net/tun_vnet.h
+++ b/drivers/net/tun_vnet.h
@@ -244,7 +244,7 @@ tun_vnet_hdr_tnl_from_skb(unsigned int flags,
if (virtio_net_hdr_tnl_from_skb(skb, tnl_hdr, has_tnl_offload,
tun_vnet_is_little_endian(flags),
- vlan_hlen)) {
+ vlan_hlen, true)) {
struct virtio_net_hdr_v1 *hdr = &tnl_hdr->hash_hdr.hdr;
struct skb_shared_info *sinfo = skb_shinfo(skb);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3d10cf791c51..1d9faa70ba3b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -142,16 +142,16 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
if (alt->desc.bAlternateSetting != 0 ||
!(dev->driver_info->flags & FLAG_NO_SETINT)) {
- tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
- alt->desc.bAlternateSetting);
+ tmp = usb_set_interface(dev->udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
if (tmp < 0)
return tmp;
}
- dev->in = usb_rcvbulkpipe (dev->udev,
- in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->out = usb_sndbulkpipe (dev->udev,
- out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
dev->status = status;
return 0;
}
@@ -163,7 +163,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
int tmp = -1, ret;
unsigned char buf [13];
- ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+ ret = usb_string(dev->udev, iMACAddress, buf, sizeof(buf));
if (ret == 12)
tmp = hex2bin(addr, buf, 6);
if (tmp < 0) {
@@ -215,7 +215,7 @@ static void intr_complete(struct urb *urb)
break;
}
- status = usb_submit_urb (urb, GFP_ATOMIC);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status != 0)
netif_err(dev, timer, dev->net,
"intr resubmit --> %d\n", status);
@@ -231,24 +231,24 @@ static int init_status(struct usbnet *dev, struct usb_interface *intf)
if (!dev->driver_info->status)
return 0;
- pipe = usb_rcvintpipe (dev->udev,
- dev->status->desc.bEndpointAddress
- & USB_ENDPOINT_NUMBER_MASK);
+ pipe = usb_rcvintpipe(dev->udev,
+ dev->status->desc.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK);
maxp = usb_maxpacket(dev->udev, pipe);
/* avoid 1 msec chatter: min 8 msec poll rate */
period = max ((int) dev->status->desc.bInterval,
(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
- buf = kmalloc (maxp, GFP_KERNEL);
+ buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {
- dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
+ dev->interrupt = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt) {
- kfree (buf);
+ kfree(buf);
return -ENOMEM;
} else {
usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
- buf, maxp, intr_complete, dev, period);
+ buf, maxp, intr_complete, dev, period);
dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
dev_dbg(&intf->dev,
"status ep%din, %d bytes period %d\n",
@@ -339,7 +339,7 @@ void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb)
/* only update if unset to allow minidriver rx_fixup override */
if (skb->protocol == 0)
- skb->protocol = eth_type_trans (skb, dev->net);
+ skb->protocol = eth_type_trans(skb, dev->net);
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
u64_stats_inc(&stats64->rx_packets);
@@ -347,8 +347,8 @@ void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb)
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
- skb->len + sizeof (struct ethhdr), skb->protocol);
- memset (skb->cb, 0, sizeof (struct skb_data));
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+ memset(skb->cb, 0, sizeof(struct skb_data));
if (skb_defer_rx_timestamp(skb))
return;
@@ -511,8 +511,8 @@ static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
- usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
- usb_free_urb (urb);
+ usbnet_defer_kevent(dev, EVENT_RX_MEMORY);
+ usb_free_urb(urb);
return -ENOMEM;
}
@@ -521,27 +521,27 @@ static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
entry->dev = dev;
entry->length = 0;
- usb_fill_bulk_urb (urb, dev->udev, dev->in,
- skb->data, size, rx_complete, skb);
+ usb_fill_bulk_urb(urb, dev->udev, dev->in,
+ skb->data, size, rx_complete, skb);
- spin_lock_irqsave (&dev->rxq.lock, lockflags);
+ spin_lock_irqsave(&dev->rxq.lock, lockflags);
- if (netif_running (dev->net) &&
- netif_device_present (dev->net) &&
+ if (netif_running(dev->net) &&
+ netif_device_present(dev->net) &&
test_bit(EVENT_DEV_OPEN, &dev->flags) &&
- !test_bit (EVENT_RX_HALT, &dev->flags) &&
- !test_bit (EVENT_DEV_ASLEEP, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ !test_bit(EVENT_DEV_ASLEEP, &dev->flags) &&
!usbnet_going_away(dev)) {
- switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+ switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) {
case -EPIPE:
- usbnet_defer_kevent (dev, EVENT_RX_HALT);
+ usbnet_defer_kevent(dev, EVENT_RX_HALT);
break;
case -ENOMEM:
- usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
+ usbnet_defer_kevent(dev, EVENT_RX_MEMORY);
break;
case -ENODEV:
netif_dbg(dev, ifdown, dev->net, "device gone\n");
- netif_device_detach (dev->net);
+ netif_device_detach(dev->net);
break;
case -EHOSTUNREACH:
retval = -ENOLINK;
@@ -558,10 +558,10 @@ static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
retval = -ENOLINK;
}
- spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
+ spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
if (retval) {
- dev_kfree_skb_any (skb);
- usb_free_urb (urb);
+ dev_kfree_skb_any(skb);
+ usb_free_urb(urb);
}
return retval;
}
@@ -572,7 +572,7 @@ static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
static inline int rx_process(struct usbnet *dev, struct sk_buff *skb)
{
if (dev->driver_info->rx_fixup &&
- !dev->driver_info->rx_fixup (dev, skb)) {
+ !dev->driver_info->rx_fixup(dev, skb)) {
/* With RX_ASSEMBLE, rx_fixup() must update counters */
if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
dev->net->stats.rx_errors++;
@@ -605,7 +605,7 @@ static void rx_complete(struct urb *urb)
int urb_status = urb->status;
enum skb_state state;
- skb_put (skb, urb->actual_length);
+ skb_put(skb, urb->actual_length);
state = rx_done;
entry->urb = NULL;
@@ -621,7 +621,7 @@ static void rx_complete(struct urb *urb)
*/
case -EPIPE:
dev->net->stats.rx_errors++;
- usbnet_defer_kevent (dev, EVENT_RX_HALT);
+ usbnet_defer_kevent(dev, EVENT_RX_HALT);
fallthrough;
/* software-driven interface shutdown */
@@ -639,8 +639,8 @@ static void rx_complete(struct urb *urb)
case -ETIME:
case -EILSEQ:
dev->net->stats.rx_errors++;
- if (!timer_pending (&dev->delay)) {
- mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
+ if (!timer_pending(&dev->delay)) {
+ mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
netif_dbg(dev, link, dev->net,
"rx throttle %d\n", urb_status);
}
@@ -676,14 +676,14 @@ block:
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
- if (netif_running (dev->net) &&
- !test_bit (EVENT_RX_HALT, &dev->flags) &&
+ if (netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
state != unlink_start) {
- rx_submit (dev, urb, GFP_ATOMIC);
+ rx_submit(dev, urb, GFP_ATOMIC);
usb_mark_last_busy(dev->udev);
return;
}
- usb_free_urb (urb);
+ usb_free_urb(urb);
}
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
@@ -761,7 +761,7 @@ found:
spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
- retval = usb_unlink_urb (urb);
+ retval = usb_unlink_urb(urb);
if (retval != -EINPROGRESS && retval != 0)
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
@@ -769,7 +769,7 @@ found:
usb_put_urb(urb);
spin_lock_irqsave(&q->lock, flags);
}
- spin_unlock_irqrestore (&q->lock, flags);
+ spin_unlock_irqrestore(&q->lock, flags);
return count;
}
@@ -830,7 +830,7 @@ int usbnet_stop(struct net_device *net)
int retval, pm, mpn;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
- netif_stop_queue (net);
+ netif_stop_queue(net);
netdev_reset_queue(net);
netif_info(dev, ifdown, dev->net,
@@ -910,23 +910,29 @@ int usbnet_open(struct net_device *net)
}
// put into "known safe" state
- if (info->reset && (retval = info->reset (dev)) < 0) {
- netif_info(dev, ifup, dev->net,
- "open reset fail (%d) usbnet usb-%s-%s, %s\n",
- retval,
- dev->udev->bus->bus_name,
- dev->udev->devpath,
- info->description);
- goto done;
+ if (info->reset) {
+ retval = info->reset(dev);
+ if (retval < 0) {
+ netif_info(dev, ifup, dev->net,
+ "open reset fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
+ goto done;
+ }
}
/* hard_mtu or rx_urb_size may change in reset() */
usbnet_update_max_qlen(dev);
// insist peer be connected
- if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
- netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
- goto done;
+ if (info->check_connect) {
+ retval = info->check_connect(dev);
+ if (retval < 0) {
+ netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
+ goto done;
+ }
}
/* start any status interrupt transfer */
@@ -1056,7 +1062,7 @@ u32 usbnet_get_link(struct net_device *net)
/* If a check_connect is defined, return its result */
if (dev->driver_info->check_connect)
- return dev->driver_info->check_connect (dev) == 0;
+ return dev->driver_info->check_connect(dev) == 0;
/* if the device has mii operations, use those */
if (dev->mii.mdio_read)
@@ -1085,7 +1091,7 @@ void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
strscpy(info->driver, dev->driver_name, sizeof(info->driver));
strscpy(info->fw_version, dev->driver_info->description,
sizeof(info->fw_version));
- usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
@@ -1175,64 +1181,64 @@ usbnet_deferred_kevent(struct work_struct *work)
int status;
/* usb_clear_halt() needs a thread context */
- if (test_bit (EVENT_TX_HALT, &dev->flags)) {
- unlink_urbs (dev, &dev->txq);
+ if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->txq);
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_pipe;
- status = usb_clear_halt (dev->udev, dev->out);
+ status = usb_clear_halt(dev->udev, dev->out);
usb_autopm_put_interface(dev->intf);
if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) {
- if (netif_msg_tx_err (dev))
+ if (netif_msg_tx_err(dev))
fail_pipe:
netdev_err(dev->net, "can't clear tx halt, status %d\n",
status);
} else {
- clear_bit (EVENT_TX_HALT, &dev->flags);
+ clear_bit(EVENT_TX_HALT, &dev->flags);
if (status != -ESHUTDOWN)
- netif_wake_queue (dev->net);
+ netif_wake_queue(dev->net);
}
}
- if (test_bit (EVENT_RX_HALT, &dev->flags)) {
- unlink_urbs (dev, &dev->rxq);
+ if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->rxq);
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_halt;
- status = usb_clear_halt (dev->udev, dev->in);
+ status = usb_clear_halt(dev->udev, dev->in);
usb_autopm_put_interface(dev->intf);
if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) {
- if (netif_msg_rx_err (dev))
+ if (netif_msg_rx_err(dev))
fail_halt:
netdev_err(dev->net, "can't clear rx halt, status %d\n",
status);
} else {
- clear_bit (EVENT_RX_HALT, &dev->flags);
+ clear_bit(EVENT_RX_HALT, &dev->flags);
if (!usbnet_going_away(dev))
queue_work(system_bh_wq, &dev->bh_work);
}
}
/* work could resubmit itself forever if memory is tight */
- if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
+ if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
int resched = 1;
- if (netif_running (dev->net))
- urb = usb_alloc_urb (0, GFP_KERNEL);
+ if (netif_running(dev->net))
+ urb = usb_alloc_urb(0, GFP_KERNEL);
else
- clear_bit (EVENT_RX_MEMORY, &dev->flags);
+ clear_bit(EVENT_RX_MEMORY, &dev->flags);
if (urb != NULL) {
- clear_bit (EVENT_RX_MEMORY, &dev->flags);
+ clear_bit(EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
if (status < 0) {
usb_free_urb(urb);
goto fail_lowmem;
}
- if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+ if (rx_submit(dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
fail_lowmem:
@@ -1246,7 +1252,7 @@ fail_lowmem:
const struct driver_info *info = dev->driver_info;
int retval = 0;
- clear_bit (EVENT_LINK_RESET, &dev->flags);
+ clear_bit(EVENT_LINK_RESET, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto skip_reset;
@@ -1266,10 +1272,10 @@ skip_reset:
__handle_link_change(dev);
}
- if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
+ if (test_bit(EVENT_LINK_CHANGE, &dev->flags))
__handle_link_change(dev);
- if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
+ if (test_bit(EVENT_SET_RX_MODE, &dev->flags))
__handle_set_rx_mode(dev);
@@ -1298,7 +1304,7 @@ static void tx_complete(struct urb *urb)
switch (urb->status) {
case -EPIPE:
- usbnet_defer_kevent (dev, EVENT_TX_HALT);
+ usbnet_defer_kevent(dev, EVENT_TX_HALT);
break;
/* software-driven interface shutdown */
@@ -1313,13 +1319,13 @@ static void tx_complete(struct urb *urb)
case -ETIME:
case -EILSEQ:
usb_mark_last_busy(dev->udev);
- if (!timer_pending (&dev->delay)) {
- mod_timer (&dev->delay,
- jiffies + THROTTLE_JIFFIES);
+ if (!timer_pending(&dev->delay)) {
+ mod_timer(&dev->delay,
+ jiffies + THROTTLE_JIFFIES);
netif_dbg(dev, link, dev->net,
"tx throttle %d\n", urb->status);
}
- netif_stop_queue (dev->net);
+ netif_stop_queue(dev->net);
break;
default:
netif_dbg(dev, tx_err, dev->net,
@@ -1338,7 +1344,7 @@ void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct usbnet *dev = netdev_priv(net);
- unlink_urbs (dev, &dev->txq);
+ unlink_urbs(dev, &dev->txq);
queue_work(system_bh_wq, &dev->bh_work);
/* this needs to be handled individually because the generic layer
* doesn't know what is sufficient and could not restore private
@@ -1400,7 +1406,7 @@ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
// some devices want funky USB-level framing, for
// win32 driver (usually) and/or hardware quirks
if (info->tx_fixup) {
- skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
+ skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
if (!skb) {
/* packet collected; minidriver waiting for more */
if (info->flags & FLAG_MULTI_PACKET)
@@ -1410,7 +1416,8 @@ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
}
}
- if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
netif_dbg(dev, tx_err, dev->net, "no urb\n");
goto drop;
}
@@ -1419,8 +1426,8 @@ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
entry->urb = urb;
entry->dev = dev;
- usb_fill_bulk_urb (urb, dev->udev, dev->out,
- skb->data, skb->len, tx_complete, skb);
+ usb_fill_bulk_urb(urb, dev->udev, dev->out,
+ skb->data, skb->len, tx_complete, skb);
if (dev->can_dma_sg) {
if (build_dma_sg(skb, urb) < 0)
goto drop;
@@ -1490,8 +1497,8 @@ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
case -EPIPE:
- netif_stop_queue (net);
- usbnet_defer_kevent (dev, EVENT_TX_HALT);
+ netif_stop_queue(net);
+ usbnet_defer_kevent(dev, EVENT_TX_HALT);
usb_autopm_put_interface_async(dev->intf);
break;
default:
@@ -1506,7 +1513,7 @@ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
if (dev->txq.qlen >= TX_QLEN (dev))
netif_stop_queue (net);
}
- spin_unlock_irqrestore (&dev->txq.lock, flags);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
if (retval) {
netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
@@ -1514,7 +1521,7 @@ drop:
dev->net->stats.tx_dropped++;
not_drop:
if (skb)
- dev_kfree_skb_any (skb);
+ dev_kfree_skb_any(skb);
if (urb) {
kfree(urb->sg);
usb_free_urb(urb);
@@ -1625,7 +1632,7 @@ static void usbnet_bh(struct timer_list *t)
queue_work(system_bh_wq, &dev->bh_work);
}
if (dev->txq.qlen < TX_QLEN (dev))
- netif_wake_queue (dev->net);
+ netif_wake_queue(dev->net);
}
}
@@ -1658,7 +1665,7 @@ void usbnet_disconnect(struct usb_interface *intf)
return;
usbnet_mark_going_away(dev);
- xdev = interface_to_usbdev (intf);
+ xdev = interface_to_usbdev(intf);
netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
intf->dev.driver->name,
@@ -1666,7 +1673,7 @@ void usbnet_disconnect(struct usb_interface *intf)
dev->driver_info->description);
net = dev->net;
- unregister_netdev (net);
+ unregister_netdev(net);
cancel_work_sync(&dev->kevent);
@@ -1737,7 +1744,7 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
dev_dbg (&udev->dev, "blacklisted by %s\n", name);
return -ENODEV;
}
- xdev = interface_to_usbdev (udev);
+ xdev = interface_to_usbdev(udev);
interface = udev->cur_altsetting;
status = -ENOMEM;
@@ -1767,10 +1774,10 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init(&dev->rxq_pause);
spin_lock_init(&dev->bql_spinlock);
INIT_WORK(&dev->bh_work, usbnet_bh_work);
- INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
+ INIT_WORK(&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
timer_setup(&dev->delay, usbnet_bh, 0);
- mutex_init (&dev->phy_mutex);
+ mutex_init(&dev->phy_mutex);
mutex_init(&dev->interrupt_mutex);
dev->interrupt_count = 0;
@@ -1792,7 +1799,7 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
// allow device-specific bind/init procedures
// NOTE net->name still not usable ...
if (info->bind) {
- status = info->bind (dev, udev);
+ status = info->bind(dev, udev);
if (status < 0)
goto out1;
@@ -1817,18 +1824,18 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
if (net->mtu > (dev->hard_mtu - net->hard_header_len))
net->mtu = dev->hard_mtu - net->hard_header_len;
} else if (!info->in || !info->out)
- status = usbnet_get_endpoints (dev, udev);
+ status = usbnet_get_endpoints(dev, udev);
else {
u8 ep_addrs[3] = {
info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
};
- dev->in = usb_rcvbulkpipe (xdev, info->in);
- dev->out = usb_sndbulkpipe (xdev, info->out);
+ dev->in = usb_rcvbulkpipe(xdev, info->in);
+ dev->out = usb_sndbulkpipe(xdev, info->out);
if (!(info->flags & FLAG_NO_SETINT))
- status = usb_set_interface (xdev,
- interface->desc.bInterfaceNumber,
- interface->desc.bAlternateSetting);
+ status = usb_set_interface(xdev,
+ interface->desc.bInterfaceNumber,
+ interface->desc.bAlternateSetting);
else
status = 0;
@@ -1836,7 +1843,7 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
status = -EINVAL;
}
if (status >= 0 && dev->status)
- status = init_status (dev, udev);
+ status = init_status(dev, udev);
if (status < 0)
goto out3;
@@ -1870,7 +1877,7 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
}
}
- status = register_netdev (net);
+ status = register_netdev(net);
if (status)
goto out5;
netif_info(dev, probe, dev->net,
@@ -1881,9 +1888,9 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
net->dev_addr);
// ok, it's ready to go.
- usb_set_intfdata (udev, dev);
+ usb_set_intfdata(udev, dev);
- netif_device_attach (net);
+ netif_device_attach(net);
if (dev->driver_info->flags & FLAG_LINK_INTR)
usbnet_link_change(dev, 0, 0);
@@ -1896,7 +1903,7 @@ out4:
usb_free_urb(dev->interrupt);
out3:
if (info->unbind)
- info->unbind (dev, udev);
+ info->unbind(dev, udev);
out1:
/* subdrivers must undo all they did in bind() if they
* fail it, but we may fail later and a deferred kevent
@@ -1938,7 +1945,7 @@ int usbnet_suspend(struct usb_interface *intf, pm_message_t message)
* accelerate emptying of the rx and queues, to avoid
* having everything error out.
*/
- netif_device_detach (dev->net);
+ netif_device_detach(dev->net);
usbnet_terminate_urbs(dev);
__usbnet_status_stop_force(dev);
@@ -1946,7 +1953,7 @@ int usbnet_suspend(struct usb_interface *intf, pm_message_t message)
* reattach so runtime management can use and
* wake the device
*/
- netif_device_attach (dev->net);
+ netif_device_attach(dev->net);
}
return 0;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87a63c4bee77..14e6f2a2fb77 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -392,14 +392,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Restore Eth hdr pulled by dev_forward_skb/eth_type_trans */
__skb_push(skb, ETH_HLEN);
- /* Depend on prior success packets started NAPI consumer via
- * __veth_xdp_flush(). Cancel TXQ stop if consumer stopped,
- * paired with empty check in veth_poll().
- */
netif_tx_stop_queue(txq);
- smp_mb__after_atomic();
- if (unlikely(__ptr_ring_empty(&rq->xdp_ring)))
- netif_tx_wake_queue(txq);
+ /* Makes sure the NAPI peer consumer runs. The consumer is responsible
+ * for starting the txq again; until then, ndo_start_xmit (this
+ * function) will not be invoked by the netstack again.
+ */
+ __veth_xdp_flush(rq);
break;
case NET_RX_DROP: /* same as NET_XMIT_DROP */
drop:
@@ -900,17 +898,9 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
- struct veth_priv *priv = netdev_priv(rq->dev);
- int queue_idx = rq->xdp_rxq.queue_index;
- struct netdev_queue *peer_txq;
- struct net_device *peer_dev;
int i, done = 0, n_xdpf = 0;
void *xdpf[VETH_XDP_BATCH];
- /* NAPI functions as RCU section */
- peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
- peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
-
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
@@ -959,9 +949,6 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
- if (peer_txq && unlikely(netif_tx_queue_stopped(peer_txq)))
- netif_tx_wake_queue(peer_txq);
-
return done;
}
@@ -969,17 +956,28 @@ static int veth_poll(struct napi_struct *napi, int budget)
{
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
+ struct veth_priv *priv = netdev_priv(rq->dev);
+ int queue_idx = rq->xdp_rxq.queue_index;
+ struct netdev_queue *peer_txq;
struct veth_stats stats = {};
+ struct net_device *peer_dev;
struct veth_xdp_tx_bq bq;
int done;
bq.count = 0;
+ /* NAPI functions as RCU section */
+ peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
+ peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
+
xdp_set_return_frame_no_direct();
done = veth_xdp_rcv(rq, budget, &bq, &stats);
if (stats.xdp_redirect > 0)
xdp_do_flush();
+ if (stats.xdp_tx > 0)
+ veth_xdp_flush(rq, &bq);
+ xdp_clear_return_frame_no_direct();
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
@@ -992,9 +990,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
}
}
- if (stats.xdp_tx > 0)
- veth_xdp_flush(rq, &bq);
- xdp_clear_return_frame_no_direct();
+ /* Release backpressure per NAPI poll */
+ smp_rmb(); /* Paired with netif_tx_stop_queue set_bit */
+ if (peer_txq && netif_tx_queue_stopped(peer_txq)) {
+ txq_trans_cond_update(peer_txq);
+ netif_tx_wake_queue(peer_txq);
+ }
return done;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cfa006b88688..1bb3aeca66c6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -775,10 +775,26 @@ static bool virtqueue_napi_complete(struct napi_struct *napi,
return false;
}
+static void virtnet_tx_wake_queue(struct virtnet_info *vi,
+ struct send_queue *sq)
+{
+ unsigned int index = vq2txq(sq->vq);
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ if (netif_tx_queue_stopped(txq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
+ netif_tx_wake_queue(txq);
+ }
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
- struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
+ unsigned int index = vq2txq(vq);
+ struct send_queue *sq = &vi->sq[index];
+ struct napi_struct *napi = &sq->napi;
/* Suppress further interrupts. */
virtqueue_disable_cb(vq);
@@ -786,8 +802,7 @@ static void skb_xmit_done(struct virtqueue *vq)
if (napi->weight)
virtqueue_napi_schedule(napi, vq);
else
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(vi->dev, vq2txq(vq));
+ virtnet_tx_wake_queue(vi, sq);
}
#define MRG_CTX_HEADER_SHIFT 22
@@ -3080,13 +3095,8 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
free_old_xmit(sq, txq, !!budget);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
- if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 &&
- netif_tx_queue_stopped(txq)) {
- u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_inc(&sq->stats.wake);
- u64_stats_update_end(&sq->stats.syncp);
- netif_tx_wake_queue(txq);
- }
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
+ virtnet_tx_wake_queue(vi, sq);
__netif_tx_unlock(txq);
}
@@ -3276,13 +3286,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
else
free_old_xmit(sq, txq, !!budget);
- if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 &&
- netif_tx_queue_stopped(txq)) {
- u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_inc(&sq->stats.wake);
- u64_stats_update_end(&sq->stats.syncp);
- netif_tx_wake_queue(txq);
- }
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
+ virtnet_tx_wake_queue(vi, sq);
if (xsk_done >= budget) {
__netif_tx_unlock(txq);
@@ -3339,7 +3344,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
- virtio_is_little_endian(vi->vdev), 0))
+ virtio_is_little_endian(vi->vdev), 0,
+ false))
return -EPROTO;
if (vi->mergeable_rx_bufs)
@@ -3537,6 +3543,9 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
/* Prevent the upper layer from trying to send packets. */
netif_stop_subqueue(vi->dev, qindex);
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.stop);
+ u64_stats_update_end(&sq->stats.syncp);
__netif_tx_unlock_bh(txq);
}
@@ -3553,7 +3562,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_lock_bh(txq);
sq->reset = false;
- netif_tx_wake_queue(txq);
+ virtnet_tx_wake_queue(vi, sq);
__netif_tx_unlock_bh(txq);
if (running)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index cc4d7573839d..a14d0ad978e1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1081,23 +1081,11 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev,
return 0;
}
-static int
-vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
- u32 *rules)
+static u32 vmxnet3_get_rx_ring_count(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_rx_queues;
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
- return err;
+ return adapter->num_rx_queues;
}
#ifdef VMXNET3_RSS
@@ -1335,7 +1323,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
- .get_rxnfc = vmxnet3_get_rxnfc,
+ .get_rx_ring_count = vmxnet3_get_rx_ring_count,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh = vmxnet3_get_rss,
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index 99fe772ad679..b1eec2216360 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -188,8 +188,6 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
__be16 port, __be32 src_vni, __be32 vni,
u32 ifindex, bool swdev_notify);
-u32 eth_vni_hash(const unsigned char *addr, __be32 vni);
-u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni);
int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index d50c35328363..7b9d9989e517 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -7759,6 +7759,13 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&list);
list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ /* The operating channel (tx_null == true) should
+ * not be last in the list, to avoid breaking
+ * RTL8851BU and RTL8832BU.
+ */
+ if (list_len + 1 == RTW89_SCAN_LIST_LIMIT_AX && ch_info->tx_null)
+ break;
+
list_move_tail(&ch_info->list, &list);
list_len++;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 33d6342124bc..301a9d294d30 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -21,7 +21,8 @@ static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = {
/* Get the param values for the specific param ID's */
static int ipc_devlink_get_param(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index c814fbd756a1..0dace12f5ad0 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -78,8 +78,9 @@ struct mhi_mbim_context {
struct mbim_tx_hdr {
struct usb_cdc_ncm_nth16 nth16;
+
+ /* Must be last as it ends in a flexible-array member. */
struct usb_cdc_ncm_ndp16 ndp16;
- struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
@@ -98,7 +99,7 @@ static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim
static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
{
if (strcmp(cntrl->name, "foxconn-dw5934e") == 0 ||
- strcmp(cntrl->name, "foxconn-t99w515") == 0)
+ strcmp(cntrl->name, "foxconn-t99w640") == 0)
return WDS_BIND_MUX_DATA_PORT_MUX_ID;
return 0;
@@ -107,20 +108,20 @@ static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
+ DEFINE_RAW_FLEX(struct mbim_tx_hdr, mbim_hdr, ndp16.dpe16, 2);
unsigned int dgram_size = skb->len;
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
- struct mbim_tx_hdr *mbim_hdr;
/* Only one NDP is sent, containing the IP packet (no aggregation) */
/* Ensure we have enough headroom for crafting MBIM header */
- if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
+ if (skb_cow_head(skb, __struct_size(mbim_hdr))) {
dev_kfree_skb_any(skb);
return NULL;
}
- mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
+ mbim_hdr = skb_push(skb, __struct_size(mbim_hdr));
/* Fill NTB header */
nth16 = &mbim_hdr->nth16;
@@ -133,12 +134,11 @@ static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
/* Fill the unique NDP */
ndp16 = &mbim_hdr->ndp16;
ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
- ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
- + sizeof(struct usb_cdc_ncm_dpe16) * 2);
+ ndp16->wLength = cpu_to_le16(struct_size(ndp16, dpe16, 2));
ndp16->wNextNdpIndex = 0;
/* Datagram follows the mbim header */
- ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
+ ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(__struct_size(mbim_hdr));
ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
/* null termination */
@@ -584,7 +584,8 @@ static void mhi_mbim_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
ndev->type = ARPHRD_RAWIP;
- ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
+ ndev->needed_headroom =
+ struct_size_t(struct mbim_tx_hdr, ndp16.dpe16, 2);
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
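A note on the sizing helpers used in the mbim_tx_fixup() hunk above: DEFINE_RAW_FLEX() declares an on-stack header with room for a fixed number of trailing flexible-array elements, and struct_size()/__struct_size() compute the byte counts that the removed lines spelled out by hand as sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2. The user-space sketch below illustrates only that arithmetic; struct dpe16, struct ndp16 and ndp16_size() are simplified stand-ins, not the kernel's usb_cdc_ncm_* definitions.

    /* Illustrative stand-ins for the NDP16/DPE16 layout; not kernel code. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dpe16 {                  /* datagram pointer entry */
            uint16_t index;
            uint16_t length;
    };

    struct ndp16 {                  /* datagram pointer table header */
            uint32_t signature;
            uint16_t length;        /* header plus all dpe16[] entries */
            uint16_t next_ndp_index;
            struct dpe16 dpe16[];   /* flexible array member, must be last */
    };

    /* Same arithmetic as struct_size(ndp, dpe16, n). */
    static size_t ndp16_size(size_t n)
    {
            return sizeof(struct ndp16) + n * sizeof(struct dpe16);
    }

    int main(void)
    {
            /* Two entries, as in the MBIM TX header: one datagram plus the
             * null terminator entry.
             */
            printf("wLength for 2 entries: %zu\n", ndp16_size(2));
            printf("manual sum:            %zu\n",
                   sizeof(struct ndp16) + 2 * sizeof(struct dpe16));
            return 0;
    }

Both lines print the same value, which is the equivalence the patch relies on when it swaps the hand-written sum for struct_size() and sizes the needed headroom with struct_size_t().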
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 689c920ca898..43ac1c3f1ad0 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -897,7 +897,7 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
* @queue: CLDMA queue.
* @recv_skb: Receiving skb callback.
*/
-void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
+static void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
queue->recv_skb = recv_skb;
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index f2d9941be9c8..9d0107e18a7b 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -126,8 +126,6 @@ void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
- int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index 2b1edb3eba15..9b9c5eb54e07 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -12,11 +12,11 @@
#define MEI_NFC_MAX_HCI_PAYLOAD 300
/**
- * struct nfc_mei_phy
+ * struct nfc_mei_phy - NFC description of the MEI PHY and interface functions
*
* @cldev: mei client device
* @hdev: nfc hci device
-
+ *
* @send_wq: send completion wait queue
* @fw_ivn: NFC Interface Version Number
* @vendor_id: NFC manufacturer ID
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa4181d7de73..f1f719351f3f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4901,7 +4901,6 @@ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
*/
nvme_stop_keep_alive(ctrl);
blk_mq_destroy_queue(ctrl->admin_q);
- blk_put_queue(ctrl->admin_q);
if (ctrl->ops->flags & NVME_F_FABRICS) {
blk_mq_destroy_queue(ctrl->fabrics_q);
blk_put_queue(ctrl->fabrics_q);
@@ -5045,6 +5044,8 @@ static void nvme_free_ctrl(struct device *dev)
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
+ if (ctrl->admin_q)
+ blk_put_queue(ctrl->admin_q);
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
nvme_free_cels(ctrl);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 03987f497a5b..2c903729b0b9 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2355,17 +2355,11 @@ nvme_fc_ctrl_free(struct kref *ref)
container_of(ref, struct nvme_fc_ctrl, ref);
unsigned long flags;
- if (ctrl->ctrl.tagset)
- nvme_remove_io_tag_set(&ctrl->ctrl);
-
/* remove from rport list */
spin_lock_irqsave(&ctrl->rport->lock, flags);
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
- nvme_remove_admin_tag_set(&ctrl->ctrl);
-
kfree(ctrl->queues);
put_device(ctrl->dev);
@@ -3259,13 +3253,20 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
+
/*
* kill the association on the link side. this will block
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
+
+ if (ctrl->ctrl.tagset)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+
+ nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 543e17aead12..e35eccacee8c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -793,7 +793,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
return;
}
nvme_add_ns_head_cdev(head);
- kblockd_schedule_work(&head->partition_scan_work);
+ queue_work(nvme_wq, &head->partition_scan_work);
}
nvme_mpath_add_sysfs_link(ns->head);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index ceba21684e82..300d5e032f6d 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
const char *hash_name;
u8 *challenge = req->sq->dhchap_c1;
struct nvme_dhchap_key *transformed_key;
- u8 buf[4], sc_c = ctrl->concat ? 1 : 0;
+ u8 buf[4];
int ret;
hash_name = nvme_auth_hmac_name(ctrl->shash_id);
@@ -367,7 +367,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
- *buf = sc_c;
+ *buf = req->sq->sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index bf01ec414c55..5946681cb0e3 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -43,6 +43,7 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
data->auth_protocol[0].dhchap.halen,
data->auth_protocol[0].dhchap.dhlen);
req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ req->sq->sc_c = data->sc_c;
if (data->sc_c != NVME_AUTH_SECP_NOSC) {
if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51df72f5e89b..f3b09f4099f0 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -159,6 +159,7 @@ struct nvmet_sq {
bool authenticated;
struct delayed_work auth_expired_work;
u16 dhchap_tid;
+ u8 sc_c;
u8 dhchap_status;
u8 dhchap_step;
u8 *dhchap_c1;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4492b809094b..36f8c0985430 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -958,6 +958,7 @@ void pci_save_aspm_l1ss_state(struct pci_dev *dev);
void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#ifdef CONFIG_PCIEASPM
+void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
@@ -965,6 +966,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
void pci_configure_ltr(struct pci_dev *pdev);
void pci_bridge_reconfigure_ltr(struct pci_dev *pdev);
#else
+static inline void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap) { }
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 79b965158473..cedea47a3547 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -814,7 +814,6 @@ static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
- u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
struct pci_bus *linkbus = parent->subordinate;
@@ -829,9 +828,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* If ASPM not supported, don't mess with the clocks and link,
* bail out now.
*/
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
- pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
- if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
+ if (!(parent->aspm_l0s_support && child->aspm_l0s_support) &&
+ !(parent->aspm_l1_support && child->aspm_l1_support))
return;
/* Configure common clock before checking latencies */
@@ -843,8 +841,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* read-only Link Capabilities may change depending on common clock
* configuration (PCIe r5.0, sec 7.5.3.6).
*/
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
- pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
@@ -864,7 +860,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* given link unless components on both sides of the link each
* support L0s.
*/
- if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
+ if (parent->aspm_l0s_support && child->aspm_l0s_support)
link->aspm_support |= PCIE_LINK_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
@@ -873,7 +869,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
/* Setup L1 state */
- if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
+ if (parent->aspm_l1_support && child->aspm_l1_support)
link->aspm_support |= PCIE_LINK_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
@@ -1530,6 +1526,19 @@ int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
}
EXPORT_SYMBOL(pci_enable_link_state_locked);
+void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap)
+{
+ if (lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
+ pdev->aspm_l0s_support = 0;
+ if (lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
+ pdev->aspm_l1_support = 0;
+
+ pci_info(pdev, "ASPM: Link Capabilities%s%s treated as unsupported to avoid device defect\n",
+ lnkcap & PCI_EXP_LNKCAP_ASPM_L0S ? " L0s" : "",
+ lnkcap & PCI_EXP_LNKCAP_ASPM_L1 ? " L1" : "");
+
+}
+
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0ce98e18b5a8..9cd032dff31e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1656,6 +1656,13 @@ void set_pcie_port_type(struct pci_dev *pdev)
if (reg32 & PCI_EXP_LNKCAP_DLLLARC)
pdev->link_active_reporting = 1;
+#ifdef CONFIG_PCIEASPM
+ if (reg32 & PCI_EXP_LNKCAP_ASPM_L0S)
+ pdev->aspm_l0s_support = 1;
+ if (reg32 & PCI_EXP_LNKCAP_ASPM_L1)
+ pdev->aspm_l1_support = 1;
+#endif
+
parent = pci_upstream_bridge(pdev);
if (!parent)
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 214ed060ca1b..b9c252aa6fe0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2494,28 +2494,27 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
*/
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
- pci_info(dev, "Disabling L0s\n");
- pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+ pcie_aspm_remove_cap(dev, PCI_EXP_LNKCAP_ASPM_L0S);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
{
- pci_info(dev, "Disabling ASPM L0s/L1\n");
- pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pcie_aspm_remove_cap(dev,
+ PCI_EXP_LNKCAP_ASPM_L0S | PCI_EXP_LNKCAP_ASPM_L1);
}
/*
@@ -2523,7 +2522,10 @@ static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
* upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
* disable both L0s and L1 for now to be safe.
*/
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x0451, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PASEMI, 0xa002, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0x1105, quirk_disable_aspm_l0s_l1);
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index cc64f93709a4..ca4f97be7538 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -155,7 +155,16 @@ static u8 get_st_modes(struct pci_dev *pdev)
return reg;
}
-static u32 get_st_table_loc(struct pci_dev *pdev)
+/**
+ * pcie_tph_get_st_table_loc - Return the device's ST table location
+ * @pdev: PCI device to query
+ *
+ * Return:
+ * PCI_TPH_LOC_NONE - Not present
+ * PCI_TPH_LOC_CAP - Located in the TPH Requester Extended Capability
+ * PCI_TPH_LOC_MSIX - Located in the MSI-X Table
+ */
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev)
{
u32 reg;
@@ -163,6 +172,7 @@ static u32 get_st_table_loc(struct pci_dev *pdev)
return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg);
}
+EXPORT_SYMBOL(pcie_tph_get_st_table_loc);
/*
* Return the size of ST table. If ST table is not in TPH Requester Extended
@@ -174,7 +184,7 @@ u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
u32 loc;
/* Check ST table location first */
- loc = get_st_table_loc(pdev);
+ loc = pcie_tph_get_st_table_loc(pdev);
/* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */
loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
@@ -299,7 +309,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
*/
set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE);
- loc = get_st_table_loc(pdev);
+ loc = pcie_tph_get_st_table_loc(pdev);
/* Convert loc to match with PCI_TPH_LOC_* */
loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
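The kernel-doc added above lists the three possible locations of the steering-tag table, and exporting pcie_tph_get_st_table_loc() lets code outside drivers/pci perform that check before programming an entry. A hedged sketch of a hypothetical caller follows; my_dev_program_steering_tag() and its error handling are illustrative, while the pcie_tph_*() calls, the PCI_TPH_LOC_* values and the PCI_TPH_CAP_LOC_MASK conversion are taken from the patch (assumes linux/pci.h, linux/pci-tph.h and linux/bitfield.h).

    /* Hypothetical out-of-core user of the newly exported helper. */
    static int my_dev_program_steering_tag(struct pci_dev *pdev,
                                           unsigned int index, u16 tag)
    {
            u32 loc = pcie_tph_get_st_table_loc(pdev);

            /* Convert to the PCI_TPH_LOC_* encoding, as the core code does. */
            loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);

            if (loc == PCI_TPH_LOC_NONE)
                    return -ENODEV; /* no ST table to program */

            /* Table lives either in the TPH capability or the MSI-X table;
             * pcie_tph_set_st_entry() handles both.
             */
            return pcie_tph_set_st_entry(pdev, index, tag);
    }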
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index e255c1b069ec..7dd282da67ce 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -1109,7 +1109,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
- /* check if the corresponding bit is set in sscountovf or overflow mask in shmem */
+ /* check if the corresponding bit is set in scountovf or overflow mask in shmem */
if (!(overflow & BIT(hidx)))
continue;
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
index 68abb6d6cecd..a8f82104a384 100644
--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -532,6 +532,11 @@ static int cs42l43_gpio_add_pin_ranges(struct gpio_chip *chip)
return ret;
}
+static void cs42l43_fwnode_put(void *data)
+{
+ fwnode_handle_put(data);
+}
+
static int cs42l43_pin_probe(struct platform_device *pdev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
@@ -563,10 +568,20 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
priv->gpio_chip.ngpio = CS42L43_NUM_GPIOS;
if (is_of_node(fwnode)) {
- fwnode = fwnode_get_named_child_node(fwnode, "pinctrl");
-
- if (fwnode && !fwnode->dev)
- fwnode->dev = priv->dev;
+ struct fwnode_handle *child;
+
+ child = fwnode_get_named_child_node(fwnode, "pinctrl");
+ if (child) {
+ ret = devm_add_action_or_reset(&pdev->dev,
+ cs42l43_fwnode_put, child);
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+ if (!child->dev)
+ child->dev = priv->dev;
+ fwnode = child;
+ }
}
priv->gpio_chip.fwnode = fwnode;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8189.c b/drivers/pinctrl/mediatek/pinctrl-mt8189.c
index 7028aff55ae5..f6a3e584588b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8189.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8189.c
@@ -1642,9 +1642,7 @@ static const struct mtk_pin_reg_calc mt8189_reg_cals[PINCTRL_PIN_REG_MAX] = {
};
static const char * const mt8189_pinctrl_register_base_names[] = {
- "gpio_base", "iocfg_bm0_base", "iocfg_bm1_base", "iocfg_bm2_base", "iocfg_lm_base",
- "iocfg_lt0_base", "iocfg_lt1_base", "iocfg_rb0_base", "iocfg_rb1_base",
- "iocfg_rt_base"
+ "base", "lm", "rb0", "rb1", "bm0", "bm1", "bm2", "lt0", "lt1", "rt",
};
static const struct mtk_eint_hw mt8189_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8196.c b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
index 82a73929c7a0..dec957c1724b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8196.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
@@ -1801,10 +1801,8 @@ static const struct mtk_pin_reg_calc mt8196_reg_cals[PINCTRL_PIN_REG_MAX] = {
};
static const char * const mt8196_pinctrl_register_base_names[] = {
- "iocfg0", "iocfg_rt", "iocfg_rm1", "iocfg_rm2",
- "iocfg_rb", "iocfg_bm1", "iocfg_bm2", "iocfg_bm3",
- "iocfg_lt", "iocfg_lm1", "iocfg_lm2", "iocfg_lb1",
- "iocfg_lb2", "iocfg_tm1", "iocfg_tm2", "iocfg_tm3",
+ "base", "rt", "rm1", "rm2", "rb", "bm1", "bm2", "bm3",
+ "lt", "lm1", "lm2", "lb1", "lb2", "tm1", "tm2", "tm3",
};
static const struct mtk_eint_hw mt8196_eint_hw = {
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index 501eb296c760..35511f83d056 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -392,6 +392,7 @@ static int s32_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
gpio_pin->pin_id = offset;
gpio_pin->config = config;
+ INIT_LIST_HEAD(&gpio_pin->list);
spin_lock_irqsave(&ipctl->gpio_configs_lock, flags);
list_add(&gpio_pin->list, &ipctl->gpio_configs);
@@ -951,7 +952,7 @@ int s32_pinctrl_probe(struct platform_device *pdev,
spin_lock_init(&ipctl->gpio_configs_lock);
s32_pinctrl_desc =
- devm_kmalloc(&pdev->dev, sizeof(*s32_pinctrl_desc), GFP_KERNEL);
+ devm_kzalloc(&pdev->dev, sizeof(*s32_pinctrl_desc), GFP_KERNEL);
if (!s32_pinctrl_desc)
return -ENOMEM;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 67525d542c5b..e99871b90ab9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -189,7 +189,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
*/
if (d && i != gpio_func &&
!test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
- disable_irq(irq);
+ disable_irq_nosync(irq);
raw_spin_lock_irqsave(&pctrl->lock, flags);
diff --git a/drivers/pinctrl/realtek/Kconfig b/drivers/pinctrl/realtek/Kconfig
index 0fc6bd4fcb7e..400c9e5b16ad 100644
--- a/drivers/pinctrl/realtek/Kconfig
+++ b/drivers/pinctrl/realtek/Kconfig
@@ -6,6 +6,7 @@ config PINCTRL_RTD
default y
select PINMUX
select GENERIC_PINCONF
+ select REGMAP_MMIO
config PINCTRL_RTD1619B
tristate "Realtek DHC 1619B pin controller driver"
diff --git a/drivers/platform/arm64/lenovo-thinkpad-t14s.c b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
index 1d5d11adaf32..cf6a1d3b2617 100644
--- a/drivers/platform/arm64/lenovo-thinkpad-t14s.c
+++ b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
@@ -120,6 +120,7 @@ static int t14s_ec_write(void *context, unsigned int reg,
if (ret < 0)
return ret;
+ fsleep(10000);
return 0;
}
@@ -157,6 +158,7 @@ static int t14s_ec_read(void *context, unsigned int reg,
out:
i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ fsleep(10000);
return ret;
}
@@ -191,6 +193,8 @@ static int t14s_ec_read_evt(struct t14s_ec *ec, u8 *val)
if (ret < 0)
goto out;
+ fsleep(10000);
+
ret = 0;
out:
@@ -557,12 +561,6 @@ static int t14s_ec_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(ec->regmap),
"Failed to init regmap\n");
- ret = devm_request_threaded_irq(dev, client->irq, NULL,
- t14s_ec_irq_handler,
- IRQF_ONESHOT, dev_name(dev), ec);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Failed to get IRQ\n");
-
ret = t14s_leds_probe(ec);
if (ret < 0)
return ret;
@@ -579,6 +577,12 @@ static int t14s_ec_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ t14s_ec_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), ec);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get IRQ\n");
+
/*
* Disable wakeup support by default, because the driver currently does
* not support masking any events and the laptop should not wake up when
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c122016d82f1..c883a28e0916 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -545,6 +545,7 @@ config MSI_WMI
config MSI_WMI_PLATFORM
tristate "MSI WMI Platform features"
depends on ACPI_WMI
+ depends on DMI
depends on HWMON
help
Say Y here if you want to have support for WMI-based platform features
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 13eb22b35aa8..d848afc91f87 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -102,6 +102,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
+ WMID_BACKLIGHT_EVENT = 0x4,
WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
WMID_GAMING_TURBO_KEY_EVENT = 0x7,
WMID_AC_EVENT = 0x8,
@@ -2369,6 +2370,9 @@ static void acer_wmi_notify(union acpi_object *obj, void *context)
sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
}
break;
+ case WMID_BACKLIGHT_EVENT:
+ /* Already handled by acpi-video */
+ break;
case WMID_ACCEL_OR_KBD_DOCK_EVENT:
acer_gsensor_event();
acer_kbd_dock_event(&return_value);
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index d63aaad7ef59..404e62ad293a 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -122,6 +122,14 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
}
},
+ {
+ .ident = "ROG Xbox Ally RC73YA",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "RC73YA"),
+ }
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
{
.ident = "V14 G4 AMN",
@@ -204,6 +212,23 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "82ND"),
}
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4618 */
+ {
+ .ident = "Lenovo Legion Go 2",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N0"),
+ }
+ },
+ {
+ .ident = "Lenovo Legion Go 2",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N1"),
+ }
+ },
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
{
.ident = "HP Laptop 15s-eq2xxx",
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index bd318fd02ccf..cae3fcafd4d7 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -106,6 +106,7 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
switch (dev->cpu_id) {
case AMD_CPU_ID_PCO:
case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_VG:
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
dev->num_ips = 12;
@@ -517,6 +518,7 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
case AMD_CPU_ID_PCO:
return MSG_OS_HINT_PCO;
case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_VG:
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
case AMD_CPU_ID_PS:
@@ -717,6 +719,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SHP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_VG) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
{ }
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index 62f3e51020fd..fe3f53eb5955 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -156,6 +156,7 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define AMD_CPU_ID_RN 0x1630
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_VG 0x1645
#define AMD_CPU_ID_YC 0x14B5
#define AMD_CPU_ID_CB 0x14D8
#define AMD_CPU_ID_PS 0x14E8
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index f417dcc9af35..fadf7aac6779 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -90,34 +90,34 @@ static struct awcc_quirks empty_quirks;
static const struct dmi_system_id awcc_dmi_table[] __initconst = {
{
- .ident = "Alienware Area-51m",
+ .ident = "Alienware 16 Aurora",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware 16 Aurora"),
},
- .driver_data = &generic_quirks,
+ .driver_data = &g_series_quirks,
},
{
- .ident = "Alienware Area-51m R2",
+ .ident = "Alienware Area-51m",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m R2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m"),
},
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware m15 R5",
+ .ident = "Alienware m15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R5"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15"),
},
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware m15 R7",
+ .ident = "Alienware m16 R1 AMD",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R7"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
},
.driver_data = &generic_quirks,
},
@@ -130,14 +130,6 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &g_series_quirks,
},
{
- .ident = "Alienware m16 R1 AMD",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
- },
- .driver_data = &generic_quirks,
- },
- {
.ident = "Alienware m16 R2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -146,114 +138,66 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware m17 R5",
+ .ident = "Alienware m17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17"),
},
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware m18 R2",
+ .ident = "Alienware m18",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m18 R2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m18"),
},
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware x15 R1",
+ .ident = "Alienware x15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R1"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15"),
},
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware x15 R2",
+ .ident = "Alienware x17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x17"),
},
.driver_data = &generic_quirks,
},
{
- .ident = "Alienware x17 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x17 R2"),
- },
- .driver_data = &generic_quirks,
- },
- {
- .ident = "Dell Inc. G15 5510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5510"),
- },
- .driver_data = &g_series_quirks,
- },
- {
- .ident = "Dell Inc. G15 5511",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
- },
- .driver_data = &g_series_quirks,
- },
- {
- .ident = "Dell Inc. G15 5515",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
- },
- .driver_data = &g_series_quirks,
- },
- {
- .ident = "Dell Inc. G15 5530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5530"),
- },
- .driver_data = &g_series_quirks,
- },
- {
- .ident = "Dell Inc. G16 7630",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G16 7630"),
- },
- .driver_data = &g_series_quirks,
- },
- {
- .ident = "Dell Inc. G3 3500",
+ .ident = "Dell Inc. G15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G3 3500"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15"),
},
.driver_data = &g_series_quirks,
},
{
- .ident = "Dell Inc. G3 3590",
+ .ident = "Dell Inc. G16",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G3 3590"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G16"),
},
.driver_data = &g_series_quirks,
},
{
- .ident = "Dell Inc. G5 5500",
+ .ident = "Dell Inc. G3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G5 5500"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G3"),
},
.driver_data = &g_series_quirks,
},
{
- .ident = "Dell Inc. G5 5505",
+ .ident = "Dell Inc. G5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G5 5505"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G5"),
},
.driver_data = &g_series_quirks,
},
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 8b3533d6ba09..ad9d9f97960f 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -92,9 +92,11 @@ static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
-/* DMI Board names of Victus 16-r1000 and Victus 16-s1000 laptops */
+/* DMI Board names of Victus 16-r and Victus 16-s laptops */
static const char * const victus_s_thermal_profile_boards[] = {
- "8C99", "8C9C"
+ "8BBE", "8BD4", "8BD5",
+ "8C78", "8C99", "8C9C",
+ "8D41",
};
enum hp_wmi_radio {
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index c3772df34679..8a4c54089ace 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -81,6 +81,10 @@ static const struct key_entry huawei_wmi_keymap[] = {
{ KE_KEY, 0x289, { KEY_WLAN } },
// Huawei |M| key
{ KE_KEY, 0x28a, { KEY_CONFIG } },
+ // HONOR YOYO key
+ { KE_KEY, 0x28b, { KEY_NOTIFICATION_CENTER } },
+ // HONOR print screen
+ { KE_KEY, 0x28e, { KEY_PRINT } },
// Keyboard backlit
{ KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
{ KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index f25a427cccda..9c07a7faf18f 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -55,6 +55,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
{ "INTC10CB" },
{ "INTC10CC" },
{ "INTC10F1" },
+ { "INTC10F2" },
{ }
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index bafac8aa2baf..14513010daad 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -250,7 +250,7 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
} else {
ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
IRQF_NO_SUSPEND, "intel_punit_ipc",
- &punit_ipcdev);
+ punit_ipcdev);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", irq);
return ret;
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
index 3f4343147dad..950ede5eab76 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
@@ -108,11 +108,11 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_read_config_dword(pdev, 0xD0, &mmio_base);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
ret = pci_read_config_dword(pdev, 0xFC, &pcu_base);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
pcu_base &= GENMASK(10, 0);
base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 70ae11519837..0abe850ef54e 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -40,7 +40,7 @@
* @agent_type_mask: Bit mask of all hardware agents for this domain
* @uncore_attr_group: Attribute group storage
* @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
- * @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
* @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
* @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
* @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
@@ -48,13 +48,14 @@
* @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
* @package_id_kobj_attr: Storage for kobject attribute package_id
* @elc_low_threshold_percent_kobj_attr:
- Storage for kobject attribute elc_low_threshold_percent
+ * Storage for kobject attribute elc_low_threshold_percent
* @elc_high_threshold_percent_kobj_attr:
- Storage for kobject attribute elc_high_threshold_percent
+ * Storage for kobject attribute elc_high_threshold_percent
* @elc_high_threshold_enable_kobj_attr:
- Storage for kobject attribute elc_high_threshold_enable
+ * Storage for kobject attribute elc_high_threshold_enable
* @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz
* @agent_types_kobj_attr: Storage for kobject attribute agent_type
+ * @die_id_kobj_attr: Attribute storage for die_id information
* @uncore_attrs: Attribute storage for group creation
*
* This structure is used to encapsulate all data related to uncore sysfs
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index 2a6897035150..0dfc552b2802 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -256,6 +256,10 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
X86_MATCH_VFM(INTEL_ARROWLAKE, NULL),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, NULL),
+ X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_WILDCATLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_NOVALAKE, NULL),
+ X86_MATCH_VFM(INTEL_NOVALAKE_L, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c
index dc5e9878cb68..e912fcc12d12 100644
--- a/drivers/platform/x86/msi-wmi-platform.c
+++ b/drivers/platform/x86/msi-wmi-platform.c
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/device/driver.h>
+#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/kernel.h>
@@ -28,7 +29,7 @@
#define DRIVER_NAME "msi-wmi-platform"
-#define MSI_PLATFORM_GUID "ABBC0F6E-8EA1-11d1-00A0-C90629100000"
+#define MSI_PLATFORM_GUID "ABBC0F6E-8EA1-11D1-00A0-C90629100000"
#define MSI_WMI_PLATFORM_INTERFACE_VERSION 2
@@ -448,7 +449,45 @@ static struct wmi_driver msi_wmi_platform_driver = {
.probe = msi_wmi_platform_probe,
.no_singleton = true,
};
-module_wmi_driver(msi_wmi_platform_driver);
+
+/*
+ * MSI reused the WMI GUID from the WMI-ACPI sample code provided by Microsoft,
+ * so other manufacturers might use it as well for their WMI-ACPI implementations.
+ */
+static const struct dmi_system_id msi_wmi_platform_whitelist[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ },
+ },
+ { }
+};
+
+static int __init msi_wmi_platform_module_init(void)
+{
+ if (!dmi_check_system(msi_wmi_platform_whitelist)) {
+ if (!force)
+ return -ENODEV;
+
+ pr_warn("Ignoring DMI whitelist\n");
+ }
+
+ return wmi_driver_register(&msi_wmi_platform_driver);
+}
+
+static void __exit msi_wmi_platform_module_exit(void)
+{
+ wmi_driver_unregister(&msi_wmi_platform_driver);
+}
+
+module_init(msi_wmi_platform_module_init);
+module_exit(msi_wmi_platform_module_exit);
+
MODULE_AUTHOR("Armin Wolf <W_Armin@gmx.de>");
MODULE_DESCRIPTION("MSI WMI platform features");
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index 8fe1c0a501c9..b5e2ffd5ea64 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -41,7 +41,7 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
- int num_domains, i;
+ int num_domains, i, ret;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
struct scmi_pm_domain *scmi_pd;
@@ -108,9 +108,18 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ ret = of_genpd_add_provider_onecell(np, scmi_pd_data);
+ if (ret)
+ goto err_rm_genpds;
+
dev_set_drvdata(dev, scmi_pd_data);
- return of_genpd_add_provider_onecell(np, scmi_pd_data);
+ return 0;
+err_rm_genpds:
+ for (i = num_domains - 1; i >= 0; i--)
+ pm_genpd_remove(domains[i]);
+
+ return ret;
}
static void scmi_pm_domain_remove(struct scmi_device *sdev)
diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index 33991f3c6b55..a34b260274f7 100644
--- a/drivers/pmdomain/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
@@ -536,6 +536,8 @@ static void imx_gpc_remove(struct platform_device *pdev)
return;
}
}
+
+ of_node_put(pgc_node);
}
static struct platform_driver imx_gpc_driver = {
diff --git a/drivers/pmdomain/samsung/exynos-pm-domains.c b/drivers/pmdomain/samsung/exynos-pm-domains.c
index 5d478bb37ad6..5c3aa8983087 100644
--- a/drivers/pmdomain/samsung/exynos-pm-domains.c
+++ b/drivers/pmdomain/samsung/exynos-pm-domains.c
@@ -92,13 +92,14 @@ static const struct of_device_id exynos_pm_domain_of_match[] = {
{ },
};
-static const char *exynos_get_domain_name(struct device_node *node)
+static const char *exynos_get_domain_name(struct device *dev,
+ struct device_node *node)
{
const char *name;
if (of_property_read_string(node, "label", &name) < 0)
name = kbasename(node->full_name);
- return kstrdup_const(name, GFP_KERNEL);
+ return devm_kstrdup_const(dev, name, GFP_KERNEL);
}
static int exynos_pd_probe(struct platform_device *pdev)
@@ -115,20 +116,27 @@ static int exynos_pd_probe(struct platform_device *pdev)
if (!pd)
return -ENOMEM;
- pd->pd.name = exynos_get_domain_name(np);
+ pd->pd.name = exynos_get_domain_name(dev, np);
if (!pd->pd.name)
return -ENOMEM;
pd->base = of_iomap(np, 0);
- if (!pd->base) {
- kfree_const(pd->pd.name);
+ if (!pd->base)
return -ENODEV;
- }
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+ /*
+ * Some Samsung platforms with bootloaders turning on the splash-screen
+ * and handing it over to the kernel, requires the power-domains to be
+ * reset during boot.
+ */
+ if (IS_ENABLED(CONFIG_ARM) &&
+ of_device_is_compatible(np, "samsung,exynos4210-pd"))
+ exynos_pd_power_off(&pd->pd);
+
on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
pm_genpd_init(&pd->pd, NULL, !on);
@@ -147,15 +155,6 @@ static int exynos_pd_probe(struct platform_device *pdev)
parent.np, child.np);
}
- /*
- * Some Samsung platforms with bootloaders turning on the splash-screen
- * and handing it over to the kernel, requires the power-domains to be
- * reset during boot. As a temporary hack to manage this, let's enforce
- * a sync_state.
- */
- if (!ret)
- of_genpd_sync_state(np);
-
pm_runtime_enable(dev);
return ret;
}
diff --git a/drivers/power/supply/intel_dc_ti_battery.c b/drivers/power/supply/intel_dc_ti_battery.c
index 56b0c92e9d28..67a75281b0ac 100644
--- a/drivers/power/supply/intel_dc_ti_battery.c
+++ b/drivers/power/supply/intel_dc_ti_battery.c
@@ -127,7 +127,8 @@ struct dc_ti_battery_chip {
static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
{
struct dc_ti_battery_chip *chip = power_supply_get_drvdata(psy);
- s64 cnt_start_usec, now_usec, sleep_usec;
+ ktime_t ktime;
+ s64 sleep_usec;
unsigned int reg_val;
s32 acc, smpl_ctr;
int ret;
@@ -141,16 +142,17 @@ static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, i
if (ret)
goto out_err;
- cnt_start_usec = ktime_get_ns() / NSEC_PER_USEC;
+ ktime = ktime_get();
/* Read Vbat, convert IIO mV to power-supply µV */
ret = iio_read_channel_processed_scale(chip->vbat_channel, volt, 1000);
if (ret < 0)
goto out_err;
+ ktime = ktime_sub(ktime_get(), ktime);
+
/* Sleep at least 3 sample-times + slack to get 3+ CC samples */
- now_usec = ktime_get_ns() / NSEC_PER_USEC;
- sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - (now_usec - cnt_start_usec);
+ sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - ktime_to_us(ktime);
if (sleep_usec > 0 && sleep_usec < 1000000)
usleep_range(sleep_usec, sleep_usec + SLEEP_SLACK_US);
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 68f1f7fdaa9d..790eb42b78db 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -328,9 +328,31 @@ static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
return ns;
}
-static int ines_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int ines_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ unsigned long flags;
+ u32 port_conf;
+
+ cfg->rx_filter = port->rxts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT
+ : HWTSTAMP_FILTER_NONE;
+ if (port->txts_enabled) {
+ spin_lock_irqsave(&port->lock, flags);
+ port_conf = ines_read32(port, port_conf);
+ spin_unlock_irqrestore(&port->lock, flags);
+ cfg->tx_type = (port_conf & CM_ONE_STEP) ? HWTSTAMP_TX_ONESTEP_P2P
+ : HWTSTAMP_TX_OFF;
+ } else {
+ cfg->tx_type = HWTSTAMP_TX_OFF;
+ }
+
+ return 0;
+}
+
+static int ines_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
@@ -709,7 +731,8 @@ static struct mii_timestamper *ines_ptp_probe_channel(struct device *device,
}
port->mii_ts.rxtstamp = ines_rxtstamp;
port->mii_ts.txtstamp = ines_txtstamp;
- port->mii_ts.hwtstamp = ines_hwtstamp;
+ port->mii_ts.hwtstamp_set = ines_hwtstamp_set;
+ port->mii_ts.hwtstamp_get = ines_hwtstamp_get;
port->mii_ts.link_state = ines_link_state;
port->mii_ts.ts_info = ines_ts_info;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index eeebe4d149f7..bf8187c963aa 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -25,8 +25,7 @@
#include <linux/crc16.h>
#include <linux/dpll.h>
-#define PCI_VENDOR_ID_FACEBOOK 0x1d9b
-#define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400
+#define PCI_DEVICE_ID_META_TIMECARD 0x0400
#define PCI_VENDOR_ID_CELESTICA 0x18d4
#define PCI_DEVICE_ID_CELESTICA_TIMECARD 0x1008
@@ -1030,7 +1029,7 @@ static struct ocp_resource ocp_adva_resource[] = {
};
static const struct pci_device_id ptp_ocp_pcidev_id[] = {
- { PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
+ { PCI_DEVICE_DATA(META, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
{ PCI_DEVICE_DATA(ADVA, TIMECARD, &ocp_adva_resource) },
@@ -2225,6 +2224,9 @@ ptp_ocp_ts_enable(void *priv, u32 req, bool enable)
static void
ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext)
{
+ if (!ext)
+ return;
+
ext->info->enable(ext, ~0, false);
pci_free_irq(ext->bp->pdev, ext->irq_vec, ext);
kfree(ext);
@@ -3250,20 +3252,16 @@ signal_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dev_ext_attribute *ea = to_ext_attr(attr);
struct ptp_ocp *bp = dev_get_drvdata(dev);
struct ptp_ocp_signal *signal;
+ int gen = (uintptr_t)ea->var;
struct timespec64 ts;
- ssize_t count;
- int i;
-
- i = (uintptr_t)ea->var;
- signal = &bp->signal[i];
- count = sysfs_emit(buf, "%llu %d %llu %d", signal->period,
- signal->duty, signal->phase, signal->polarity);
+ signal = &bp->signal[gen];
ts = ktime_to_timespec64(signal->start);
- count += sysfs_emit_at(buf, count, " %ptT TAI\n", &ts);
- return count;
+ return sysfs_emit(buf, "%llu %d %llu %d %ptT TAI\n",
+ signal->period, signal->duty, signal->phase, signal->polarity,
+ &ts.tv_sec);
}
static EXT_ATTR_RW(signal, signal, 0);
static EXT_ATTR_RW(signal, signal, 1);
@@ -4562,21 +4560,14 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_detach_sysfs(bp);
ptp_ocp_attr_group_del(bp);
timer_delete_sync(&bp->watchdog);
- if (bp->ts0)
- ptp_ocp_unregister_ext(bp->ts0);
- if (bp->ts1)
- ptp_ocp_unregister_ext(bp->ts1);
- if (bp->ts2)
- ptp_ocp_unregister_ext(bp->ts2);
- if (bp->ts3)
- ptp_ocp_unregister_ext(bp->ts3);
- if (bp->ts4)
- ptp_ocp_unregister_ext(bp->ts4);
- if (bp->pps)
- ptp_ocp_unregister_ext(bp->pps);
+ ptp_ocp_unregister_ext(bp->ts0);
+ ptp_ocp_unregister_ext(bp->ts1);
+ ptp_ocp_unregister_ext(bp->ts2);
+ ptp_ocp_unregister_ext(bp->ts3);
+ ptp_ocp_unregister_ext(bp->ts4);
+ ptp_ocp_unregister_ext(bp->pps);
for (i = 0; i < 4; i++)
- if (bp->signal_out[i])
- ptp_ocp_unregister_ext(bp->signal_out[i]);
+ ptp_ocp_unregister_ext(bp->signal_out[i]);
for (i = 0; i < __PORT_COUNT; i++)
if (bp->port[i].line != -1)
serial8250_unregister_port(bp->port[i].line);
@@ -4829,8 +4820,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_dpll:
- while (i) {
- --i;
+ while (i--) {
dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]);
dpll_pin_put(bp->sma[i].dpll_pin);
}
diff --git a/drivers/pwm/pwm-adp5585.c b/drivers/pwm/pwm-adp5585.c
index dc2860979e24..806f8d79b0d7 100644
--- a/drivers/pwm/pwm-adp5585.c
+++ b/drivers/pwm/pwm-adp5585.c
@@ -190,13 +190,13 @@ static int adp5585_pwm_probe(struct platform_device *pdev)
return 0;
}
-static const struct adp5585_pwm_chip adp5589_pwm_chip_info = {
+static const struct adp5585_pwm_chip adp5585_pwm_chip_info = {
.pwm_cfg = ADP5585_PWM_CFG,
.pwm_offt_low = ADP5585_PWM_OFFT_LOW,
.pwm_ont_low = ADP5585_PWM_ONT_LOW,
};
-static const struct adp5585_pwm_chip adp5585_pwm_chip_info = {
+static const struct adp5585_pwm_chip adp5589_pwm_chip_info = {
.pwm_cfg = ADP5589_PWM_CFG,
.pwm_offt_low = ADP5589_PWM_OFFT_LOW,
.pwm_ont_low = ADP5589_PWM_ONT_LOW,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 1cb647ed70c6..a2d16e9abfb5 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -334,6 +334,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
"Failed to register regulator: %ld\n",
PTR_ERR(drvdata->dev));
+ gpiod_put(cfg.ena_gpiod);
return ret;
}
diff --git a/drivers/reset/reset-imx8mp-audiomix.c b/drivers/reset/reset-imx8mp-audiomix.c
index 6b357adfe646..eceb37ff5dc5 100644
--- a/drivers/reset/reset-imx8mp-audiomix.c
+++ b/drivers/reset/reset-imx8mp-audiomix.c
@@ -14,8 +14,8 @@
#include <linux/reset-controller.h>
#define IMX8MP_AUDIOMIX_EARC_RESET_OFFSET 0x200
-#define IMX8MP_AUDIOMIX_EARC_RESET_MASK BIT(1)
-#define IMX8MP_AUDIOMIX_EARC_PHY_RESET_MASK BIT(2)
+#define IMX8MP_AUDIOMIX_EARC_RESET_MASK BIT(0)
+#define IMX8MP_AUDIOMIX_EARC_PHY_RESET_MASK BIT(1)
#define IMX8MP_AUDIOMIX_DSP_RUNSTALL_OFFSET 0x108
#define IMX8MP_AUDIOMIX_DSP_RUNSTALL_MASK BIT(5)
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 0aeafa772fb1..407b7c516658 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -701,7 +701,6 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
grp->sweep_req_pend_num--;
ctcmpc_send_sweep_resp(ch);
- kfree(mpcginfo);
return;
}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index edc0bcd46923..10b53bba373c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -761,7 +761,7 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
if (rc)
QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
ipa_name, com, CARD_DEVID(card), rc,
- qeth_get_ipa_msg(rc));
+ qeth_get_ipa_msg(com, rc));
else
QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
ipa_name, com, CARD_DEVID(card));
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index d9266f7d8187..1add124e033b 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -139,82 +139,237 @@ struct ipa_rc_msg {
const char *msg;
};
-static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_def_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
- {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
- {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
- {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"},
- {IPA_RC_INVALID_FORMAT, "invalid format or length"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
- {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"},
{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
- {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
- {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
- {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"},
- {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
- {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
- {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"},
{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
- {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
- {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
- {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
- {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
- {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"},
{IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"},
- {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
- {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
- {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
- {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
- {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"},
- {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"},
- {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"},
- {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
- {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"},
- {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
- {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
- {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"},
- {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
{IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
- {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
- {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
{IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
{IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
{IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
- {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
- {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"},
{IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
{IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
- {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
- {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
- {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
- {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
- {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
- {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
/* default for qeth_get_ipa_msg(): */
{IPA_RC_FFFF, "Unknown Error"}
};
-const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+static const struct ipa_rc_msg qeth_ipa_rc_adp_parms_msg[] = {
+ {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_diag_ass_msg[] = {
+ {IPA_RC_INVALID_FORMAT, "invalid format or length"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_addr_msg[] = {
+ {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
+ {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
+ {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_vnicc_msg[] = {
+ {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"},
+ {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_sbp_iqd_msg[] = {
+ {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"},
+ {IPA_RC_SBP_IQD_OS_MISMATCH, "OS mismatch"},
+ {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+ {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"},
+ {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
+ {IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
+ {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"},
+ {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_sbp_osa_msg[] = {
+ {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"},
+ {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"},
+ {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+ {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"},
+ {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
+ {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
+ {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"},
+ {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_mac_msg[] = {
+ {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
+ {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
+ {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
+ {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"},
+ {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_ip_msg[] = {
+ {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
+ {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
+ {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
+ {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_lan_msg[] = {
+ {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
+ {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_vlan_msg[] = {
+ {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
+ {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
+ {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"}
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_rtg_msg[] = {
+ {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
+ {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
+ {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
+ {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"}
+};
+
+struct ipa_cmd_rc_map {
+ enum qeth_ipa_cmds cmd;
+ const struct ipa_rc_msg *msg_arr;
+ const size_t arr_len;
+};
+
+static const struct ipa_cmd_rc_map qeth_ipa_cmd_rc_map[] = {
+ {
+ .cmd = IPA_CMD_SETADAPTERPARMS,
+ .msg_arr = qeth_ipa_rc_adp_parms_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_adp_parms_msg)
+ },
+ {
+ .cmd = IPA_CMD_SET_DIAG_ASS,
+ .msg_arr = qeth_ipa_rc_diag_ass_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_diag_ass_msg)
+ },
+ {
+ .cmd = IPA_CMD_CREATE_ADDR,
+ .msg_arr = qeth_ipa_rc_addr_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_addr_msg)
+ },
+ {
+ .cmd = IPA_CMD_DESTROY_ADDR,
+ .msg_arr = qeth_ipa_rc_addr_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_addr_msg)
+ },
+ {
+ .cmd = IPA_CMD_VNICC,
+ .msg_arr = qeth_ipa_rc_vnicc_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_vnicc_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETBRIDGEPORT_IQD,
+ .msg_arr = qeth_ipa_rc_sbp_iqd_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_sbp_iqd_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETBRIDGEPORT_OSA,
+ .msg_arr = qeth_ipa_rc_sbp_osa_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_sbp_osa_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETVMAC,
+ .msg_arr = qeth_ipa_rc_mac_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg)
+ },
+ {
+ .cmd = IPA_CMD_DELVMAC,
+ .msg_arr = qeth_ipa_rc_mac_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETGMAC,
+ .msg_arr = qeth_ipa_rc_mac_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg)
+ },
+ {
+ .cmd = IPA_CMD_DELGMAC,
+ .msg_arr = qeth_ipa_rc_mac_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_mac_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETIP,
+ .msg_arr = qeth_ipa_rc_ip_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_ip_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETIPM,
+ .msg_arr = qeth_ipa_rc_ip_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_ip_msg)
+ },
+ {
+ .cmd = IPA_CMD_DELIPM,
+ .msg_arr = qeth_ipa_rc_ip_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_ip_msg)
+ },
+ {
+ .cmd = IPA_CMD_STARTLAN,
+ .msg_arr = qeth_ipa_rc_lan_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_lan_msg)
+ },
+ {
+ .cmd = IPA_CMD_STOPLAN,
+ .msg_arr = qeth_ipa_rc_lan_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_lan_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETVLAN,
+ .msg_arr = qeth_ipa_rc_vlan_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_vlan_msg)
+ },
+ {
+ .cmd = IPA_CMD_DELVLAN,
+ .msg_arr = qeth_ipa_rc_vlan_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_vlan_msg)
+ },
+ {
+ .cmd = IPA_CMD_SETRTG,
+ .msg_arr = qeth_ipa_rc_rtg_msg,
+ .arr_len = ARRAY_SIZE(qeth_ipa_rc_rtg_msg)
+ }
+};
+
+const char *qeth_get_ipa_msg(enum qeth_ipa_cmds cmd,
+ enum qeth_ipa_return_codes rc)
{
int x;
+ const struct ipa_rc_msg *msg_arr = NULL;
+ size_t arr_len = 0;
- for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
- if (qeth_ipa_rc_msg[x].rc == rc)
- return qeth_ipa_rc_msg[x].msg;
- return qeth_ipa_rc_msg[x].msg;
-}
+ for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_rc_map); x++) {
+ if (qeth_ipa_cmd_rc_map[x].cmd == cmd) {
+ msg_arr = qeth_ipa_cmd_rc_map[x].msg_arr;
+ arr_len = qeth_ipa_cmd_rc_map[x].arr_len;
+ break;
+ }
+ }
+ for (x = 0; x < arr_len; x++) {
+ if (msg_arr[x].rc == rc)
+ return msg_arr[x].msg;
+ }
+
+ for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_def_msg) - 1; x++) {
+ if (qeth_ipa_rc_def_msg[x].rc == rc)
+ return qeth_ipa_rc_def_msg[x].msg;
+ }
+ return qeth_ipa_rc_def_msg[x].msg;
+}
struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6257f00786b3..252fc84e6eca 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -159,13 +159,17 @@ enum qeth_ipa_return_codes {
IPA_RC_SUCCESS = 0x0000,
IPA_RC_NOTSUPP = 0x0001,
IPA_RC_IP_TABLE_FULL = 0x0002,
+ IPA_RC_INVALID_SUBCMD = 0x0002,
IPA_RC_UNKNOWN_ERROR = 0x0003,
+ IPA_RC_HARDWARE_AUTH_ERROR = 0x0003,
IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
+ IPA_RC_VNICC_OOSEQ = 0x0005,
IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
IPA_RC_SBP_IQD_NOT_CONFIGURED = 0x000C,
IPA_RC_DUP_IPV6_HOME = 0x0010,
+ IPA_RC_SBP_IQD_OS_MISMATCH = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
IPA_RC_NO_ID_AVAILABLE = 0x0012,
IPA_RC_ID_NOT_FOUND = 0x0013,
@@ -173,6 +177,7 @@ enum qeth_ipa_return_codes {
IPA_RC_SBP_IQD_CURRENT_SECOND = 0x0018,
IPA_RC_SBP_IQD_LIMIT_SECOND = 0x001C,
IPA_RC_INVALID_IP_VERSION = 0x0020,
+ IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN = 0x0020,
IPA_RC_SBP_IQD_CURRENT_PRIMARY = 0x0024,
IPA_RC_LAN_FRAME_MISMATCH = 0x0040,
IPA_RC_SBP_IQD_NO_QDIO_QUEUES = 0x00EB,
@@ -220,16 +225,6 @@ enum qeth_ipa_return_codes {
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_FFFF = 0xffff
};
-/* for VNIC Characteristics */
-#define IPA_RC_VNICC_OOSEQ 0x0005
-
-/* for SET_DIAGNOSTIC_ASSIST */
-#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
-#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
-
-/* for SETBRIDGEPORT (double occupancies) */
-#define IPA_RC_SBP_IQD_OS_MISMATCH IPA_RC_DUP_IPV6_HOME
-#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN IPA_RC_INVALID_IP_VERSION
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
@@ -862,8 +857,9 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
-extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+const char *qeth_get_ipa_msg(enum qeth_ipa_cmds cmd,
+ enum qeth_ipa_return_codes rc);
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd)->hdr.initiator == IPA_CMD_INITIATOR_HOST)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4c62c597c7be..b3af9b78fa12 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2208,9 +2208,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
- sg_finish_rem_req(srp);
list_del(&srp->entry);
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ sg_finish_rem_req(srp);
+ /*
+ * sg_rq_end_io() uses srp->parentfp. Hence, only clear
+ * srp->parentfp after blk_mq_free_request() has been called.
+ */
srp->parentfp = NULL;
+
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 155ddeb8fcd4..bbf1fd4fe1e9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -519,9 +519,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
u32 reg;
- reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
- reg |= MX51_ECSPI_CTRL_XCH;
- writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+ if (spi_imx->usedma) {
+ reg = readl(spi_imx->base + MX51_ECSPI_DMA);
+ reg |= MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN;
+ writel(reg, spi_imx->base + MX51_ECSPI_DMA);
+ } else {
+ reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ reg |= MX51_ECSPI_CTRL_XCH;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+ }
}
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
@@ -759,7 +765,6 @@ static void mx51_setup_wml(struct spi_imx_data *spi_imx)
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
MX51_ECSPI_DMA_TX_WML(tx_wml) |
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
- MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
@@ -1520,6 +1525,8 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
reinit_completion(&spi_imx->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
+ spi_imx->devtype_data->trigger(spi_imx);
+
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
/* Wait SDMA to finish the data transfer.*/
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index d59cc8a18484..c86dc56f38b4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -300,7 +300,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
- stalled = 10;
+ stalled = 32;
while (rx_words) {
if (rx_words == n_words && !(stalled--) &&
!(sr & XSPI_SR_TX_EMPTY_MASK) &&
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e0647a06890..e25df9990f82 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2851,6 +2851,18 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
+ /*
+ * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
+ * the GPIO controller does not have a driver yet. This needs to be done
+ * here too, because this call sets the GPIO direction and/or bias.
+ * Setting these needs to be done even if there is no driver, in which
+ * case spi_probe() will never get called.
+ * TODO: ideally the setup of the GPIO should be handled in a generic
+ * manner in the ACPI/gpiolib core code.
+ */
+ if (spi->irq < 0)
+ spi->irq = acpi_dev_gpio_irq_get(adev, 0);
+
acpi_device_set_enumerated(adev);
adev->power.flags.ignore_parent = true;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c7b7da629741..01a8e349dc4d 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -894,6 +894,9 @@ static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+ if (!tl_hba->sh)
+ return -ENODEV;
+
return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 35ded4330431..8f7f50acb6d6 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -592,14 +592,15 @@ static void vhost_net_busy_poll(struct vhost_net *net,
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_net_virtqueue *tnvq,
unsigned int *out_num, unsigned int *in_num,
- struct msghdr *msghdr, bool *busyloop_intr)
+ struct msghdr *msghdr, bool *busyloop_intr,
+ unsigned int *ndesc)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *rvq = &rnvq->vq;
struct vhost_virtqueue *tvq = &tnvq->vq;
- int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
- out_num, in_num, NULL, NULL);
+ int r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+ out_num, in_num, NULL, NULL, ndesc);
if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */
@@ -610,8 +611,8 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
- r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
- out_num, in_num, NULL, NULL);
+ r = vhost_get_vq_desc_n(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
+ out_num, in_num, NULL, NULL, ndesc);
}
return r;
@@ -642,12 +643,14 @@ static int get_tx_bufs(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct msghdr *msg,
unsigned int *out, unsigned int *in,
- size_t *len, bool *busyloop_intr)
+ size_t *len, bool *busyloop_intr,
+ unsigned int *ndesc)
{
struct vhost_virtqueue *vq = &nvq->vq;
int ret;
- ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg,
+ busyloop_intr, ndesc);
if (ret < 0 || ret == vq->num)
return ret;
@@ -766,6 +769,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ unsigned int ndesc = 0;
do {
bool busyloop_intr = false;
@@ -774,7 +778,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
vhost_tx_batch(net, nvq, sock, &msg);
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
- &busyloop_intr);
+ &busyloop_intr, &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -806,7 +810,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
goto done;
} else if (unlikely(err != -ENOSPC)) {
vhost_tx_batch(net, nvq, sock, &msg);
- vhost_discard_vq_desc(vq, 1);
+ vhost_discard_vq_desc(vq, 1, ndesc);
vhost_net_enable_vq(net, vq);
break;
}
@@ -829,7 +833,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
- vhost_discard_vq_desc(vq, 1);
+ vhost_discard_vq_desc(vq, 1, ndesc);
vhost_net_enable_vq(net, vq);
break;
}
@@ -868,6 +872,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
int err;
struct vhost_net_ubuf_ref *ubufs;
struct ubuf_info_msgzc *ubuf;
+ unsigned int ndesc = 0;
bool zcopy_used;
int sent_pkts = 0;
@@ -879,7 +884,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
busyloop_intr = false;
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
- &busyloop_intr);
+ &busyloop_intr, &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -941,7 +946,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
if (retry) {
- vhost_discard_vq_desc(vq, 1);
+ vhost_discard_vq_desc(vq, 1, ndesc);
vhost_net_enable_vq(net, vq);
break;
}
@@ -1045,11 +1050,12 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
unsigned *iovcount,
struct vhost_log *log,
unsigned *log_num,
- unsigned int quota)
+ unsigned int quota,
+ unsigned int *ndesc)
{
struct vhost_virtqueue *vq = &nvq->vq;
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
- unsigned int out, in;
+ unsigned int out, in, desc_num, n = 0;
int seg = 0;
int headcount = 0;
unsigned d;
@@ -1064,9 +1070,9 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
r = -ENOBUFS;
goto err;
}
- r = vhost_get_vq_desc(vq, vq->iov + seg,
- ARRAY_SIZE(vq->iov) - seg, &out,
- &in, log, log_num);
+ r = vhost_get_vq_desc_n(vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num, &desc_num);
if (unlikely(r < 0))
goto err;
@@ -1093,6 +1099,7 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
++headcount;
datalen -= len;
seg += in;
+ n += desc_num;
}
*iovcount = seg;
@@ -1113,9 +1120,11 @@ static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
nheads[0] = headcount;
}
+ *ndesc = n;
+
return headcount;
err:
- vhost_discard_vq_desc(vq, headcount);
+ vhost_discard_vq_desc(vq, headcount, n);
return r;
}
@@ -1151,6 +1160,7 @@ static void handle_rx(struct vhost_net *net)
struct iov_iter fixup;
__virtio16 num_buffers;
int recv_pkts = 0;
+ unsigned int ndesc;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
sock = vhost_vq_get_backend(vq);
@@ -1182,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
headcount = get_rx_bufs(nvq, vq->heads + count,
vq->nheads + count,
vhost_len, &in, vq_log, &log,
- likely(mergeable) ? UIO_MAXIOV : 1);
+ likely(mergeable) ? UIO_MAXIOV : 1,
+ &ndesc);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
@@ -1228,7 +1239,7 @@ static void handle_rx(struct vhost_net *net)
if (unlikely(err != sock_len)) {
pr_debug("Discarded rx packet: "
" len %d, expected %zd\n", err, sock_len);
- vhost_discard_vq_desc(vq, headcount);
+ vhost_discard_vq_desc(vq, headcount, ndesc);
continue;
}
/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
@@ -1252,7 +1263,7 @@ static void handle_rx(struct vhost_net *net)
copy_to_iter(&num_buffers, sizeof num_buffers,
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
- vhost_discard_vq_desc(vq, headcount);
+ vhost_discard_vq_desc(vq, headcount, ndesc);
goto out;
}
nvq->done_idx += headcount;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 8570fdf2e14a..a78226b37739 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2792,18 +2792,34 @@ static int get_indirect(struct vhost_virtqueue *vq,
return 0;
}
-/* This looks in the virtqueue and for the first available buffer, and converts
- * it to an iovec for convenient access. Since descriptors consist of some
- * number of output then some number of input descriptors, it's actually two
- * iovecs, but we pack them into one and note how many of each there were.
+/**
+ * vhost_get_vq_desc_n - Fetch the next available descriptor chain and build iovecs
+ * @vq: target virtqueue
+ * @iov: array that receives the scatter/gather segments
+ * @iov_size: capacity of @iov in elements
+ * @out_num: the number of output segments
+ * @in_num: the number of input segments
+ * @log: optional array to record addr/len for each writable segment; NULL if unused
+ * @log_num: optional output; number of entries written to @log when provided
+ * @ndesc: optional output; number of descriptors consumed from the available ring
+ * (useful for rollback via vhost_discard_vq_desc)
*
- * This function returns the descriptor number found, or vq->num (which is
- * never a valid descriptor number) if none was found. A negative code is
- * returned on error. */
-int vhost_get_vq_desc(struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num)
+ * Extracts one available descriptor chain from @vq and translates guest addresses
+ * into host iovecs.
+ *
+ * On success, advances @vq->last_avail_idx by 1 and @vq->next_avail_head by the
+ * number of descriptors consumed (also stored via @ndesc when non-NULL).
+ *
+ * Return:
+ * - head index in [0, @vq->num) on success;
+ * - @vq->num if no descriptor is currently available;
+ * - negative errno on failure
+ */
+int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ unsigned int *ndesc)
{
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
struct vring_desc desc;
@@ -2921,17 +2937,49 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
vq->last_avail_idx++;
vq->next_avail_head += c;
+ if (ndesc)
+ *ndesc = c;
+
/* Assume notifications from guest are disabled at this point,
* if they aren't we would need to update avail_event index. */
BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc_n);
+
+/* This looks in the virtqueue and for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error.
+ */
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ return vhost_get_vq_desc_n(vq, iov, iov_size, out_num, in_num,
+ log, log_num, NULL);
+}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
-/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
-void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
+/**
+ * vhost_discard_vq_desc - Reverse the effect of vhost_get_vq_desc_n()
+ * @vq: target virtqueue
+ * @nbufs: number of buffers to roll back
+ * @ndesc: number of descriptors to roll back
+ *
+ * Rewinds the internal consumer cursors after a failed attempt to use buffers
+ * returned by vhost_get_vq_desc_n().
+ */
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int nbufs,
+ unsigned int ndesc)
{
- vq->last_avail_idx -= n;
+ vq->next_avail_head -= ndesc;
+ vq->last_avail_idx -= nbufs;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 621a6d9a8791..b49f08e4a1b4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -230,7 +230,15 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num);
-void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+
+int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ unsigned int *ndesc);
+
+void vhost_discard_vq_desc(struct vhost_virtqueue *, int nbufs,
+ unsigned int ndesc);
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index f31359922e98..d9b6fa1088b7 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -229,7 +229,7 @@ error:
* @name: The name of the cell.
* @namesz: The strlen of the cell name.
* @vllist: A colon/comma separated list of numeric IP addresses or NULL.
- * @excl: T if an error should be given if the cell name already exists.
+ * @reason: The reason we're doing the lookup
* @trace: The reason to be logged if the lookup is successful.
*
* Look up a cell record by name and query the DNS for VL server addresses if
@@ -239,7 +239,8 @@ error:
*/
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
- const char *vllist, bool excl,
+ const char *vllist,
+ enum afs_lookup_cell_for reason,
enum afs_cell_trace trace)
{
struct afs_cell *cell, *candidate, *cursor;
@@ -247,12 +248,18 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
enum afs_cell_state state;
int ret, n;
- _enter("%s,%s", name, vllist);
+ _enter("%s,%s,%u", name, vllist, reason);
- if (!excl) {
+ if (reason != AFS_LOOKUP_CELL_PRELOAD) {
cell = afs_find_cell(net, name, namesz, trace);
- if (!IS_ERR(cell))
+ if (!IS_ERR(cell)) {
+ if (reason == AFS_LOOKUP_CELL_DYNROOT)
+ goto no_wait;
+ if (cell->state == AFS_CELL_SETTING_UP ||
+ cell->state == AFS_CELL_UNLOOKED)
+ goto lookup_cell;
goto wait_for_cell;
+ }
}
/* Assume we're probably going to create a cell and preallocate and
@@ -298,26 +305,69 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
- afs_queue_cell(cell, afs_cell_trace_queue_new);
+lookup_cell:
+ if (reason != AFS_LOOKUP_CELL_PRELOAD &&
+ reason != AFS_LOOKUP_CELL_ROOTCELL) {
+ set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
+ afs_queue_cell(cell, afs_cell_trace_queue_new);
+ }
wait_for_cell:
- _debug("wait_for_cell");
state = smp_load_acquire(&cell->state); /* vs error */
- if (state != AFS_CELL_ACTIVE &&
- state != AFS_CELL_DEAD) {
+ switch (state) {
+ case AFS_CELL_ACTIVE:
+ case AFS_CELL_DEAD:
+ break;
+ case AFS_CELL_UNLOOKED:
+ default:
+ if (reason == AFS_LOOKUP_CELL_PRELOAD ||
+ reason == AFS_LOOKUP_CELL_ROOTCELL)
+ break;
+ _debug("wait_for_cell");
afs_see_cell(cell, afs_cell_trace_wait);
wait_var_event(&cell->state,
({
state = smp_load_acquire(&cell->state); /* vs error */
state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
}));
+ _debug("waited_for_cell %d %d", cell->state, cell->error);
}
+no_wait:
/* Check the state obtained from the wait check. */
+ state = smp_load_acquire(&cell->state); /* vs error */
if (state == AFS_CELL_DEAD) {
ret = cell->error;
goto error;
}
+ if (state == AFS_CELL_ACTIVE) {
+ switch (cell->dns_status) {
+ case DNS_LOOKUP_NOT_DONE:
+ if (cell->dns_source == DNS_RECORD_FROM_CONFIG) {
+ ret = 0;
+ break;
+ }
+ fallthrough;
+ default:
+ ret = -EIO;
+ goto error;
+ case DNS_LOOKUP_GOOD:
+ case DNS_LOOKUP_GOOD_WITH_BAD:
+ ret = 0;
+ break;
+ case DNS_LOOKUP_GOT_NOT_FOUND:
+ ret = -ENOENT;
+ goto error;
+ case DNS_LOOKUP_BAD:
+ ret = -EREMOTEIO;
+ goto error;
+ case DNS_LOOKUP_GOT_LOCAL_FAILURE:
+ case DNS_LOOKUP_GOT_TEMP_FAILURE:
+ case DNS_LOOKUP_GOT_NS_FAILURE:
+ ret = -EDESTADDRREQ;
+ goto error;
+ }
+ }
_leave(" = %p [cell]", cell);
return cell;
@@ -325,7 +375,7 @@ wait_for_cell:
cell_already_exists:
_debug("cell exists");
cell = cursor;
- if (excl) {
+ if (reason == AFS_LOOKUP_CELL_PRELOAD) {
ret = -EEXIST;
} else {
afs_use_cell(cursor, trace);
@@ -384,7 +434,8 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
return -EINVAL;
/* allocate a cell record for the root/workstation cell */
- new_root = afs_lookup_cell(net, rootcell, len, vllist, false,
+ new_root = afs_lookup_cell(net, rootcell, len, vllist,
+ AFS_LOOKUP_CELL_ROOTCELL,
afs_cell_trace_use_lookup_ws);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
@@ -777,6 +828,7 @@ static bool afs_manage_cell(struct afs_cell *cell)
switch (cell->state) {
case AFS_CELL_SETTING_UP:
goto set_up_cell;
+ case AFS_CELL_UNLOOKED:
case AFS_CELL_ACTIVE:
goto cell_is_active;
case AFS_CELL_REMOVING:
@@ -797,7 +849,7 @@ set_up_cell:
goto remove_cell;
}
- afs_set_cell_state(cell, AFS_CELL_ACTIVE);
+ afs_set_cell_state(cell, AFS_CELL_UNLOOKED);
cell_is_active:
if (afs_has_cell_expired(cell, &next_manage))
@@ -807,6 +859,8 @@ cell_is_active:
ret = afs_update_cell(cell);
if (ret < 0)
cell->error = ret;
+ if (cell->state == AFS_CELL_UNLOOKED)
+ afs_set_cell_state(cell, AFS_CELL_ACTIVE);
}
if (next_manage < TIME64_MAX && cell->net->live) {
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 8c6130789fde..dc9d29e3739e 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -108,7 +108,8 @@ static struct dentry *afs_dynroot_lookup_cell(struct inode *dir, struct dentry *
dotted = true;
}
- cell = afs_lookup_cell(net, name, len, NULL, false,
+ cell = afs_lookup_cell(net, name, len, NULL,
+ AFS_LOOKUP_CELL_DYNROOT,
afs_cell_trace_use_lookup_dynroot);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a45ae5c2ef8a..b92f96f56767 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -343,6 +343,7 @@ extern const char afs_init_sysname[];
enum afs_cell_state {
AFS_CELL_SETTING_UP,
+ AFS_CELL_UNLOOKED,
AFS_CELL_ACTIVE,
AFS_CELL_REMOVING,
AFS_CELL_DEAD,
@@ -1049,9 +1050,18 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
extern int afs_cell_init(struct afs_net *, const char *);
extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned,
enum afs_cell_trace);
+enum afs_lookup_cell_for {
+ AFS_LOOKUP_CELL_DYNROOT,
+ AFS_LOOKUP_CELL_MOUNTPOINT,
+ AFS_LOOKUP_CELL_DIRECT_MOUNT,
+ AFS_LOOKUP_CELL_PRELOAD,
+ AFS_LOOKUP_CELL_ROOTCELL,
+ AFS_LOOKUP_CELL_ALIAS_CHECK,
+};
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
- const char *vllist, bool excl,
+ const char *vllist,
+ enum afs_lookup_cell_for reason,
enum afs_cell_trace trace);
extern struct afs_cell *afs_use_cell(struct afs_cell *, enum afs_cell_trace);
void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 1ad048e6e164..57c204a3c04e 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -107,7 +107,8 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size > AFS_MAXCELLNAME)
return -ENAMETOOLONG;
- cell = afs_lookup_cell(ctx->net, p, size, NULL, false,
+ cell = afs_lookup_cell(ctx->net, p, size, NULL,
+ AFS_LOOKUP_CELL_MOUNTPOINT,
afs_cell_trace_use_lookup_mntpt);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 40e879c8ca77..44520549b509 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -122,7 +122,8 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
if (strcmp(buf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_lookup_cell(net, name, strlen(name), args, true,
+ cell = afs_lookup_cell(net, name, strlen(name), args,
+ AFS_LOOKUP_CELL_PRELOAD,
afs_cell_trace_use_lookup_add);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index da407f2d6f0d..d672b7ab57ae 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -290,7 +290,7 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
/* lookup the cell record */
if (cellname) {
cell = afs_lookup_cell(ctx->net, cellname, cellnamesz,
- NULL, false,
+ NULL, AFS_LOOKUP_CELL_DIRECT_MOUNT,
afs_cell_trace_use_lookup_mount);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%*.*s'\n",
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index 709b4cdb723e..fc9676abd252 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -269,7 +269,8 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
if (!name_len || name_len > AFS_MAXCELLNAME)
master = ERR_PTR(-EOPNOTSUPP);
else
- master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false,
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL,
+ AFS_LOOKUP_CELL_ALIAS_CHECK,
afs_cell_trace_use_lookup_canonical);
kfree(cell_name);
if (IS_ERR(master))
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 1d41ce477df5..984b365df046 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -61,7 +61,19 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
di = (struct bfs_inode *)bh->b_data + off;
- inode->i_mode = 0x0000FFFF & le32_to_cpu(di->i_mode);
+ /*
+ * https://martin.hinner.info/fs/bfs/bfs-structure.html explains that
+ * BFS in SCO UnixWare environment used only lower 9 bits of di->i_mode
+ * value. This means that, although bfs_write_inode() saves whole
+ * inode->i_mode bits (which include S_IFMT bits and S_IS{UID,GID,VTX}
+ * bits), middle 7 bits of di->i_mode value can be garbage when these
+ * bits were not saved by bfs_write_inode().
+ * Since we can't tell whether middle 7 bits are garbage, use only
+ * lower 12 bits (i.e. tolerate S_IS{UID,GID,VTX} bits possibly being
+ * garbage) and reconstruct S_IFMT bits for Linux environment from
+ * di->i_vtype value.
+ */
+ inode->i_mode = 0x00000FFF & le32_to_cpu(di->i_mode);
if (le32_to_cpu(di->i_vtype) == BFS_VDIR) {
inode->i_mode |= S_IFDIR;
inode->i_op = &bfs_dir_inops;
@@ -71,6 +83,11 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
inode->i_mapping->a_ops = &bfs_aops;
+ } else {
+ brelse(bh);
+ printf("Unknown vtype=%u %s:%08lx\n",
+ le32_to_cpu(di->i_vtype), inode->i_sb->s_id, ino);
+ goto error;
}
BFS_I(inode)->i_sblock = le32_to_cpu(di->i_sblock);
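The hunk above trusts only the low 12 permission and set-id/sticky bits from disk and rebuilds the S_IFMT bits from di->i_vtype. A minimal sketch of that sanitisation, not part of the patch; BFS_VREG is assumed to be the regular-file vtype (only BFS_VDIR appears in the hunk):

    /* Illustrative sketch: sanitise an on-disk BFS mode for Linux. */
    static unsigned int bfs_example_sanitize_mode(u32 disk_mode, u32 vtype)
    {
    	unsigned int mode = disk_mode & 0x00000FFF;	/* rwx + SUID/SGID/sticky */

    	if (vtype == BFS_VDIR)
    		mode |= S_IFDIR;
    	else if (vtype == BFS_VREG)		/* assumed vtype constant */
    		mode |= S_IFREG;
    	/* any other vtype is rejected by bfs_iget() above */
    	return mode;
    }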
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index a839f960cd4a..a8b1d79e4af0 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -837,8 +837,10 @@ out:
inode_unlock(d_inode(root));
if (err) {
- if (f)
+ if (f) {
+ exe_file_allow_write_access(f);
filp_close(f, NULL);
+ }
kfree(e);
return err;
}
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 1f4d8ce56667..6de97565d5f7 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -533,6 +533,7 @@ static struct file_system_type efivarfs_type = {
.init_fs_context = efivarfs_init_fs_context,
.kill_sb = efivarfs_kill_sb,
.parameters = efivarfs_parameters,
+ .fs_flags = FS_POWER_FREEZE,
};
static __init int efivarfs_init(void)
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 7f9592856bf7..74d451f732c7 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -433,7 +433,10 @@ static int exfat_read_boot_sector(struct super_block *sb)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
/* set block size to read super block */
- sb_min_blocksize(sb, 512);
+ if (!sb_min_blocksize(sb, 512)) {
+ exfat_err(sb, "unable to set blocksize");
+ return -EINVAL;
+ }
/* read boot sector */
sbi->boot_bh = sb_bread(sb, 0);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 9648ed097816..9cfe20a3daaf 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1595,8 +1595,12 @@ int fat_fill_super(struct super_block *sb, struct fs_context *fc,
setup(sb); /* flavour-specific stuff that needs options */
+ error = -EINVAL;
+ if (!sb_min_blocksize(sb, 512)) {
+ fat_msg(sb, KERN_ERR, "unable to set blocksize");
+ goto out_fail;
+ }
error = -EIO;
- sb_min_blocksize(sb, 512);
bh = sb_bread(sb, 0);
if (bh == NULL) {
fat_msg(sb, KERN_ERR, "unable to read boot sector");
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 6bc7c97b017d..b2f6486fe1d5 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -373,7 +373,7 @@ static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
sprintf(buff, "%d", i);
fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
- if (!fs->mqs_kobj) {
+ if (!fsvq->kobj) {
ret = -ENOMEM;
goto out_del;
}
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 1e1acf5775ab..86455eebbf6c 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -979,7 +979,7 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct hostfs_fs_info *fsi = fc->s_fs_info;
struct fs_parse_result result;
- char *host_root;
+ char *host_root, *tmp_root;
int opt;
opt = fs_parse(fc, hostfs_param_specs, param, &result);
@@ -990,11 +990,13 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_hostfs:
host_root = param->string;
if (!*host_root)
- host_root = "";
- fsi->host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
- if (fsi->host_root_path == NULL)
+ break;
+ tmp_root = kasprintf(GFP_KERNEL, "%s%s",
+ fsi->host_root_path, host_root);
+ if (!tmp_root)
return -ENOMEM;
+ kfree(fsi->host_root_path);
+ fsi->host_root_path = tmp_root;
break;
}
@@ -1004,17 +1006,17 @@ static int hostfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
static int hostfs_parse_monolithic(struct fs_context *fc, void *data)
{
struct hostfs_fs_info *fsi = fc->s_fs_info;
- char *host_root = (char *)data;
+ char *tmp_root, *host_root = (char *)data;
/* NULL is printed as '(null)' by printf(): avoid that. */
if (host_root == NULL)
- host_root = "";
+ return 0;
- fsi->host_root_path =
- kasprintf(GFP_KERNEL, "%s/%s", root_ino, host_root);
- if (fsi->host_root_path == NULL)
+ tmp_root = kasprintf(GFP_KERNEL, "%s%s", fsi->host_root_path, host_root);
+ if (!tmp_root)
return -ENOMEM;
-
+ kfree(fsi->host_root_path);
+ fsi->host_root_path = tmp_root;
return 0;
}
@@ -1049,6 +1051,11 @@ static int hostfs_init_fs_context(struct fs_context *fc)
if (!fsi)
return -ENOMEM;
+ fsi->host_root_path = kasprintf(GFP_KERNEL, "%s/", root_ino);
+ if (!fsi->host_root_path) {
+ kfree(fsi);
+ return -ENOMEM;
+ }
fc->s_fs_info = fsi;
fc->ops = &hostfs_context_ops;
return 0;
diff --git a/fs/inode.c b/fs/inode.c
index ec9339024ac3..cff1d3af0d57 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1967,6 +1967,18 @@ retry:
}
EXPORT_SYMBOL(iput);
+/**
+ * iput_not_last - put an inode assuming this is not the last reference
+ * @inode: inode to put
+ */
+void iput_not_last(struct inode *inode)
+{
+ VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 2, inode);
+
+ WARN_ON(atomic_sub_return(1, &inode->i_count) == 0);
+}
+EXPORT_SYMBOL(iput_not_last);
+
#ifdef CONFIG_BLOCK
/**
* bmap - find a block number in a file
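iput_not_last() above lets a caller drop an inode reference it knows is not the last one, skipping the eviction work in iput(). A minimal usage sketch, not part of the patch, assuming the caller really does hold a second reference (for example through a still-attached dentry):

    /* Illustrative sketch only. */
    static void example_put_borrowed_ref(struct inode *inode)
    {
    	/*
    	 * Safe only while something else is guaranteed to keep
    	 * inode->i_count >= 2; otherwise plain iput() must be used.
    	 */
    	iput_not_last(inode);
    }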
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6f0e6b19383c..ad3143d4066b 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -610,6 +610,11 @@ static int isofs_fill_super(struct super_block *s, struct fs_context *fc)
goto out_freesbi;
}
opt->blocksize = sb_min_blocksize(s, opt->blocksize);
+ if (!opt->blocksize) {
+ printk(KERN_ERR
+ "ISOFS: unable to set blocksize\n");
+ goto out_freesbi;
+ }
sbi->s_high_sierra = 0; /* default is iso9660 */
sbi->s_session = opt->session;
diff --git a/fs/lockd/netlink.c b/fs/lockd/netlink.c
index 6e00b02cad90..880c42b4f8c3 100644
--- a/fs/lockd/netlink.c
+++ b/fs/lockd/netlink.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/lockd.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/fs/lockd/netlink.h b/fs/lockd/netlink.h
index 1920543a7955..d8408f077dd8 100644
--- a/fs/lockd/netlink.h
+++ b/fs/lockd/netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/lockd.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_LOCKD_GEN_H
#define _LINUX_LOCKD_GEN_H
diff --git a/fs/namespace.c b/fs/namespace.c
index d82910f33dc4..2bad25709b2c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -132,16 +132,6 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
-static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
-{
- struct ns_common *ns;
-
- if (!node)
- return NULL;
- ns = rb_entry(node, struct ns_common, ns_tree_node);
- return container_of(ns, struct mnt_namespace, ns);
-}
-
static void mnt_ns_release(struct mnt_namespace *ns)
{
/* keep alive for {list,stat}mount() */
@@ -151,7 +141,8 @@ static void mnt_ns_release(struct mnt_namespace *ns)
kfree(ns);
}
}
-DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))
+DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
+ if (!IS_ERR(_T)) mnt_ns_release(_T))
static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
@@ -5454,11 +5445,11 @@ static int statmount_string(struct kstatmount *s, u64 flag)
ret = statmount_sb_source(s, seq);
break;
case STATMOUNT_MNT_UIDMAP:
- sm->mnt_uidmap = start;
+ offp = &sm->mnt_uidmap;
ret = statmount_mnt_uidmap(s, seq);
break;
case STATMOUNT_MNT_GIDMAP:
- sm->mnt_gidmap = start;
+ offp = &sm->mnt_gidmap;
ret = statmount_mnt_gidmap(s, seq);
break;
default:
@@ -5736,7 +5727,7 @@ static int copy_mnt_id_req(const struct mnt_id_req __user *req,
ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
if (ret)
return ret;
- if (kreq->spare != 0)
+ if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
return -EINVAL;
/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
@@ -5753,16 +5744,12 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
{
struct mnt_namespace *mnt_ns;
- if (kreq->mnt_ns_id && kreq->spare)
- return ERR_PTR(-EINVAL);
-
- if (kreq->mnt_ns_id)
- return lookup_mnt_ns(kreq->mnt_ns_id);
-
- if (kreq->spare) {
+ if (kreq->mnt_ns_id) {
+ mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
+ } else if (kreq->mnt_ns_fd) {
struct ns_common *ns;
- CLASS(fd, f)(kreq->spare);
+ CLASS(fd, f)(kreq->mnt_ns_fd);
if (fd_empty(f))
return ERR_PTR(-EBADF);
@@ -5777,6 +5764,8 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
} else {
mnt_ns = current->nsproxy->mnt_ns;
}
+ if (!mnt_ns)
+ return ERR_PTR(-ENOENT);
refcount_inc(&mnt_ns->passive);
return mnt_ns;
@@ -5801,8 +5790,8 @@ SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
return ret;
ns = grab_requested_mnt_ns(&kreq);
- if (!ns)
- return -ENOENT;
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
!ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
@@ -5912,8 +5901,8 @@ static void __free_klistmount_free(const struct klistmount *kls)
static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
size_t nr_mnt_ids)
{
-
u64 last_mnt_id = kreq->param;
+ struct mnt_namespace *ns;
/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
@@ -5927,9 +5916,10 @@ static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *
if (!kls->kmnt_ids)
return -ENOMEM;
- kls->ns = grab_requested_mnt_ns(kreq);
- if (!kls->ns)
- return -ENOENT;
+ ns = grab_requested_mnt_ns(kreq);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+ kls->ns = ns;
kls->mnt_parent_id = kreq->mnt_id;
return 0;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4e3dcc157a83..54699299d5b1 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -338,6 +338,14 @@ again:
/* Match the xprt security policy */
if (clp->cl_xprtsec.policy != data->xprtsec.policy)
continue;
+ if (clp->cl_xprtsec.policy == RPC_XPRTSEC_TLS_X509) {
+ if (clp->cl_xprtsec.cert_serial !=
+ data->xprtsec.cert_serial)
+ continue;
+ if (clp->cl_xprtsec.privkey_serial !=
+ data->xprtsec.privkey_serial)
+ continue;
+ }
refcount_inc(&clp->cl_count);
return clp;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 46d9c65d50f8..ea9f6ca8f30f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2268,11 +2268,12 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
return -ENAMETOOLONG;
if (open_flags & O_CREAT) {
- file->f_mode |= FMODE_CREATED;
error = nfs_do_create(dir, dentry, mode, open_flags);
- if (error)
+ if (!error) {
+ file->f_mode |= FMODE_CREATED;
+ return finish_open(file, dentry, NULL);
+ } else if (error != -EEXIST || open_flags & O_EXCL)
return error;
- return finish_open(file, dentry, NULL);
}
if (d_in_lookup(dentry)) {
/* The only flags nfs_lookup considers are
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 18b57c7c2f97..13ad70fc00d8 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -718,6 +718,8 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct nfs_fattr *fattr;
loff_t oldsize = i_size_read(inode);
int error = 0;
+ kuid_t task_uid = current_fsuid();
+ kuid_t owner_uid = inode->i_uid;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
@@ -739,9 +741,11 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
spin_lock(&inode->i_lock);
if (attr->ia_valid & ATTR_MTIME_SET) {
- nfs_set_timestamps_to_ts(inode, attr);
- attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
+ if (uid_eq(task_uid, owner_uid)) {
+ nfs_set_timestamps_to_ts(inode, attr);
+ attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
ATTR_ATIME|ATTR_ATIME_SET);
+ }
} else {
nfs_update_timestamps(inode, attr->ia_valid);
attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
@@ -751,10 +755,12 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
attr->ia_valid & ATTR_ATIME &&
!(attr->ia_valid & ATTR_MTIME)) {
if (attr->ia_valid & ATTR_ATIME_SET) {
- spin_lock(&inode->i_lock);
- nfs_set_timestamps_to_ts(inode, attr);
- spin_unlock(&inode->i_lock);
- attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
+ if (uid_eq(task_uid, owner_uid)) {
+ spin_lock(&inode->i_lock);
+ nfs_set_timestamps_to_ts(inode, attr);
+ spin_unlock(&inode->i_lock);
+ attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
+ }
} else {
nfs_update_delegated_atime(inode);
attr->ia_valid &= ~ATTR_ATIME;
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index 2c0455e91571..656976b4f42c 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -42,10 +42,9 @@ struct nfs_local_kiocb {
/* Begin mostly DIO-specific members */
size_t end_len;
short int end_iter_index;
- short int n_iters;
+ atomic_t n_iters;
bool iter_is_dio_aligned[NFSLOCAL_MAX_IOS];
- loff_t offset[NFSLOCAL_MAX_IOS] ____cacheline_aligned;
- struct iov_iter iters[NFSLOCAL_MAX_IOS];
+ struct iov_iter iters[NFSLOCAL_MAX_IOS] ____cacheline_aligned;
/* End mostly DIO-specific members */
};
@@ -314,7 +313,9 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
init_sync_kiocb(&iocb->kiocb, file);
iocb->hdr = hdr;
+ iocb->kiocb.ki_pos = hdr->args.offset;
iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ iocb->kiocb.ki_complete = NULL;
iocb->aio_complete_work = NULL;
iocb->end_iter_index = -1;
@@ -388,13 +389,24 @@ static bool nfs_iov_iter_aligned_bvec(const struct iov_iter *i,
return true;
}
+static void
+nfs_local_iter_setup(struct iov_iter *iter, int rw, struct bio_vec *bvec,
+ unsigned int nvecs, unsigned long total,
+ size_t start, size_t len)
+{
+ iov_iter_bvec(iter, rw, bvec, nvecs, total);
+ if (start)
+ iov_iter_advance(iter, start);
+ iov_iter_truncate(iter, len);
+}
+
/*
* Setup as many as 3 iov_iter based on extents described by @local_dio.
* Returns the number of iov_iter that were setup.
*/
static int
nfs_local_iters_setup_dio(struct nfs_local_kiocb *iocb, int rw,
- unsigned int nvecs, size_t len,
+ unsigned int nvecs, unsigned long total,
struct nfs_local_dio *local_dio)
{
int n_iters = 0;
@@ -402,39 +414,17 @@ nfs_local_iters_setup_dio(struct nfs_local_kiocb *iocb, int rw,
/* Setup misaligned start? */
if (local_dio->start_len) {
- iov_iter_bvec(&iters[n_iters], rw, iocb->bvec, nvecs, len);
- iters[n_iters].count = local_dio->start_len;
- iocb->offset[n_iters] = iocb->hdr->args.offset;
- iocb->iter_is_dio_aligned[n_iters] = false;
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec,
+ nvecs, total, 0, local_dio->start_len);
++n_iters;
}
- /* Setup misaligned end?
- * If so, the end is purposely setup to be issued using buffered IO
- * before the middle (which will use DIO, if DIO-aligned, with AIO).
- * This creates problems if/when the end results in a partial write.
- * So must save index and length of end to handle this corner case.
- */
- if (local_dio->end_len) {
- iov_iter_bvec(&iters[n_iters], rw, iocb->bvec, nvecs, len);
- iocb->offset[n_iters] = local_dio->end_offset;
- iov_iter_advance(&iters[n_iters],
- local_dio->start_len + local_dio->middle_len);
- iocb->iter_is_dio_aligned[n_iters] = false;
- /* Save index and length of end */
- iocb->end_iter_index = n_iters;
- iocb->end_len = local_dio->end_len;
- ++n_iters;
- }
-
- /* Setup DIO-aligned middle to be issued last, to allow for
- * DIO with AIO completion (see nfs_local_call_{read,write}).
+ /*
+	 * Set up the DIO-aligned middle. If there is no misaligned end (below),
+	 * then AIO completion is used; see nfs_local_call_{read,write}
*/
- iov_iter_bvec(&iters[n_iters], rw, iocb->bvec, nvecs, len);
- if (local_dio->start_len)
- iov_iter_advance(&iters[n_iters], local_dio->start_len);
- iters[n_iters].count -= local_dio->end_len;
- iocb->offset[n_iters] = local_dio->middle_offset;
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec, nvecs,
+ total, local_dio->start_len, local_dio->middle_len);
iocb->iter_is_dio_aligned[n_iters] =
nfs_iov_iter_aligned_bvec(&iters[n_iters],
@@ -442,12 +432,22 @@ nfs_local_iters_setup_dio(struct nfs_local_kiocb *iocb, int rw,
if (unlikely(!iocb->iter_is_dio_aligned[n_iters])) {
trace_nfs_local_dio_misaligned(iocb->hdr->inode,
- iocb->hdr->args.offset, len, local_dio);
+ local_dio->start_len, local_dio->middle_len, local_dio);
return 0; /* no DIO-aligned IO possible */
}
+ iocb->end_iter_index = n_iters;
++n_iters;
- iocb->n_iters = n_iters;
+ /* Setup misaligned end? */
+ if (local_dio->end_len) {
+ nfs_local_iter_setup(&iters[n_iters], rw, iocb->bvec,
+ nvecs, total, local_dio->start_len +
+ local_dio->middle_len, local_dio->end_len);
+ iocb->end_iter_index = n_iters;
+ ++n_iters;
+ }
+
+ atomic_set(&iocb->n_iters, n_iters);
return n_iters;
}
@@ -473,18 +473,26 @@ nfs_local_iters_init(struct nfs_local_kiocb *iocb, int rw)
}
len = hdr->args.count - total;
+ /*
+ * For each iocb, iocb->n_iters is always at least 1 and we always
+ * end io after first nfs_local_pgio_done call unless misaligned DIO.
+ */
+ atomic_set(&iocb->n_iters, 1);
+
if (test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) {
struct nfs_local_dio local_dio;
if (nfs_is_local_dio_possible(iocb, rw, len, &local_dio) &&
- nfs_local_iters_setup_dio(iocb, rw, v, len, &local_dio) != 0)
+ nfs_local_iters_setup_dio(iocb, rw, v, len, &local_dio) != 0) {
+ /* Ensure DIO WRITE's IO on stable storage upon completion */
+ if (rw == ITER_SOURCE)
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
return; /* is DIO-aligned */
+ }
}
/* Use buffered IO */
- iocb->offset[0] = hdr->args.offset;
iov_iter_bvec(&iocb->iters[0], rw, iocb->bvec, v, len);
- iocb->n_iters = 1;
}
static void
@@ -504,9 +512,11 @@ nfs_local_pgio_init(struct nfs_pgio_header *hdr,
hdr->task.tk_start = ktime_get();
}
-static void
-nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+static bool
+nfs_local_pgio_done(struct nfs_local_kiocb *iocb, long status, bool force)
{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
/* Must handle partial completions */
if (status >= 0) {
hdr->res.count += status;
@@ -517,6 +527,12 @@ nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
hdr->task.tk_status = status;
}
+
+ if (force)
+ return true;
+
+ BUG_ON(atomic_read(&iocb->n_iters) <= 0);
+ return atomic_dec_and_test(&iocb->n_iters);
}
static void
@@ -547,11 +563,11 @@ static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb)
queue_work(nfsiod_workqueue, &iocb->work);
}
-static void
-nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+static void nfs_local_read_done(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
struct file *filp = iocb->kiocb.ki_filp;
+ long status = hdr->task.tk_status;
if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
/* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
@@ -564,20 +580,27 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
*/
hdr->res.replen = 0;
- if (hdr->res.count != hdr->args.count ||
- hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ /* nfs_readpage_result() handles short read */
+
+ if (hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
hdr->res.eof = true;
dprintk("%s: read %ld bytes eof %d.\n", __func__,
status > 0 ? status : 0, hdr->res.eof);
}
+static inline void nfs_local_read_iocb_done(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_read_done(iocb);
+ nfs_local_pgio_release(iocb);
+}
+
static void nfs_local_read_aio_complete_work(struct work_struct *work)
{
struct nfs_local_kiocb *iocb =
container_of(work, struct nfs_local_kiocb, work);
- nfs_local_pgio_release(iocb);
+ nfs_local_read_iocb_done(iocb);
}
static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
@@ -585,8 +608,10 @@ static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
struct nfs_local_kiocb *iocb =
container_of(kiocb, struct nfs_local_kiocb, kiocb);
- nfs_local_pgio_done(iocb->hdr, ret);
- nfs_local_read_done(iocb, ret);
+ /* AIO completion of DIO read should always be last to complete */
+ if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
+ return;
+
nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
}
@@ -596,32 +621,36 @@ static void nfs_local_call_read(struct work_struct *work)
container_of(work, struct nfs_local_kiocb, work);
struct file *filp = iocb->kiocb.ki_filp;
const struct cred *save_cred;
+ bool force_done = false;
ssize_t status;
+ int n_iters;
save_cred = override_creds(filp->f_cred);
- for (int i = 0; i < iocb->n_iters ; i++) {
+ n_iters = atomic_read(&iocb->n_iters);
+ for (int i = 0; i < n_iters ; i++) {
if (iocb->iter_is_dio_aligned[i]) {
iocb->kiocb.ki_flags |= IOCB_DIRECT;
- iocb->kiocb.ki_complete = nfs_local_read_aio_complete;
- iocb->aio_complete_work = nfs_local_read_aio_complete_work;
- }
+ /* Only use AIO completion if DIO-aligned segment is last */
+ if (i == iocb->end_iter_index) {
+ iocb->kiocb.ki_complete = nfs_local_read_aio_complete;
+ iocb->aio_complete_work = nfs_local_read_aio_complete_work;
+ }
+ } else
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
- iocb->kiocb.ki_pos = iocb->offset[i];
status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
if (status != -EIOCBQUEUED) {
- nfs_local_pgio_done(iocb->hdr, status);
- if (iocb->hdr->task.tk_status)
+ if (unlikely(status >= 0 && status < iocb->iters[i].count))
+ force_done = true; /* Partial read */
+ if (nfs_local_pgio_done(iocb, status, force_done)) {
+ nfs_local_read_iocb_done(iocb);
break;
+ }
}
}
revert_creds(save_cred);
-
- if (status != -EIOCBQUEUED) {
- nfs_local_read_done(iocb, status);
- nfs_local_pgio_release(iocb);
- }
}
static int
@@ -736,11 +765,10 @@ static void nfs_local_vfs_getattr(struct nfs_local_kiocb *iocb)
fattr->du.nfs3.used = stat.blocks << 9;
}
-static void
-nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
+static void nfs_local_write_done(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
- struct inode *inode = hdr->inode;
+ long status = hdr->task.tk_status;
dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
@@ -759,10 +787,17 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
status = -ENOSPC;
/* record -ENOSPC in terms of nfs_local_pgio_done */
- nfs_local_pgio_done(hdr, status);
+ (void) nfs_local_pgio_done(iocb, status, true);
}
if (hdr->task.tk_status < 0)
- nfs_reset_boot_verifier(inode);
+ nfs_reset_boot_verifier(hdr->inode);
+}
+
+static inline void nfs_local_write_iocb_done(struct nfs_local_kiocb *iocb)
+{
+ nfs_local_write_done(iocb);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
}
static void nfs_local_write_aio_complete_work(struct work_struct *work)
@@ -770,8 +805,7 @@ static void nfs_local_write_aio_complete_work(struct work_struct *work)
struct nfs_local_kiocb *iocb =
container_of(work, struct nfs_local_kiocb, work);
- nfs_local_vfs_getattr(iocb);
- nfs_local_pgio_release(iocb);
+ nfs_local_write_iocb_done(iocb);
}
static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
@@ -779,8 +813,10 @@ static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
struct nfs_local_kiocb *iocb =
container_of(kiocb, struct nfs_local_kiocb, kiocb);
- nfs_local_pgio_done(iocb->hdr, ret);
- nfs_local_write_done(iocb, ret);
+ /* AIO completion of DIO write should always be last to complete */
+ if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
+ return;
+
nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
}
@@ -791,63 +827,40 @@ static void nfs_local_call_write(struct work_struct *work)
struct file *filp = iocb->kiocb.ki_filp;
unsigned long old_flags = current->flags;
const struct cred *save_cred;
+ bool force_done = false;
ssize_t status;
+ int n_iters;
current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
save_cred = override_creds(filp->f_cred);
file_start_write(filp);
- for (int i = 0; i < iocb->n_iters ; i++) {
+ n_iters = atomic_read(&iocb->n_iters);
+ for (int i = 0; i < n_iters ; i++) {
if (iocb->iter_is_dio_aligned[i]) {
iocb->kiocb.ki_flags |= IOCB_DIRECT;
- iocb->kiocb.ki_complete = nfs_local_write_aio_complete;
- iocb->aio_complete_work = nfs_local_write_aio_complete_work;
- }
-retry:
- iocb->kiocb.ki_pos = iocb->offset[i];
+ /* Only use AIO completion if DIO-aligned segment is last */
+ if (i == iocb->end_iter_index) {
+ iocb->kiocb.ki_complete = nfs_local_write_aio_complete;
+ iocb->aio_complete_work = nfs_local_write_aio_complete_work;
+ }
+ } else
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
+
status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
if (status != -EIOCBQUEUED) {
- if (unlikely(status >= 0 && status < iocb->iters[i].count)) {
- /* partial write */
- if (i == iocb->end_iter_index) {
- /* Must not account partial end, otherwise, due
- * to end being issued before middle: the partial
- * write accounting in nfs_local_write_done()
- * would incorrectly advance hdr->args.offset
- */
- status = 0;
- } else {
- /* Partial write at start or buffered middle,
- * exit early.
- */
- nfs_local_pgio_done(iocb->hdr, status);
- break;
- }
- } else if (unlikely(status == -ENOTBLK &&
- (iocb->kiocb.ki_flags & IOCB_DIRECT))) {
- /* VFS will return -ENOTBLK if DIO WRITE fails to
- * invalidate the page cache. Retry using buffered IO.
- */
- iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
- iocb->kiocb.ki_complete = NULL;
- iocb->aio_complete_work = NULL;
- goto retry;
- }
- nfs_local_pgio_done(iocb->hdr, status);
- if (iocb->hdr->task.tk_status)
+ if (unlikely(status >= 0 && status < iocb->iters[i].count))
+ force_done = true; /* Partial write */
+ if (nfs_local_pgio_done(iocb, status, force_done)) {
+ nfs_local_write_iocb_done(iocb);
break;
+ }
}
}
file_end_write(filp);
revert_creds(save_cred);
current->flags = old_flags;
-
- if (status != -EIOCBQUEUED) {
- nfs_local_write_done(iocb, status);
- nfs_local_vfs_getattr(iocb);
- nfs_local_pgio_release(iocb);
- }
}
static int
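The nfs_local_iter_setup() helper added above carves one bvec array into up to three iov_iters: a misaligned start, a DIO-aligned middle and a misaligned end. A sketch of the three calls, not part of the patch; the start_len/middle_len/end_len fields are taken from struct nfs_local_dio as assumed from the hunks above:

    /* Illustrative sketch: slicing one bvec array three ways. */
    nfs_local_iter_setup(&iters[0], rw, bvec, nvecs, total,
    			 0, dio->start_len);			/* buffered head */
    nfs_local_iter_setup(&iters[1], rw, bvec, nvecs, total,
    			 dio->start_len, dio->middle_len);	/* DIO middle */
    nfs_local_iter_setup(&iters[2], rw, bvec, nvecs, total,
    			 dio->start_len + dio->middle_len,
    			 dio->end_len);				/* buffered tail */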
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 0d7310c1ee0c..5d97c1d38bb6 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -2,6 +2,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/sunrpc/addr.h>
+#include <net/handshake.h>
#include "internal.h"
#include "nfs3_fs.h"
#include "netns.h"
@@ -98,7 +99,11 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
- .xprtsec = mds_clp->cl_xprtsec,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ .cert_serial = TLS_NO_CERT,
+ .privkey_serial = TLS_NO_PRIVKEY,
+ },
.connect_timeout = connect_timeout,
.reconnect_timeout = connect_timeout,
};
@@ -111,9 +116,14 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
cl_init.hostname = buf;
switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_clp->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.xprtsec = mds_clp->cl_xprtsec;
+ else
+ ds_proto = XPRT_TRANSPORT_TCP;
+ fallthrough;
case XPRT_TRANSPORT_RDMA:
case XPRT_TRANSPORT_TCP:
- case XPRT_TRANSPORT_TCP_TLS:
if (mds_clp->cl_nconnect > 1)
cl_init.nconnect = mds_clp->cl_nconnect;
}
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 5998d6bd8a4f..3a4baed993c9 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -11,6 +11,7 @@
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <net/handshake.h>
#include "internal.h"
#include "callback.h"
#include "delegation.h"
@@ -983,7 +984,11 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
.net = mds_clp->cl_net,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
- .xprtsec = mds_srv->nfs_client->cl_xprtsec,
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ .cert_serial = TLS_NO_CERT,
+ .privkey_serial = TLS_NO_PRIVKEY,
+ },
};
char buf[INET6_ADDRSTRLEN + 1];
@@ -992,9 +997,14 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
cl_init.hostname = buf;
switch (ds_proto) {
+ case XPRT_TRANSPORT_TCP_TLS:
+ if (mds_srv->nfs_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE)
+ cl_init.xprtsec = mds_srv->nfs_client->cl_xprtsec;
+ else
+ ds_proto = XPRT_TRANSPORT_TCP;
+ fallthrough;
case XPRT_TRANSPORT_RDMA:
case XPRT_TRANSPORT_TCP:
- case XPRT_TRANSPORT_TCP_TLS:
if (mds_clp->cl_nconnect > 1) {
cl_init.nconnect = mds_clp->cl_nconnect;
cl_init.max_connect = NFS_MAX_TRANSPORTS;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 411776718494..93c6ce04332b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4715,16 +4715,19 @@ static int _nfs4_proc_lookupp(struct inode *inode,
};
unsigned short task_flags = 0;
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+ if (server->flags & NFS_MOUNT_SOFTREVAL)
task_flags |= RPC_TASK_TIMEOUT;
+ if (server->caps & NFS_CAP_MOVEABLE)
+ task_flags |= RPC_TASK_MOVEABLE;
args.bitmask = nfs4_bitmask(server, fattr->label);
nfs_fattr_init(fattr);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
- &res.seq_res, task_flags);
+ status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args,
+ &res.seq_res, task_flags);
dprintk("NFS reply lookupp: %d\n", status);
return status;
}
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 7b32afb29782..9976cc16b689 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -809,8 +809,11 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
unsigned int retrans)
{
struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
+ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
struct nfs4_pnfs_ds_addr *da;
unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
+ int ds_proto;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -834,27 +837,28 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
.xprtsec = clp->cl_xprtsec,
};
- if (da->da_transport != clp->cl_proto &&
- clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
- continue;
- if (da->da_transport == XPRT_TRANSPORT_TCP &&
- mds_srv->nfs_client->cl_proto == XPRT_TRANSPORT_TCP_TLS)
+ if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
+ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS)
xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
- if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ if (xprt_args.ident != clp->cl_proto)
+ continue;
+ if (xprt_args.dstaddr->sa_family !=
+ clp->cl_addr.ss_family)
continue;
/* Add this address as an alias */
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
- rpc_clnt_test_and_add_xprt, NULL);
+ rpc_clnt_test_and_add_xprt, NULL);
continue;
}
- if (da->da_transport == XPRT_TRANSPORT_TCP &&
- mds_srv->nfs_client->cl_proto == XPRT_TRANSPORT_TCP_TLS)
- da->da_transport = XPRT_TRANSPORT_TCP_TLS;
- clp = get_v3_ds_connect(mds_srv,
- &da->da_addr,
- da->da_addrlen, da->da_transport,
- timeo, retrans);
+
+ ds_proto = da->da_transport;
+ if (ds_proto == XPRT_TRANSPORT_TCP &&
+ xprtsec_policy != RPC_XPRTSEC_NONE)
+ ds_proto = XPRT_TRANSPORT_TCP_TLS;
+
+ clp = get_v3_ds_connect(mds_srv, &da->da_addr, da->da_addrlen,
+ ds_proto, timeo, retrans);
if (IS_ERR(clp))
continue;
clp->cl_rpcclient->cl_softerr = 0;
@@ -880,7 +884,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
u32 minor_version)
{
struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
+ enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
struct nfs4_pnfs_ds_addr *da;
+ int ds_proto;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -908,12 +915,8 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
.data = &xprtdata,
};
- if (da->da_transport != clp->cl_proto &&
- clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
- continue;
- if (da->da_transport == XPRT_TRANSPORT_TCP &&
- mds_srv->nfs_client->cl_proto ==
- XPRT_TRANSPORT_TCP_TLS) {
+ if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
+ clp->cl_proto == XPRT_TRANSPORT_TCP_TLS) {
struct sockaddr *addr =
(struct sockaddr *)&da->da_addr;
struct sockaddr_in *sin =
@@ -944,7 +947,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
xprt_args.servername = servername;
}
- if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ if (xprt_args.ident != clp->cl_proto)
+ continue;
+ if (xprt_args.dstaddr->sa_family !=
+ clp->cl_addr.ss_family)
continue;
/**
@@ -958,15 +964,14 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
if (xprtdata.cred)
put_cred(xprtdata.cred);
} else {
- if (da->da_transport == XPRT_TRANSPORT_TCP &&
- mds_srv->nfs_client->cl_proto ==
- XPRT_TRANSPORT_TCP_TLS)
- da->da_transport = XPRT_TRANSPORT_TCP_TLS;
- clp = nfs4_set_ds_client(mds_srv,
- &da->da_addr,
- da->da_addrlen,
- da->da_transport, timeo,
- retrans, minor_version);
+ ds_proto = da->da_transport;
+ if (ds_proto == XPRT_TRANSPORT_TCP &&
+ xprtsec_policy != RPC_XPRTSEC_NONE)
+ ds_proto = XPRT_TRANSPORT_TCP_TLS;
+
+ clp = nfs4_set_ds_client(mds_srv, &da->da_addr,
+ da->da_addrlen, ds_proto,
+ timeo, retrans, minor_version);
if (IS_ERR(clp))
continue;
@@ -977,7 +982,6 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
clp = ERR_PTR(-EIO);
continue;
}
-
}
}
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 545148d42dcc..ea6e6168092b 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -189,6 +189,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
return p;
kobject_put(&p->kobject);
+ kobject_put(&p->nfs_net_kobj);
}
return NULL;
}
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
index ca54aa583530..ac51a44e1065 100644
--- a/fs/nfsd/netlink.c
+++ b/fs/nfsd/netlink.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/nfsd.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
index 8eb903f24c41..478117ff6b8c 100644
--- a/fs/nfsd/netlink.h
+++ b/fs/nfsd/netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/nfsd.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_NFSD_GEN_H
#define _LINUX_NFSD_GEN_H
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 018055fd2cdb..e3ea6fe7edb4 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -16,6 +16,7 @@ static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);
+static void close_cached_dir_locked(struct cached_fid *cfid);
struct cached_dir_dentry {
struct list_head entry;
@@ -388,7 +389,7 @@ out:
* lease. Release one here, and the second below.
*/
cfid->has_lease = false;
- close_cached_dir(cfid);
+ close_cached_dir_locked(cfid);
}
spin_unlock(&cfids->cfid_list_lock);
@@ -480,18 +481,52 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
spin_lock(&cfid->cfids->cfid_list_lock);
if (cfid->has_lease) {
cfid->has_lease = false;
- close_cached_dir(cfid);
+ close_cached_dir_locked(cfid);
}
spin_unlock(&cfid->cfids->cfid_list_lock);
close_cached_dir(cfid);
}
-
+/**
+ * close_cached_dir - drop a reference of a cached dir
+ *
+ * The release function will be called with cfid_list_lock held to remove the
+ * cached dirs from the list before any other thread can take another @cfid
+ * ref. Must not be called with cfid_list_lock held; use
+ * close_cached_dir_locked() instead.
+ *
+ * @cfid: cached dir
+ */
void close_cached_dir(struct cached_fid *cfid)
{
+ lockdep_assert_not_held(&cfid->cfids->cfid_list_lock);
kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock);
}
+/**
+ * close_cached_dir_locked - put a reference of a cached dir with
+ * cfid_list_lock held
+ *
+ * Calling close_cached_dir() with cfid_list_lock held has the potential effect
+ * of causing a deadlock if the invariant of refcount >= 2 is false.
+ *
+ * This function is used in paths that hold cfid_list_lock and expect at least
+ * two references. If that invariant is violated, it WARNs and returns without
+ * dropping a reference; the final put must still go through
+ * close_cached_dir().
+ *
+ * @cfid: cached dir
+ */
+static void close_cached_dir_locked(struct cached_fid *cfid)
+{
+ lockdep_assert_held(&cfid->cfids->cfid_list_lock);
+
+ if (WARN_ON(kref_read(&cfid->refcount) < 2))
+ return;
+
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+}
+
/*
* Called from cifs_kill_sb when we unmount a share
*/
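The pair of helpers above encodes a simple rule: close_cached_dir_locked() drops an intermediate reference while cfid_list_lock is held, and the final put always goes through close_cached_dir() without the lock. A sketch of the pattern, not part of the patch, mirroring drop_cached_dir_by_name() above:

    /* Illustrative sketch only. */
    spin_lock(&cfid->cfids->cfid_list_lock);
    if (cfid->has_lease) {
    	cfid->has_lease = false;
    	close_cached_dir_locked(cfid);	/* refcount known to be >= 2 here */
    }
    spin_unlock(&cfid->cfids->cfid_list_lock);
    close_cached_dir(cfid);		/* may take cfid_list_lock itself */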
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 7da194f29fef..dcc50a2bfa4b 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1363,6 +1363,14 @@ do_retry:
if (rdata->result == -ENODATA) {
rdata->result = 0;
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ trace_smb3_read_err(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid,
+ rdata->subreq.start + rdata->subreq.transferred,
+ rdata->subreq.len - rdata->subreq.transferred,
+ rdata->result);
} else {
size_t trans = rdata->subreq.transferred + rdata->got_bytes;
if (trans < rdata->subreq.len &&
@@ -1374,6 +1382,13 @@ do_retry:
}
if (rdata->got_bytes)
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
+ trace_smb3_read_done(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid,
+ rdata->subreq.start + rdata->subreq.transferred,
+ rdata->got_bytes);
}
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
@@ -1445,6 +1460,13 @@ cifs_async_readv(struct cifs_io_subrequest *rdata)
rdata->iov[1].iov_base = (char *)smb + 4;
rdata->iov[1].iov_len = get_rfc1002_length(smb);
+ trace_smb3_read_enter(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.netfid,
+ tcon->tid, tcon->ses->Suid,
+ rdata->subreq.start, rdata->subreq.len);
+
rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
cifs_readv_callback, NULL, rdata, 0, NULL);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 176301191f29..388efb35fb98 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -4451,6 +4451,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
out:
kfree(ctx->username);
+ kfree(ctx->domainname);
kfree_sensitive(ctx->password);
kfree(origin_fullpath);
kfree(ctx);
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index e60927b2a7c8..2a0d8b87bd8e 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1435,12 +1435,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_errorf(fc, "Unknown error parsing devname\n");
goto cifs_parse_mount_err;
}
+ kfree(ctx->source);
ctx->source = smb3_fs_context_fullpath(ctx, '/');
if (IS_ERR(ctx->source)) {
ctx->source = NULL;
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
+ kfree(fc->source);
fc->source = kstrdup(ctx->source, GFP_KERNEL);
if (fc->source == NULL) {
cifs_errorf(fc, "OOM when copying UNC string\n");
@@ -1468,7 +1470,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
break;
}
- if (strnlen(param->string, CIFS_MAX_USERNAME_LEN) >
+ if (strnlen(param->string, CIFS_MAX_USERNAME_LEN) ==
CIFS_MAX_USERNAME_LEN) {
pr_warn("username too long\n");
goto cifs_parse_mount_err;
@@ -1832,6 +1834,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->password = NULL;
kfree_sensitive(ctx->password2);
ctx->password2 = NULL;
+ kfree(ctx->source);
+ ctx->source = NULL;
+ kfree(fc->source);
+ fc->source = NULL;
return -EINVAL;
}
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 85a4c55b61b8..c6c428c2e08d 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -290,6 +290,9 @@ static void smbd_disconnect_rdma_connection(struct smbdirect_socket *sc)
break;
case SMBDIRECT_SOCKET_CREATED:
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ break;
+
case SMBDIRECT_SOCKET_CONNECTED:
sc->status = SMBDIRECT_SOCKET_ERROR;
break;
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 051cd9dbba13..915cedde5d66 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -830,7 +830,7 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
if (!server || server->terminate)
continue;
- if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
+ if (CIFS_CHAN_NEEDS_RECONNECT(ses, cur))
continue;
/*
diff --git a/fs/super.c b/fs/super.c
index 5bab94fb7e03..277b84e5c279 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1183,11 +1183,14 @@ static inline bool get_active_super(struct super_block *sb)
static const char *filesystems_freeze_ptr = "filesystems_freeze";
-static void filesystems_freeze_callback(struct super_block *sb, void *unused)
+static void filesystems_freeze_callback(struct super_block *sb, void *freeze_all_ptr)
{
if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
return;
+ if (freeze_all_ptr && !(sb->s_type->fs_flags & FS_POWER_FREEZE))
+ return;
+
if (!get_active_super(sb))
return;
@@ -1201,9 +1204,13 @@ static void filesystems_freeze_callback(struct super_block *sb, void *unused)
deactivate_super(sb);
}
-void filesystems_freeze(void)
+void filesystems_freeze(bool freeze_all)
{
- __iterate_supers(filesystems_freeze_callback, NULL,
+ void *freeze_all_ptr = NULL;
+
+ if (freeze_all)
+ freeze_all_ptr = &freeze_all;
+ __iterate_supers(filesystems_freeze_callback, freeze_all_ptr,
SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}
diff --git a/fs/xfs/scrub/symlink_repair.c b/fs/xfs/scrub/symlink_repair.c
index 5902398185a8..df629892462f 100644
--- a/fs/xfs/scrub/symlink_repair.c
+++ b/fs/xfs/scrub/symlink_repair.c
@@ -184,7 +184,7 @@ xrep_symlink_salvage_inline(
sc->ip->i_disk_size == 1 && old_target[0] == '?')
return 0;
- nr = min(XFS_SYMLINK_MAXLEN, xfs_inode_data_fork_size(ip));
+ nr = min(XFS_SYMLINK_MAXLEN, ifp->if_bytes);
memcpy(target_buf, ifp->if_data, nr);
return nr;
}
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1067ebb3b001..bc71aa9dcee8 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1693,7 +1693,10 @@ xfs_fs_fill_super(
if (error)
return error;
- sb_min_blocksize(sb, BBSIZE);
+ if (!sb_min_blocksize(sb, BBSIZE)) {
+ xfs_err(mp, "unable to set blocksize");
+ return -EINVAL;
+ }
sb->s_xattr = xfs_xattr_handlers;
sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 7146a8e9e9c2..d0eccbd920e5 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -417,15 +417,32 @@ static inline void acpi_processor_throttling_init(void) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
/* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-void acpi_processor_power_init(struct acpi_processor *pr);
-void acpi_processor_power_exit(struct acpi_processor *pr);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
-void acpi_processor_register_idle_driver(void);
-void acpi_processor_unregister_idle_driver(void);
-int acpi_processor_ffh_lpi_probe(unsigned int cpu);
-int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
@@ -448,6 +465,11 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
}
#endif /* CONFIG_CPU_FREQ */
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#endif
+
void acpi_processor_init_invariance_cppc(void);
#endif
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index da6301a6fcea..69d4ae92d822 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -877,7 +877,10 @@
MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
- MACRO__(0xB0B0, ## __VA_ARGS__), \
+ MACRO__(0xB0B0, ## __VA_ARGS__)
+
+/* WCL */
+#define INTEL_WCL_IDS(MACRO__, ...) \
MACRO__(0xFD80, ## __VA_ARGS__), \
MACRO__(0xFD81, ## __VA_ARGS__)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 792e10a09787..c9013e472aa3 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -566,6 +566,7 @@ struct ata_bmdma_prd {
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_is_locked(id) (((id)[ATA_ID_DLF] & 0x7) == 0x7)
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index d30816dd93c7..726d909e87ce 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#ifndef _CAN_BITTIMING_H
@@ -16,10 +16,12 @@
#define CAN_CTRLMODE_FD_TDC_MASK \
(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+#define CAN_CTRLMODE_XL_TDC_MASK \
+ (CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TDC_MANUAL)
#define CAN_CTRLMODE_TDC_AUTO_MASK \
- (CAN_CTRLMODE_TDC_AUTO)
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_XL_TDC_AUTO)
#define CAN_CTRLMODE_TDC_MANUAL_MASK \
- (CAN_CTRLMODE_TDC_MANUAL)
+ (CAN_CTRLMODE_TDC_MANUAL | CAN_CTRLMODE_XL_TDC_MANUAL)
/*
* struct can_tdc - CAN FD Transmission Delay Compensation parameters
@@ -85,6 +87,11 @@ struct can_tdc {
u32 tdcf;
};
+/* The transceiver decoding margin corresponds to t_Decode in ISO 11898-2 */
+#define CAN_PWM_DECODE_NS 5
+/* Maximum PWM symbol duration. Corresponds to t_SymbolNom_MAX - t_Decode */
+#define CAN_PWM_NS_MAX (205 - CAN_PWM_DECODE_NS)
+
/*
* struct can_tdc_const - CAN hardware-dependent constant for
* Transmission Delay Compensation
@@ -118,11 +125,48 @@ struct can_tdc_const {
u32 tdcf_max;
};
+/*
+ * struct can_pwm - CAN Pulse-Width Modulation (PWM) parameters
+ *
+ * @pwms: pulse width modulation short phase
+ * @pwml: pulse width modulation long phase
+ * @pwmo: pulse width modulation offset
+ */
+struct can_pwm {
+ u32 pwms;
+ u32 pwml;
+ u32 pwmo;
+};
+
+/*
+ * struct can_pwm_const - CAN hardware-dependent constants for Pulse-Width
+ * Modulation (PWM)
+ *
+ * @pwms_min: PWM short phase minimum value. Must be at least 1.
+ * @pwms_max: PWM short phase maximum value
+ * @pwml_min: PWM long phase minimum value. Must be at least 1.
+ * @pwml_max: PWM long phase maximum value
+ * @pwmo_min: PWM offset phase minimum value
+ * @pwmo_max: PWM offset phase maximum value
+ */
+struct can_pwm_const {
+ u32 pwms_min;
+ u32 pwms_max;
+ u32 pwml_min;
+ u32 pwml_max;
+ u32 pwmo_min;
+ u32 pwmo_max;
+};
+
struct data_bittiming_params {
const struct can_bittiming_const *data_bittiming_const;
struct can_bittiming data_bittiming;
const struct can_tdc_const *tdc_const;
- struct can_tdc tdc;
+ const struct can_pwm_const *pwm_const;
+ union {
+ struct can_tdc tdc;
+ struct can_pwm pwm;
+ };
const u32 *data_bitrate_const;
unsigned int data_bitrate_const_cnt;
int (*do_set_data_bittiming)(struct net_device *dev);
@@ -136,12 +180,14 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported);
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack);
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
- netdev_err(dev, "bit-timing calculation not available\n");
+ NL_SET_ERR_MSG(extack, "bit-timing calculation not available\n");
return -EINVAL;
}
@@ -151,6 +197,14 @@ can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
{
}
+
+static inline int
+can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG(extack,
+ "bit-timing calculation not available: manually provide PWML and PWMS\n");
+ return -EINVAL;
+}
#endif /* CONFIG_CAN_CALC_BITTIMING */
void can_sjw_set_default(struct can_bittiming *bt);
@@ -164,6 +218,10 @@ int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const unsigned int bitrate_const_cnt,
struct netlink_ext_ack *extack);
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack);
+
/*
* can_get_relative_tdco() - TDCO relative to the sample point
*
@@ -206,4 +264,17 @@ static inline unsigned int can_bit_time(const struct can_bittiming *bt)
return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
}
+/* Duration of one bit in minimum time quantum */
+static inline unsigned int can_bit_time_tqmin(const struct can_bittiming *bt)
+{
+ return can_bit_time(bt) * bt->brp;
+}
+
+/* Convert a duration from a number of minimum time quanta to nanoseconds */
+static inline u32 can_tqmin_to_ns(u32 tqmin, u32 clock_freq)
+{
+ return DIV_U64_ROUND_CLOSEST(mul_u32_u32(tqmin, NSEC_PER_SEC),
+ clock_freq);
+}
+
#endif /* !_CAN_BITTIMING_H */
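The two helpers above reduce the PWM feasibility question to simple arithmetic: with an 80 MHz CAN clock, a bit time of 8 time quanta and brp = 1, can_bit_time_tqmin() gives 8 minimum time quanta and can_tqmin_to_ns(8, 80000000) = 100 ns, below CAN_PWM_NS_MAX (205 - 5 = 200 ns). A hedged sketch of such a check, not part of the patch; dbt and priv are assumed to be the relevant bittiming and the device's can_priv:

    /* Illustrative sketch only: reject bit times too long to PWM-encode. */
    u32 bit_ns = can_tqmin_to_ns(can_bit_time_tqmin(dbt), priv->clock.freq);

    if (bit_ns > CAN_PWM_NS_MAX)
    	return -EINVAL;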
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index bd7410b5d8a6..52c8be5c160e 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -47,7 +47,7 @@ struct can_priv {
const struct can_bittiming_const *bittiming_const;
struct can_bittiming bittiming;
- struct data_bittiming_params fd;
+ struct data_bittiming_params fd, xl;
unsigned int bitrate_const_cnt;
const u32 *bitrate_const;
u32 bitrate_max;
@@ -85,6 +85,11 @@ static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
}
+static inline bool can_xl_tdc_is_enabled(const struct can_priv *priv)
+{
+ return !!(priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+}
+
static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
{
return priv->ctrlmode & ~priv->ctrlmode_supported;
@@ -95,22 +100,6 @@ static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
-/* drop skb if it does not contain a valid CAN frame for sending */
-static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
- netdev_info_once(dev,
- "interface in listen only mode, dropping skb\n");
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
- }
-
- return can_dropped_invalid_skb(dev, skb);
-}
-
void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
@@ -146,6 +135,51 @@ void can_bus_off(struct net_device *dev);
const char *can_get_state_str(const enum can_state state);
const char *can_get_ctrlmode_str(u32 ctrlmode);
+static inline bool can_dev_in_xl_only_mode(struct can_priv *priv)
+{
+ const u32 mixed_mode = CAN_CTRLMODE_FD | CAN_CTRLMODE_XL;
+
+ /* When CAN XL is enabled but FD is disabled we are running in
+ * the so-called 'CANXL-only mode' where the error signalling is
+ * disabled. This helper function determines the required value
+ * to disable error signalling in the CAN XL controller.
+ * The so-called CC/FD/XL 'mixed mode' requires error signalling.
+ */
+ return ((priv->ctrlmode & mixed_mode) == CAN_CTRLMODE_XL);
+}
+
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ u32 silent_mode = priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_RESTRICTED);
+
+ if (silent_mode) {
+ netdev_info_once(dev, "interface in %s mode, dropping skb\n",
+ can_get_ctrlmode_str(silent_mode));
+ goto invalid_skb;
+ }
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
+ netdev_info_once(dev, "CAN FD is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ if (can_dev_in_xl_only_mode(priv) && !can_is_canxl_skb(skb)) {
+ netdev_info_once(dev,
+ "Error signaling is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+
+invalid_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+
void can_state_get_by_berr_counter(const struct net_device *dev,
const struct can_berr_counter *bec,
enum can_state *tx_state,
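can_dev_in_xl_only_mode() above gives drivers one predicate for the error-signalling decision. A hedged sketch of a driver's set-mode path consuming it, not part of the patch; the register accessors and the config bit are hypothetical:

    /* Illustrative sketch only; example_* names are hypothetical. */
    static void example_configure_error_signalling(struct net_device *dev)
    {
    	struct can_priv *priv = netdev_priv(dev);
    	u32 cfg = example_read_cfg(dev);

    	if (can_dev_in_xl_only_mode(priv))
    		cfg |= EXAMPLE_CFG_ERROR_SIGNALLING_OFF;
    	else
    		cfg &= ~EXAMPLE_CFG_ERROR_SIGNALLING_OFF;
    	example_write_cfg(dev, cfg);
    }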
diff --git a/include/linux/entry-virt.h b/include/linux/entry-virt.h
index 42c89e3e5ca7..bfa767702d9a 100644
--- a/include/linux/entry-virt.h
+++ b/include/linux/entry-virt.h
@@ -32,7 +32,7 @@
*/
static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work);
-#ifndef arch_xfer_to_guest_mode_work
+#ifndef arch_xfer_to_guest_mode_handle_work
static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work)
{
return 0;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a104b3994230..03e7516c6187 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -901,6 +901,26 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
+static inline int bpf_prog_run_data_pointers(
+ const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+ void *save_data_meta, *save_data_end;
+ int res;
+
+ save_data_meta = cb->data_meta;
+ save_data_end = cb->data_end;
+
+ bpf_compute_data_pointers(skb);
+ res = bpf_prog_run(prog, skb);
+
+ cb->data_meta = save_data_meta;
+ cb->data_end = save_data_end;
+
+ return res;
+}
+
/* Similar to bpf_compute_data_pointers(), except that save original
* data in cb->data and cb->meta_data for restore.
*/
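bpf_prog_run_data_pointers() above wraps a single program run so that the data_meta and data_end slots in skb->cb are restored afterwards. A minimal usage sketch, not part of the patch, for a caller that must not leave the control block modified:

    /* Illustrative sketch only. */
    static int example_run_skb_filter(const struct bpf_prog *prog,
    				  struct sk_buff *skb)
    {
    	/* pointers are recomputed for the run and restored on return */
    	return bpf_prog_run_data_pointers(prog, skb);
    }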
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c895146c1444..dd3b57cfadee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2689,6 +2689,7 @@ struct file_system_type {
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_LBS 128 /* FS supports LBS */
+#define FS_POWER_FREEZE 256 /* Always freeze on suspend/hibernate */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2823,6 +2824,7 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
+void iput_not_last(struct inode *);
int inode_update_timestamps(struct inode *inode, int flags);
int generic_update_time(struct inode *, int);
@@ -3423,8 +3425,8 @@ static inline void remove_inode_hash(struct inode *inode)
extern void inode_sb_list_add(struct inode *inode);
extern void inode_add_lru(struct inode *inode);
-extern int sb_set_blocksize(struct super_block *, int);
-extern int sb_min_blocksize(struct super_block *, int);
+int sb_set_blocksize(struct super_block *sb, int size);
+int __must_check sb_min_blocksize(struct super_block *sb, int size);
int generic_file_mmap(struct file *, struct vm_area_struct *);
int generic_file_mmap_prepare(struct vm_area_desc *desc);
@@ -3606,7 +3608,7 @@ extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
-void filesystems_freeze(void);
+void filesystems_freeze(bool freeze_all);
void filesystems_thaw(void);
extern int dcache_dir_open(struct inode *, struct file *);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7ded7df6e9b5..07f8c309e432 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -193,6 +193,10 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
+#ifndef arch_ftrace_partial_regs
+#define arch_ftrace_partial_regs(regs) do {} while (0)
+#endif
+
static __always_inline struct pt_regs *
ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
@@ -202,7 +206,11 @@ ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
* Since arch_ftrace_get_regs() will check some members and may return
* NULL, we can not use it.
*/
- return &arch_ftrace_regs(fregs)->regs;
+ regs = &arch_ftrace_regs(fregs)->regs;
+
+ /* Allow arch specific updates to regs. */
+ arch_ftrace_partial_regs(regs);
+ return regs;
}
#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 105cc4c00cc3..abc20f9810fd 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -249,10 +249,12 @@ static inline void clear_highpage_kasan_tagged(struct page *page)
kunmap_local(kaddr);
}
-#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
-static inline void tag_clear_highpage(struct page *page)
+/* Return false to let people know we did not initialize the pages */
+static inline bool tag_clear_highpages(struct page *page, int numpages)
{
+ return false;
}
#endif
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index 995db62570f9..3102c425c8e0 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -27,7 +27,9 @@ struct phy_device;
* as soon as a timestamp becomes available. One of the PTP_CLASS_
* values is passed in 'type'.
*
- * @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ * @hwtstamp_set: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @hwtstamp_get: Handles SIOCGHWTSTAMP ioctl for hardware time stamping.
*
* @link_state: Allows the device to respond to changes in the link
* state. The caller invokes this function while holding
@@ -51,9 +53,12 @@ struct mii_timestamper {
void (*txtstamp)(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type);
- int (*hwtstamp)(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *kernel_config,
- struct netlink_ext_ack *extack);
+ int (*hwtstamp_set)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
+
+ int (*hwtstamp_get)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
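A hedged sketch of a timestamper provider adopting the split callbacks: hwtstamp_set programs the hardware and caches the config, hwtstamp_get reports it back for SIOCGHWTSTAMP. The foo_ structure and names are illustrative.

struct foo_mii_ts {
	struct mii_timestamper mii_ts;
	struct kernel_hwtstamp_config cfg;	/* last applied config */
};

static int foo_hwtstamp_set(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct foo_mii_ts *ts = container_of(mii_ts, struct foo_mii_ts, mii_ts);

	/* ... program RX/TX timestamping in hardware here ... */
	ts->cfg = *cfg;
	return 0;
}

static int foo_hwtstamp_get(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg)
{
	struct foo_mii_ts *ts = container_of(mii_ts, struct foo_mii_ts, mii_ts);

	*cfg = ts->cfg;
	return 0;
}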
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 046396269ccf..1c54aa6f74fb 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -488,7 +488,6 @@ struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
-struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
@@ -599,6 +598,7 @@ struct mlx5_priv {
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
+ struct blocking_notifier_head esw_n_head;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
@@ -614,12 +614,18 @@ struct mlx5_priv {
struct mlx5_bfreg_data bfregs;
struct mlx5_sq_bfreg bfreg;
#ifdef CONFIG_MLX5_SF
- struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_nb vhca_state_nb;
+ struct blocking_notifier_head vhca_state_n_head;
+ struct notifier_block sf_dev_nb;
struct mlx5_sf_dev_table *sf_dev_table;
struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
+ struct notifier_block sf_hw_table_vhca_nb;
struct mlx5_sf_hw_table *sf_hw_table;
+ struct notifier_block sf_table_esw_nb;
+ struct notifier_block sf_table_vhca_nb;
+ struct notifier_block sf_table_mdev_nb;
struct mlx5_sf_table *sf_table;
#endif
struct blocking_notifier_head lag_nh;
@@ -819,6 +825,7 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
enum {
MLX5_CMD_ENT_STATE_PENDING_COMP,
+ MLX5_CMD_ENT_STATE_TIMEDOUT,
};
struct mlx5_cmd_work_ent {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 7bf2449c53b2..9cadb1d5e6df 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -71,6 +71,7 @@ enum {
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
+ MLX5_FLOW_TABLE_OTHER_ESWITCH = BIT(6),
};
#define LEFTOVERS_RULE_NUM 2
@@ -128,6 +129,24 @@ enum {
FDB_PER_VPORT,
};
+enum fs_flow_table_type {
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_NIC_TX = 0x1,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
+};
+
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
struct mlx5_flow_definer;
@@ -209,6 +228,7 @@ struct mlx5_flow_table_attr {
u32 flags;
u16 uid;
u16 vport;
+ u16 esw_owner_vhca_id;
struct mlx5_flow_table *next_ft;
struct {
@@ -354,4 +374,8 @@ u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
struct mlx5_flow_root_namespace *
mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type);
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1b0b36aa2a76..e9dcd4bf355d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -5251,13 +5251,15 @@ struct mlx5_ifc_set_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8809,13 +8811,15 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8840,13 +8844,15 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8985,13 +8991,15 @@ struct mlx5_ifc_delete_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -9535,13 +9543,15 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x20];
@@ -9580,7 +9590,8 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
@@ -9588,7 +9599,7 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 table_type[0x8];
u8 reserved_at_88[0x4];
u8 group_type[0x4];
- u8 reserved_at_90[0x10];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -11878,10 +11889,12 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x10];
+ u8 eswitch_owner_vhca_id[0x10];
u8 table_type[0x8];
u8 reserved_at_88[0x7];
@@ -11921,14 +11934,16 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 58770b86f793..1df9d9a57bbc 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -112,6 +112,7 @@ enum mlx5e_ext_link_mode {
MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
+ MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
MLX5E_EXT_LINK_MODES_NUMBER,
};
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index c87b9507cfa1..f876bfc0669c 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -73,7 +73,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d16b33bacc32..7c79b3369b82 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2074,7 +2074,7 @@ static inline unsigned long folio_nr_pages(const struct folio *folio)
return folio_large_nr_pages(folio);
}
-#if !defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE)
+#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
/*
* We don't expect any folios that exceed buddy sizes (and consequently
* memory sections).
@@ -2087,10 +2087,17 @@ static inline unsigned long folio_nr_pages(const struct folio *folio)
* pages are guaranteed to be contiguous.
*/
#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
-#else
+#elif defined(CONFIG_HUGETLB_PAGE)
/*
* There is no real limit on the folio size. We limit them to the maximum we
- * currently expect (e.g., hugetlb, dax).
+ * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
+ * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
+ */
+#define MAX_FOLIO_ORDER get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
+#else
+/*
+ * Without hugetlb, gigantic folios that are bigger than a single PUD are
+ * currently impossible.
*/
#define MAX_FOLIO_ORDER PUD_ORDER
#endif
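Worked numbers behind the hugetlb bound above, assuming 4 KiB base pages (the orders shrink accordingly with larger page sizes):

/*
 * 16 GiB / 4 KiB = 2^34 / 2^12 = 2^22 pages  ->  get_order(SZ_16G) == 22
 *  1 GiB / 4 KiB = 2^30 / 2^12 = 2^18 pages  ->  get_order(SZ_1G)  == 18
 * With 64 KiB base pages the same folios are order 18 and order 14.
 */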
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
index 9e4e331b1603..ba28140ce670 100644
--- a/include/linux/pci-tph.h
+++ b/include/linux/pci-tph.h
@@ -29,6 +29,7 @@ int pcie_tph_get_cpu_st(struct pci_dev *dev,
void pcie_disable_tph(struct pci_dev *pdev);
int pcie_enable_tph(struct pci_dev *pdev, int mode);
u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev);
#else
static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
unsigned int index, u16 tag)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d1fdf81fbe1e..bf97d49c23cf 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -412,6 +412,8 @@ struct pci_dev {
u16 l1ss; /* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ unsigned int aspm_l0s_support:1; /* ASPM L0s support */
+ unsigned int aspm_l1_support:1; /* ASPM L1 support */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
#endif
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index e40f554ff717..36073f7b6bb4 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -38,7 +38,9 @@ enum dw_xpcs_pma_id {
DW_XPCS_PMA_GEN4_6G_ID,
DW_XPCS_PMA_GEN5_10G_ID,
DW_XPCS_PMA_GEN5_12G_ID,
- WX_TXGBE_XPCS_PMA_10G_ID = 0x0018fc80,
+ WX_TXGBE_XPCS_PMA_10G_ID = 0xfc806000,
+ /* Meta Platforms OUI 88:25:08, model 0, revision 0 */
+ MP_FBNIC_XPCS_PMA_100G_ID = 0x46904000,
};
struct dw_xpcs_info {
diff --git a/include/linux/phy.h b/include/linux/phy.h
index bf5457341ca8..059a104223c4 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1915,7 +1915,7 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
*/
static inline bool phy_has_hwtstamp(struct phy_device *phydev)
{
- return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
+ return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp_set;
}
/**
@@ -1950,7 +1950,7 @@ static inline int phy_hwtstamp(struct phy_device *phydev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack)
{
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, cfg, extack);
+ return phydev->mii_ts->hwtstamp_set(phydev->mii_ts, cfg, extack);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
@@ -2040,6 +2040,9 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
return phydev->is_pseudo_fixed_link;
}
+phy_interface_t phy_fix_phy_mode_for_mac_delays(phy_interface_t interface,
+ bool mac_txid, bool mac_rxid);
+
int phy_save_page(struct phy_device *phydev);
int phy_select_page(struct phy_device *phydev, int page);
int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ff90281ddf90..86737076101d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3778,8 +3778,8 @@ static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
enum dma_data_direction dir)
{
if (skb_frag_is_net_iov(frag)) {
- return netmem_to_net_iov(frag->netmem)->dma_addr + offset +
- frag->offset;
+ return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
+ offset + frag->offset;
}
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 4f70a6551e68..f1054b9c2d8a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -113,7 +113,7 @@ struct stmmac_axi {
u32 axi_wr_osr_lmt;
u32 axi_rd_osr_lmt;
bool axi_kbbe;
- u32 axi_blen[AXI_BLEN];
+ u32 axi_blen_regval;
bool axi_fb;
bool axi_mb;
bool axi_rb;
@@ -264,8 +264,8 @@ struct plat_stmmacenet_data {
unsigned int mode,
phy_interface_t interface);
void (*ptp_clk_freq_config)(struct stmmac_priv *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
+ int (*init)(struct device *dev, void *priv);
+ void (*exit)(struct device *dev, void *priv);
int (*suspend)(struct device *dev, void *priv);
int (*resume)(struct device *dev, void *priv);
int (*mac_setup)(void *priv, struct mac_device_info *mac);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index b673c31569f3..75dabb763c65 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -384,7 +384,8 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr_v1_hash_tunnel *vhdr,
bool tnl_hdr_negotiated,
bool little_endian,
- int vlan_hlen)
+ int vlan_hlen,
+ bool has_data_valid)
{
struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
unsigned int inner_nh, outer_th;
@@ -394,8 +395,8 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM);
if (!tnl_gso_type)
- return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
- vlan_hlen);
+ return virtio_net_hdr_from_skb(skb, hdr, little_endian,
+ has_data_valid, vlan_hlen);
/* Tunnel support not negotiated but skb ask for it. */
if (!tnl_hdr_negotiated)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b8100dbfe5d7..0cb87687837f 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -749,7 +749,6 @@ struct hci_conn {
__u8 remote_cap;
__u8 remote_auth;
- __u8 remote_id;
unsigned int sent;
@@ -857,11 +856,12 @@ extern struct mutex hci_cb_list_lock;
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
-void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb,
+ u16 flags);
#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb);
#else
static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 *flags)
@@ -869,23 +869,30 @@ static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
return 0;
}
-static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+static inline int sco_recv_scodata(struct hci_dev *hdev, u16 handle,
+ struct sk_buff *skb)
{
+ kfree_skb(skb);
+ return -ENOENT;
}
#endif
#if IS_ENABLED(CONFIG_BT_LE)
int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb,
+ u16 flags);
#else
static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 *flags)
{
return 0;
}
-static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
- u16 flags)
+
+static inline int iso_recv(struct hci_dev *hdev, u16 handle,
+ struct sk_buff *skb, u16 flags)
{
+ kfree_skb(skb);
+ return -ENOENT;
}
#endif
diff --git a/include/net/devlink.h b/include/net/devlink.h
index d01046ef0577..cb839e0435a1 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -479,6 +479,10 @@ struct devlink_flash_notify {
* @set: set parameter value, used for runtime and permanent
* configuration modes
* @validate: validate input value is applicable (within value range, etc.)
+ * @get_default: get parameter default value, used for runtime and permanent
+ * configuration modes
+ * @reset_default: reset parameter to default value, used for runtime and permanent
+ * configuration modes
*
* This struct should be used by the driver to fill the data for
* a parameter it registers.
@@ -490,13 +494,20 @@ struct devlink_param {
enum devlink_param_type type;
unsigned long supported_cmodes;
int (*get)(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int (*set)(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
int (*validate)(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
+ int (*get_default)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
+ int (*reset_default)(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack);
};
struct devlink_param_item {
@@ -508,6 +519,7 @@ struct devlink_param_item {
* until reload.
*/
bool driverinit_value_new_valid;
+ union devlink_param_value driverinit_default;
};
enum devlink_param_generic_id {
@@ -629,6 +641,37 @@ enum devlink_param_generic_id {
.validate = _validate, \
}
+#define DEVLINK_PARAM_GENERIC_WITH_DEFAULTS(_id, _cmodes, _get, _set, \
+ _validate, _get_default, \
+ _reset_default) \
+{ \
+ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
+ .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
+ .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
+ .generic = true, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+ .get_default = _get_default, \
+ .reset_default = _reset_default, \
+}
+
+#define DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(_id, _name, _type, _cmodes, \
+ _get, _set, _validate, \
+ _get_default, _reset_default) \
+{ \
+ .id = _id, \
+ .name = _name, \
+ .type = _type, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+ .get_default = _get_default, \
+ .reset_default = _reset_default, \
+}
+
/* Identifier of board design */
#define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id"
/* Revision of board design */
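A hedged sketch of a driver wiring up the new default callbacks through DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(); all foo_ names and FOO_ constants are illustrative, and registration via devlink_params_register() is not shown.

struct foo_priv {
	u32 setting;
};

#define FOO_SETTING_DEFAULT	16
#define FOO_PARAM_ID_SETTING	(DEVLINK_PARAM_GENERIC_ID_MAX + 1)

static int foo_param_get(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx,
			 struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	ctx->val.vu32 = priv->setting;
	return 0;
}

static int foo_param_set(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx,
			 struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	priv->setting = ctx->val.vu32;
	return 0;
}

static int foo_param_get_default(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{
	ctx->val.vu32 = FOO_SETTING_DEFAULT;
	return 0;
}

static int foo_param_reset_default(struct devlink *devlink, u32 id,
				   enum devlink_param_cmode cmode,
				   struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	priv->setting = FOO_SETTING_DEFAULT;
	return 0;
}

static const struct devlink_param foo_params[] = {
	DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(FOO_PARAM_ID_SETTING, "foo_setting",
					   DEVLINK_PARAM_TYPE_U32,
					   BIT(DEVLINK_PARAM_CMODE_RUNTIME),
					   foo_param_get, foo_param_set,
					   NULL, foo_param_get_default,
					   foo_param_reset_default),
};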
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2df2e2ead9a8..e40cdc12f7f3 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -1251,7 +1251,8 @@ struct dsa_switch_ops {
dsa_devlink_param_get, dsa_devlink_param_set, NULL)
int dsa_devlink_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
@@ -1314,11 +1315,6 @@ static inline int dsa_devlink_port_to_port(struct devlink_port *port)
return port->index;
}
-struct dsa_switch_driver {
- struct list_head list;
- const struct dsa_switch_ops *ops;
-};
-
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db);
diff --git a/include/net/gro.h b/include/net/gro.h
index e3affb2e2ca8..b65f631c521d 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -593,4 +593,31 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
+static inline struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
+{
+ unsigned int thlen, hlen, off;
+ struct tcphdr *th;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return NULL;
+
+ thlen = th->doff * 4;
+ if (unlikely(thlen < sizeof(*th)))
+ return NULL;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return NULL;
+ }
+
+ skb_gro_pull(skb, thlen);
+
+ return th;
+}
+
#endif /* _NET_GRO_H */
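A hedged sketch of how a GRO receive handler consumes the helper above; foo_gro_receive is illustrative, the real users are the TCP GRO entry points.

static struct sk_buff *foo_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Validate doff and pull the TCP header; NULL means flush, no GRO. */
	th = tcp_gro_pull_header(skb);
	if (!th) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb, th);
}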
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index fd40af2221b9..ecb362025c4e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -56,7 +56,9 @@ struct inet_connection_sock_af_ops {
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
* @icsk_bind2_hash: Bind node in the bhash2 table
- * @icsk_retransmit_timer: Resend (no ack)
+ * @icsk_delack_timer: Delayed ACK timer
+ * @icsk_keepalive_timer: Keepalive timer
+ * @mptcp_tout_timer: mptcp timer
* @icsk_rto: Retransmit timeout
* @icsk_pmtu_cookie Last pmtu seen by socket
* @icsk_ca_ops Pluggable congestion control hook
@@ -81,8 +83,11 @@ struct inet_connection_sock {
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
struct inet_bind2_bucket *icsk_bind2_hash;
- struct timer_list icsk_retransmit_timer;
- struct timer_list icsk_delack_timer;
+ struct timer_list icsk_delack_timer;
+ union {
+ struct timer_list icsk_keepalive_timer;
+ struct timer_list mptcp_tout_timer;
+ };
__u32 icsk_rto;
__u32 icsk_rto_min;
u32 icsk_rto_max;
@@ -184,10 +189,9 @@ static inline void inet_csk_delack_init(struct sock *sk)
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}
-static inline unsigned long
-icsk_timeout(const struct inet_connection_sock *icsk)
+static inline unsigned long tcp_timeout_expires(const struct sock *sk)
{
- return READ_ONCE(icsk->icsk_retransmit_timer.expires);
+ return READ_ONCE(sk->tcp_retransmit_timer.expires);
}
static inline unsigned long
@@ -203,7 +207,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
smp_store_release(&icsk->icsk_pending, 0);
#ifdef INET_CSK_CLEAR_TIMERS
- sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer(sk, &sk->tcp_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
smp_store_release(&icsk->icsk_ack.pending, 0);
@@ -235,7 +239,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
smp_store_release(&icsk->icsk_pending, what);
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer, when);
+ sk_reset_timer(sk, &sk->tcp_retransmit_timer, when);
} else if (what == ICSK_TIME_DACK) {
smp_store_release(&icsk->icsk_ack.pending,
icsk->icsk_ack.pending | ICSK_ACK_TIMER);
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 637f42485dba..a4cf307859f8 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -486,6 +486,8 @@ struct gdma_wqe {
#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24
+#define MANA_MAX_TX_WQE_SGL_ENTRIES 30
+
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
@@ -592,6 +594,12 @@ enum {
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
+/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
+#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
+
+/* Driver can send HWC periodically to query stats */
+#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
@@ -601,7 +609,9 @@ enum {
GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
- GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE)
+ GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
+ GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
+ GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE)
#define GDMA_DRV_CAP_FLAGS2 0
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 8906901535f5..d7e089c6b694 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -375,6 +375,14 @@ struct mana_tx_qp {
struct mana_ethtool_stats {
u64 stop_queue;
u64 wake_queue;
+ u64 tx_cqe_err;
+ u64 tx_cqe_unknown_type;
+ u64 tx_linear_pkt_cnt;
+ u64 rx_coalesced_err;
+ u64 rx_cqe_unknown_type;
+};
+
+struct mana_ethtool_hc_stats {
u64 hc_rx_discards_no_wqe;
u64 hc_rx_err_vport_disabled;
u64 hc_rx_bytes;
@@ -402,10 +410,6 @@ struct mana_ethtool_stats {
u64 hc_tx_mcast_pkts;
u64 hc_tx_mcast_bytes;
u64 hc_tx_err_gdma;
- u64 tx_cqe_err;
- u64 tx_cqe_unknown_type;
- u64 rx_coalesced_err;
- u64 rx_cqe_unknown_type;
};
struct mana_ethtool_phy_stats {
@@ -473,9 +477,14 @@ struct mana_context {
u16 num_ports;
u8 bm_hostmode;
+ struct mana_ethtool_hc_stats hc_stats;
struct mana_eq *eqs;
struct dentry *mana_eqs_debugfs;
+ /* Workqueue for querying hardware stats */
+ struct delayed_work gf_stats_work;
+ bool hwc_timeout_occurred;
+
struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
/* Link state change work */
@@ -577,13 +586,14 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
-void mana_query_gf_stats(struct mana_port_context *apc);
+int mana_query_gf_stats(struct mana_context *ac);
int mana_query_link_cfg(struct mana_port_context *apc);
int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
int enable_clamping);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index de9d36acc8e2..2dbd46fc4734 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -74,17 +74,18 @@ struct netns_ipv4 {
/* TXRX readonly hotpath cache lines */
__cacheline_group_begin(netns_ipv4_read_txrx);
- u8 sysctl_tcp_moderate_rcvbuf;
__cacheline_group_end(netns_ipv4_read_txrx);
/* RX readonly hotpath cache line */
__cacheline_group_begin(netns_ipv4_read_rx);
+ u8 sysctl_tcp_moderate_rcvbuf;
u8 sysctl_ip_early_demux;
u8 sysctl_tcp_early_demux;
u8 sysctl_tcp_l3mdev_accept;
/* 3 bytes hole, try to pack */
int sysctl_tcp_reordering;
int sysctl_tcp_rmem[3];
+ int sysctl_tcp_rcvbuf_low_rtt;
__cacheline_group_end(netns_ipv4_read_rx);
struct inet_timewait_death_row tcp_death_row;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index c64fd896b1f9..99ac747b7906 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -536,6 +536,8 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
case TCF_LAYER_NETWORK:
return skb_network_header(skb);
case TCF_LAYER_TRANSPORT:
+ if (!skb_transport_header_was_set(skb))
+ break;
return skb_transport_header(skb);
}
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 4678db45832a..e703c507d0da 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -114,12 +114,13 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
void __qdisc_run(struct Qdisc *q);
-static inline void qdisc_run(struct Qdisc *q)
+static inline struct sk_buff *qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
__qdisc_run(q);
- qdisc_run_end(q);
+ return qdisc_run_end(q);
}
+ return NULL;
}
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 94966692ccdf..c3a7268b567e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -88,6 +88,8 @@ struct Qdisc {
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_DEQUEUE_DROPS 0x400 /* ->dequeue() can drop packets in q->to_free */
+
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -103,17 +105,26 @@ struct Qdisc {
int pad;
refcount_t refcnt;
- /*
- * For performance sake on SMP, we put highly modified fields at the end
- */
- struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
- struct qdisc_skb_head q;
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_queue qstats;
- bool running; /* must be written under qdisc spinlock */
- unsigned long state;
- struct Qdisc *next_sched;
- struct sk_buff_head skb_bad_txq;
+ /* Cache line potentially dirtied in dequeue() or __netif_reschedule(). */
+ __cacheline_group_begin(Qdisc_read_mostly) ____cacheline_aligned;
+ struct sk_buff_head gso_skb;
+ struct Qdisc *next_sched;
+ struct sk_buff_head skb_bad_txq;
+ __cacheline_group_end(Qdisc_read_mostly);
+
+ /* Fields dirtied in dequeue() fast path. */
+ __cacheline_group_begin(Qdisc_write) ____cacheline_aligned;
+ struct qdisc_skb_head q;
+ unsigned long state;
+ struct gnet_stats_basic_sync bstats;
+ bool running; /* must be written under qdisc spinlock */
+
+ /* Note : we only change qstats.backlog in fast path. */
+ struct gnet_stats_queue qstats;
+
+ struct sk_buff *to_free;
+ __cacheline_group_end(Qdisc_write);
+
atomic_long_t defer_count ____cacheline_aligned_in_smp;
struct llist_head defer_list;
@@ -211,8 +222,10 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
return true;
}
-static inline void qdisc_run_end(struct Qdisc *qdisc)
+static inline struct sk_buff *qdisc_run_end(struct Qdisc *qdisc)
{
+ struct sk_buff *to_free = NULL;
+
if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
@@ -225,9 +238,16 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
if (unlikely(test_bit(__QDISC_STATE_MISSED,
&qdisc->state)))
__netif_schedule(qdisc);
- } else {
- WRITE_ONCE(qdisc->running, false);
+ return NULL;
+ }
+
+ if (qdisc->flags & TCQ_F_DEQUEUE_DROPS) {
+ to_free = qdisc->to_free;
+ if (to_free)
+ qdisc->to_free = NULL;
}
+ WRITE_ONCE(qdisc->running, false);
+ return to_free;
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -429,13 +449,16 @@ struct tcf_proto {
};
struct qdisc_skb_cb {
- struct {
- unsigned int pkt_len;
- u16 slave_dev_queue_mapping;
- u16 tc_classid;
- };
+ unsigned int pkt_len;
+ u16 pkt_segs;
+ u16 tc_classid;
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
+
+ u16 slave_dev_queue_mapping;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
};
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
@@ -826,6 +849,15 @@ static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
return qdisc_skb_cb(skb)->pkt_len;
}
+static inline unsigned int qdisc_pkt_segs(const struct sk_buff *skb)
+{
+ u32 pkt_segs = qdisc_skb_cb(skb)->pkt_segs;
+
+ DEBUG_NET_WARN_ON_ONCE(pkt_segs !=
+ (skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1));
+ return pkt_segs;
+}
+
/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
__NET_XMIT_STOLEN = 0x00010000,
@@ -867,9 +899,7 @@ static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
- _bstats_update(bstats,
- qdisc_pkt_len(skb),
- skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+ _bstats_update(bstats, qdisc_pkt_len(skb), qdisc_pkt_segs(skb));
}
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
@@ -1064,11 +1094,8 @@ struct tc_skb_cb {
struct qdisc_skb_cb qdisc_cb;
u32 drop_reason;
- u16 zone; /* Only valid if post_ct = true */
+ u16 zone; /* Only valid if qdisc_skb_cb(skb)->post_ct = true */
u16 mru;
- u8 post_ct:1;
- u8 post_ct_snat:1;
- u8 post_ct_dnat:1;
};
static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
@@ -1091,6 +1118,28 @@ static inline void tcf_set_drop_reason(const struct sk_buff *skb,
tc_skb_cb(skb)->drop_reason = reason;
}
+static inline void tcf_kfree_skb_list(struct sk_buff *skb)
+{
+ while (unlikely(skb)) {
+ struct sk_buff *next = skb->next;
+
+ prefetch(next);
+ kfree_skb_reason(skb, tcf_get_drop_reason(skb));
+ skb = next;
+ }
+}
+
+static inline void qdisc_dequeue_drop(struct Qdisc *q, struct sk_buff *skb,
+ enum skb_drop_reason reason)
+{
+ DEBUG_NET_WARN_ON_ONCE(!(q->flags & TCQ_F_DEQUEUE_DROPS));
+ DEBUG_NET_WARN_ON_ONCE(q->flags & TCQ_F_NOLOCK);
+
+ tcf_set_drop_reason(skb, reason);
+ skb->next = q->to_free;
+ q->to_free = skb;
+}
+
/* Instead of calling kfree_skb() while root qdisc lock is held,
* queue the skb for future freeing at end of __dev_xmit_skb()
*/
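A hedged sketch of the TCQ_F_DEQUEUE_DROPS flow these helpers enable: a qdisc's dequeue chains stale packets onto q->to_free via qdisc_dequeue_drop(), qdisc_run_end() hands the chain back through qdisc_run(), and the caller frees it with tcf_kfree_skb_list() after the critical section. Both functions and the staleness test are illustrative stand-ins, not the real qdisc or __dev_xmit_skb() code; the qdisc is assumed to set TCQ_F_DEQUEUE_DROPS (and not TCQ_F_NOLOCK) at init time.

/* Hypothetical staleness test, e.g. a codel-style sojourn-time check. */
static bool foo_skb_is_stale(const struct sk_buff *skb)
{
	return false;
}

static struct sk_buff *foo_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = __qdisc_dequeue_head(&sch->q)) != NULL) {
		if (!foo_skb_is_stale(skb))
			return skb;
		/* Defer the free instead of calling kfree_skb() here. */
		qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_DROP);
	}
	return NULL;
}

static void foo_xmit(struct Qdisc *q)
{
	struct sk_buff *to_free;

	to_free = qdisc_run(q);		/* returns q->to_free, if any */
	tcf_kfree_skb_list(to_free);	/* frees with per-skb drop reasons */
}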
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index 3d5879e08e78..6f2cd562b1de 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -72,7 +72,6 @@ struct sctp_shared_key *sctp_auth_get_shkey(
int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
struct sctp_association *asoc,
gfp_t gfp);
-int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
const struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
const struct sctp_hmac *
sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
diff --git a/include/net/sock.h b/include/net/sock.h
index a5f36ea9d46f..02253c6a578b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -305,6 +305,8 @@ struct sk_filter;
* @sk_txrehash: enable TX hash rethink
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
+ * @tcp_retransmit_timer: tcp retransmit timer
+ * @mptcp_retransmit_timer: mptcp retransmit timer
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING flags
@@ -481,11 +483,12 @@ struct sock {
struct rb_root tcp_rtx_queue;
};
struct sk_buff_head sk_write_queue;
- u32 sk_dst_pending_confirm;
- u32 sk_pacing_status; /* see enum sk_pacing */
struct page_frag sk_frag;
- struct timer_list sk_timer;
-
+ union {
+ struct timer_list sk_timer;
+ struct timer_list tcp_retransmit_timer;
+ struct timer_list mptcp_retransmit_timer;
+ };
unsigned long sk_pacing_rate; /* bytes per second */
atomic_t sk_zckey;
atomic_t sk_tskey;
@@ -493,6 +496,8 @@ struct sock {
__cacheline_group_end(sock_write_tx);
__cacheline_group_begin(sock_read_tx);
+ u32 sk_dst_pending_confirm;
+ u32 sk_pacing_status; /* see enum sk_pacing */
unsigned long sk_max_pacing_rate;
long sk_sndtimeo;
u32 sk_priority;
@@ -1631,6 +1636,8 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
sk_mem_reclaim(sk);
}
+void __sk_charge(struct sock *sk, gfp_t gfp);
+
#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
static inline void sk_owner_set(struct sock *sk, struct module *owner)
{
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4833ec7903ec..0deb5e9dd911 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2313,7 +2313,6 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct tcphdr *th);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f3014e4f54fc..0a14daaa5dd4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -536,7 +536,8 @@ static inline int xfrm_af2proto(unsigned int family)
static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
{
- if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
+ if ((x->sel.family != AF_UNSPEC) ||
+ (ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
(ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
return &x->inner_mode;
else
diff --git a/include/uapi/linux/android/binder_netlink.h b/include/uapi/linux/android/binder_netlink.h
index b218f96d6668..bf69833c9a19 100644
--- a/include/uapi/linux/android/binder_netlink.h
+++ b/include/uapi/linux/android/binder_netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/binder.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_ANDROID_BINDER_NETLINK_H
#define _UAPI_LINUX_ANDROID_BINDER_NETLINK_H
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index ef62f56eaaef..c30d16746159 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -5,6 +5,7 @@
* Definitions for the CAN netlink interface
*
* Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
@@ -103,6 +104,11 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */
#define CAN_CTRLMODE_TDC_AUTO 0x200 /* FD transceiver automatically calculates TDCV */
#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* FD TDCV is manually set up by user */
+#define CAN_CTRLMODE_RESTRICTED 0x800 /* Restricted operation mode */
+#define CAN_CTRLMODE_XL 0x1000 /* CAN XL mode */
+#define CAN_CTRLMODE_XL_TDC_AUTO 0x2000 /* XL transceiver automatically calculates TDCV */
+#define CAN_CTRLMODE_XL_TDC_MANUAL 0x4000 /* XL TDCV is manually set up by user */
+#define CAN_CTRLMODE_XL_TMS 0x8000 /* Transceiver Mode Switching */
/*
* CAN device statistics
@@ -138,6 +144,11 @@ enum {
IFLA_CAN_BITRATE_MAX,
IFLA_CAN_TDC, /* FD */
IFLA_CAN_CTRLMODE_EXT,
+ IFLA_CAN_XL_DATA_BITTIMING,
+ IFLA_CAN_XL_DATA_BITTIMING_CONST,
+ IFLA_CAN_XL_DATA_BITRATE_CONST,
+ IFLA_CAN_XL_TDC,
+ IFLA_CAN_XL_PWM,
/* add new constants above here */
__IFLA_CAN_MAX,
@@ -179,6 +190,29 @@ enum {
IFLA_CAN_CTRLMODE_MAX = __IFLA_CAN_CTRLMODE - 1
};
+/*
+ * CAN FD/XL Pulse-Width Modulation (PWM)
+ *
+ * Please refer to struct can_pwm_const and can_pwm in
+ * include/linux/can/bittiming.h for further details.
+ */
+enum {
+ IFLA_CAN_PWM_UNSPEC,
+ IFLA_CAN_PWM_PWMS_MIN, /* u32 */
+ IFLA_CAN_PWM_PWMS_MAX, /* u32 */
+ IFLA_CAN_PWM_PWML_MIN, /* u32 */
+ IFLA_CAN_PWM_PWML_MAX, /* u32 */
+ IFLA_CAN_PWM_PWMO_MIN, /* u32 */
+ IFLA_CAN_PWM_PWMO_MAX, /* u32 */
+ IFLA_CAN_PWM_PWMS, /* u32 */
+ IFLA_CAN_PWM_PWML, /* u32 */
+ IFLA_CAN_PWM_PWMO, /* u32 */
+
+ /* add new constants above here */
+ __IFLA_CAN_PWM,
+ IFLA_CAN_PWM_MAX = __IFLA_CAN_PWM - 1
+};
+
/* u16 termination range: 1..65535 Ohms */
#define CAN_TERMINATION_DISABLED 0
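A hedged sketch of a CAN driver emitting the new PWM parameters in a link dump, assuming the IFLA_CAN_XL_PWM attribute added above carries this nest; struct foo_pwm and the function are illustrative (the real values mirror struct can_pwm in include/linux/can/bittiming.h).

struct foo_pwm {
	u32 pwms;
	u32 pwml;
	u32 pwmo;
};

static int foo_fill_pwm(struct sk_buff *skb, const struct foo_pwm *pwm)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, IFLA_CAN_XL_PWM);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) ||
	    nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) ||
	    nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}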
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 157f11d3fb72..e7d6b6d13470 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -639,6 +639,9 @@ enum devlink_attr {
DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD, /* u64 */
+ DEVLINK_ATTR_PARAM_VALUE_DEFAULT, /* dynamic */
+ DEVLINK_ATTR_PARAM_RESET_DEFAULT, /* flag */
+
/* Add new attributes above here, update the spec in
* Documentation/netlink/specs/devlink.yaml and re-generate
* net/devlink/netlink_gen.c.
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index 69d35570ac4f..b7ff9c44f9aa 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_DPLL_H
#define _UAPI_LINUX_DPLL_H
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8bd5ea5469d9..eb7ff2602fbb 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2077,6 +2077,10 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT = 118,
ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT = 119,
ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT = 120,
+ ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT = 121,
+ ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT = 122,
+ ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT = 123,
+ ETHTOOL_LINK_MODE_1600000baseDR8_2_Full_BIT = 124,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
@@ -2190,6 +2194,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_200000 200000
#define SPEED_400000 400000
#define SPEED_800000 800000
+#define SPEED_1600000 1600000
#define SPEED_UNKNOWN -1
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index b71b175df46d..556a0c834df5 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/ethtool.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H
#define _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index b5cd3e7b3775..bb6bef74d2d1 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_FOU_H
#define _UAPI_LINUX_FOU_H
diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h
index 662e7de46c54..d7e40f594888 100644
--- a/include/uapi/linux/handshake.h
+++ b/include/uapi/linux/handshake.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/handshake.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_HANDSHAKE_H
#define _UAPI_LINUX_HANDSHAKE_H
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 2c93b7b731c8..df9d44a11540 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -92,7 +92,9 @@
#define ETH_P_ETHERCAT 0x88A4 /* EtherCAT */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
-#define ETH_P_MXLGSW 0x88C3 /* MaxLinear GSW DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_MXLGSW 0x88C3 /* Infineon Technologies Corporate Research ST
+ * Used by MaxLinear GSW DSA
+ */
#define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_LLDP 0x88CC /* Link Layer Discovery Protocol */
diff --git a/include/uapi/linux/if_team.h b/include/uapi/linux/if_team.h
index a5c06243a435..f4cd839ae725 100644
--- a/include/uapi/linux/if_team.h
+++ b/include/uapi/linux/if_team.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/team.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_IF_TEAM_H
#define _UAPI_LINUX_IF_TEAM_H
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 9cd89bcc1d9c..30f3c9eaafaa 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -27,7 +27,7 @@
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
#define INPUT_PROP_ACCELEROMETER 0x06 /* has accelerometer */
-#define INPUT_PROP_HAPTIC_TOUCHPAD 0x07 /* is a haptic touchpad */
+#define INPUT_PROP_PRESSUREPAD 0x07 /* pressure triggers clicks */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
diff --git a/include/uapi/linux/io_uring/query.h b/include/uapi/linux/io_uring/query.h
index 5d754322a27c..3539ccbfd064 100644
--- a/include/uapi/linux/io_uring/query.h
+++ b/include/uapi/linux/io_uring/query.h
@@ -36,6 +36,9 @@ struct io_uring_query_opcode {
__u64 enter_flags;
/* Bitmask of all supported IOSQE_* flags */
__u64 sqe_flags;
+ /* The number of available query opcodes */
+ __u32 nr_query_opcodes;
+ __u32 __pad;
};
#endif
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
index 8197a4800604..40aa545101a3 100644
--- a/include/uapi/linux/isst_if.h
+++ b/include/uapi/linux/isst_if.h
@@ -52,7 +52,7 @@ struct isst_if_cpu_map {
/**
* struct isst_if_cpu_maps - structure for CPU map IOCTL
* @cmd_count: Number of CPU mapping command in cpu_map[]
- * @cpu_map[]: Holds one or more CPU map data structure
+ * @cpu_map: Holds one or more CPU map data structure
*
 * This structure is used with ioctl ISST_IF_GET_PHY_ID to send
* one or more CPU mapping commands. Here IOCTL return value indicates
@@ -82,8 +82,8 @@ struct isst_if_io_reg {
/**
* struct isst_if_io_regs - structure for IO register commands
- * @cmd_count: Number of io reg commands in io_reg[]
- * @io_reg[]: Holds one or more io_reg command structure
+ * @req_count: Number of io reg commands in io_reg[]
+ * @io_reg: Holds one or more io_reg command structure
*
 * This structure is used with ioctl ISST_IF_IO_CMD to send
* one or more read/write commands to PUNIT. Here IOCTL return value
@@ -120,7 +120,7 @@ struct isst_if_mbox_cmd {
/**
* struct isst_if_mbox_cmds - structure for mailbox commands
* @cmd_count: Number of mailbox commands in mbox_cmd[]
- * @mbox_cmd[]: Holds one or more mbox commands
+ * @mbox_cmd: Holds one or more mbox commands
*
 * This structure is used with ioctl ISST_IF_MBOX_COMMAND to send
* one or more mailbox commands to PUNIT. Here IOCTL return value
@@ -152,7 +152,7 @@ struct isst_if_msr_cmd {
/**
* struct isst_if_msr_cmds - structure for msr commands
* @cmd_count: Number of mailbox commands in msr_cmd[]
- * @msr_cmd[]: Holds one or more msr commands
+ * @msr_cmd: Holds one or more msr commands
*
 * This structure is used with ioctl ISST_IF_MSR_COMMAND to send
* one or more MSR commands. IOCTL return value indicates number of
@@ -167,8 +167,9 @@ struct isst_if_msr_cmds {
* struct isst_core_power - Structure to get/set core_power feature
* @get_set: 0: Get, 1: Set
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @enable: Feature enable status
+ * @supported: Power domain supports SST_CP interface
* @priority_type: Priority type for the feature (ordered/proportional)
*
* Structure to get/set core_power feature state using IOCTL
@@ -187,11 +188,11 @@ struct isst_core_power {
 * struct isst_clos_param - Structure to get/set clos param
* @get_set: 0: Get, 1: Set
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
- * clos: Clos ID for the parameters
- * min_freq_mhz: Minimum frequency in MHz
- * max_freq_mhz: Maximum frequency in MHz
- * prop_prio: Proportional priority from 0-15
+ * @power_domain_id: Power Domain id
+ * @clos: Clos ID for the parameters
+ * @min_freq_mhz: Minimum frequency in MHz
+ * @max_freq_mhz: Maximum frequency in MHz
+ * @prop_prio: Proportional priority from 0-15
*
* Structure to get/set per clos property using IOCTL
* ISST_IF_CLOS_PARAM.
@@ -209,7 +210,7 @@ struct isst_clos_param {
/**
* struct isst_if_clos_assoc - Structure to assign clos to a CPU
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @logical_cpu: CPU number
* @clos: Clos ID to assign to the logical CPU
*
@@ -228,6 +229,7 @@ struct isst_if_clos_assoc {
* @get_set: Request is for get or set
* @punit_cpu_map: Set to 1 if the CPU number is punit numbering not
* Linux CPU number
+ * @assoc_info: CLOS data for this CPU
*
* Structure used to get/set associate CPUs to clos using IOCTL
* ISST_IF_CLOS_ASSOC.
@@ -257,7 +259,7 @@ struct isst_tpmi_instance_count {
/**
* struct isst_perf_level_info - Structure to get information on SST-PP levels
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @logical_cpu: CPU number
* @clos: Clos ID to assign to the logical CPU
* @max_level: Maximum performance level supported by the platform
@@ -267,8 +269,8 @@ struct isst_tpmi_instance_count {
* @feature_state: SST-BF and SST-TF (enabled/disabled) status at current level
* @locked: SST-PP performance level change is locked/unlocked
* @enabled: SST-PP feature is enabled or not
- * @sst-tf_support: SST-TF support status at this level
- * @sst-bf_support: SST-BF support status at this level
+ * @sst_tf_support: SST-TF support status at this level
+ * @sst_bf_support: SST-BF support status at this level
*
* Structure to get SST-PP details using IOCTL ISST_IF_PERF_LEVELS.
*/
@@ -289,7 +291,7 @@ struct isst_perf_level_info {
/**
* struct isst_perf_level_control - Structure to set SST-PP level
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @level: level to set
*
 * Structure used to change SST-PP level using IOCTL ISST_IF_PERF_SET_LEVEL.
@@ -303,7 +305,7 @@ struct isst_perf_level_control {
/**
* struct isst_perf_feature_control - Structure to activate SST-BF/SST-TF
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @feature: bit 0 = SST-BF state, bit 1 = SST-TF state
*
* Structure used to enable SST-BF/SST-TF using IOCTL ISST_IF_PERF_SET_FEATURE.
@@ -320,7 +322,7 @@ struct isst_perf_feature_control {
/**
* struct isst_perf_level_data_info - Structure to get SST-PP level details
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @tdp_ratio: TDP Ratio
* @base_freq_mhz: Base frequency in MHz
@@ -341,8 +343,8 @@ struct isst_perf_feature_control {
* @pm_fabric_freq_mhz: Fabric (Uncore) minimum frequency
* @max_buckets: Maximum trl buckets
* @max_trl_levels: Maximum trl levels
- * @bucket_core_counts[TRL_MAX_BUCKETS]: Number of cores per bucket
- * @trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS]: maximum frequency
+ * @bucket_core_counts: Number of cores per bucket
+ * @trl_freq_mhz: maximum frequency
* for a bucket and trl level
*
* Structure used to get information on frequencies and TDP for a SST-PP
@@ -402,7 +404,7 @@ struct isst_perf_level_fabric_info {
/**
* struct isst_perf_level_cpu_mask - Structure to get SST-PP level CPU mask
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @punit_cpu_map: Set to 1 if the CPU number is punit numbering not
* Linux CPU number. If 0 CPU buffer is copied to user space
@@ -430,7 +432,7 @@ struct isst_perf_level_cpu_mask {
/**
* struct isst_base_freq_info - Structure to get SST-BF frequencies
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @high_base_freq_mhz: High priority CPU base frequency
* @low_base_freq_mhz: Low priority CPU base frequency
@@ -453,9 +455,11 @@ struct isst_base_freq_info {
/**
* struct isst_turbo_freq_info - Structure to get SST-TF frequencies
* @socket_id: Socket/package id
- * @power_domain: Power Domain id
+ * @power_domain_id: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @max_clip_freqs: Maximum number of low priority core clipping frequencies
+ * @max_buckets: Maximum trl buckets
+ * @max_trl_levels: Maximum trl levels
* @lp_clip_freq_mhz: Clip frequencies per trl level
* @bucket_core_counts: Maximum number of cores for a bucket
* @trl_freq_mhz: Frequencies per trl level for each bucket
diff --git a/include/uapi/linux/lockd_netlink.h b/include/uapi/linux/lockd_netlink.h
index 21c65aec3bc6..2d766a0fa6ea 100644
--- a/include/uapi/linux/lockd_netlink.h
+++ b/include/uapi/linux/lockd_netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/lockd.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_LOCKD_NETLINK_H
#define _UAPI_LINUX_LOCKD_NETLINK_H
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 6975f182b22c..8d769f100de6 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -116,10 +116,24 @@
#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
/* 10PASS-TS/2BASE-TL */
#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04)
+/* Note: the MDIO_CTRL1_SPEED_XXX values for everything past 10PASS-TS/2BASE-TL
+ * do not match between the PCS and PMA values. Any additions past this point
+ * should be PMA or PCS specific. The following 2 defines are workarounds for
+ * values added before this was caught. They should be considered deprecated.
+ */
+#define MDIO_CTRL1_SPEED2_5G MDIO_PMA_CTRL1_SPEED2_5G
+#define MDIO_CTRL1_SPEED5G MDIO_PMA_CTRL1_SPEED5G
+/* 100 Gb/s */
+#define MDIO_PCS_CTRL1_SPEED100G (MDIO_CTRL1_SPEEDSELEXT | 0x10)
+/* 25 Gb/s */
+#define MDIO_PCS_CTRL1_SPEED25G (MDIO_CTRL1_SPEEDSELEXT | 0x14)
+/* 50 Gb/s */
+#define MDIO_PCS_CTRL1_SPEED50G (MDIO_CTRL1_SPEEDSELEXT | 0x18)
/* 2.5 Gb/s */
-#define MDIO_CTRL1_SPEED2_5G (MDIO_CTRL1_SPEEDSELEXT | 0x18)
+#define MDIO_PMA_CTRL1_SPEED2_5G (MDIO_CTRL1_SPEEDSELEXT | 0x18)
/* 5 Gb/s */
-#define MDIO_CTRL1_SPEED5G (MDIO_CTRL1_SPEEDSELEXT | 0x1c)
+#define MDIO_PMA_CTRL1_SPEED5G (MDIO_CTRL1_SPEEDSELEXT | 0x1c)
+
/* Status register 1. */
#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */
@@ -133,6 +147,11 @@
#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */
#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */
+/* Device Identifier 2 */
+#define MDIO_DEVID2_OUI 0xfc00 /* OUI Portion of PHY ID */
+#define MDIO_DEVID2_MODEL_NUM 0x03f0 /* Manufacturer's Model Number */
+#define MDIO_DEVID2_REV_NUM 0x000f /* Revision Number */
+
/* Speed register. */
#define MDIO_SPEED_10G 0x0001 /* 10G capable */
#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */
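A small userspace sketch of how the new MDIO_DEVID2_* masks could be used to pull the OUI, model and revision fields out of a Device Identifier 2 register value. The mask values and shift amounts below are restated locally from the hunk above rather than included from a uapi header, so treat them as illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the field masks shown in the diff above. */
#define DEVID2_OUI              0xfc00  /* OUI portion of the PHY ID */
#define DEVID2_MODEL_NUM        0x03f0  /* manufacturer's model number */
#define DEVID2_REV_NUM          0x000f  /* revision number */

static void decode_devid2(uint16_t devid2)
{
        unsigned int oui   = (devid2 & DEVID2_OUI) >> 10;
        unsigned int model = (devid2 & DEVID2_MODEL_NUM) >> 4;
        unsigned int rev   = devid2 & DEVID2_REV_NUM;

        printf("OUI bits 0x%02x, model %u, revision %u\n", oui, model, rev);
}

int main(void)
{
        decode_devid2(0xb872);  /* arbitrary sample register value */
        return 0;
}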
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 7fa67c2031a5..5d3f8c9e3a62 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -197,7 +197,7 @@ struct statmount {
*/
struct mnt_id_req {
__u32 size;
- __u32 spare;
+ __u32 mnt_ns_fd;
__u64 mnt_id;
__u64 param;
__u64 mnt_ns_id;
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
index bf44a5cf5b5a..c97d060ee90b 100644
--- a/include/uapi/linux/mptcp_pm.h
+++ b/include/uapi/linux/mptcp_pm.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/mptcp_pm.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_MPTCP_PM_H
#define _UAPI_LINUX_MPTCP_PM_H
diff --git a/include/uapi/linux/net_shaper.h b/include/uapi/linux/net_shaper.h
index d8834b59f7d7..3dd22c2930d9 100644
--- a/include/uapi/linux/net_shaper.h
+++ b/include/uapi/linux/net_shaper.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/net_shaper.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_NET_SHAPER_H
#define _UAPI_LINUX_NET_SHAPER_H
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 048c8de1a130..e0b579a1df4f 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_NETDEV_H
#define _UAPI_LINUX_NETDEV_H
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
index 887cbd12b695..e157e2009ea8 100644
--- a/include/uapi/linux/nfsd_netlink.h
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/nfsd.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_NFSD_NETLINK_H
#define _UAPI_LINUX_NFSD_NETLINK_H
diff --git a/include/uapi/linux/ovpn.h b/include/uapi/linux/ovpn.h
index 680d1522dc87..959b41def61f 100644
--- a/include/uapi/linux/ovpn.h
+++ b/include/uapi/linux/ovpn.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/ovpn.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_OVPN_H
#define _UAPI_LINUX_OVPN_H
diff --git a/include/uapi/linux/psp.h b/include/uapi/linux/psp.h
index d8449c043ba1..a3a336488dc3 100644
--- a/include/uapi/linux/psp.h
+++ b/include/uapi/linux/psp.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/psp.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_PSP_H
#define _UAPI_LINUX_PSP_H
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 386ad36f1a0a..cab5cadca8ef 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -249,8 +249,9 @@ struct tee_ioctl_param {
* @cancel_id: [in] Cancellation id, a unique value to identify this request
* @session: [out] Session id
* @ret: [out] return value
- * @ret_origin [out] origin of the return value
- * @num_params [in] number of parameters following this struct
+ * @ret_origin: [out] origin of the return value
+ * @num_params: [in] number of &struct tee_ioctl_param entries in @params
+ * @params: array of ioctl parameters
*/
struct tee_ioctl_open_session_arg {
__u8 uuid[TEE_IOCTL_UUID_LEN];
@@ -276,14 +277,14 @@ struct tee_ioctl_open_session_arg {
struct tee_ioctl_buf_data)
/**
- * struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted
- * Application
+ * struct tee_ioctl_invoke_arg - Invokes a function in a Trusted Application
* @func: [in] Trusted Application function, specific to the TA
* @session: [in] Session id
* @cancel_id: [in] Cancellation id, a unique value to identify this request
* @ret: [out] return value
- * @ret_origin [out] origin of the return value
- * @num_params [in] number of parameters following this struct
+ * @ret_origin: [out] origin of the return value
+ * @num_params: [in] number of parameters following this struct
+ * @params: array of ioctl parameters
*/
struct tee_ioctl_invoke_arg {
__u32 func;
@@ -338,7 +339,8 @@ struct tee_ioctl_close_session_arg {
/**
* struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function
* @func: [in] supplicant function
- * @num_params [in/out] number of parameters following this struct
+ * @num_params: [in/out] number of &struct tee_ioctl_param entries in @params
+ * @params: array of ioctl parameters
*
* @num_params is the number of params that tee-supplicant has room to
* receive when input, @num_params is the number of actual params
@@ -363,7 +365,8 @@ struct tee_iocl_supp_recv_arg {
/**
* struct tee_iocl_supp_send_arg - Send a response to a received request
* @ret: [out] return value
- * @num_params [in] number of parameters following this struct
+ * @num_params: [in] number of &struct tee_ioctl_param entries in @params
+ * @params: array of ioctl parameters
*/
struct tee_iocl_supp_send_arg {
__u32 ret;
@@ -454,11 +457,13 @@ struct tee_ioctl_shm_register_fd_data {
*/
/**
- * struct tee_ioctl_invoke_func_arg - Invokes an object in a Trusted Application
+ * struct tee_ioctl_object_invoke_arg - Invokes an object in a
+ * Trusted Application
* @id: [in] Object id
* @op: [in] Object operation, specific to the object
* @ret: [out] return value
* @num_params: [in] number of parameters following this struct
+ * @params: array of ioctl parameters
*/
struct tee_ioctl_object_invoke_arg {
__u64 id;
diff --git a/io_uring/cmd_net.c b/io_uring/cmd_net.c
index 27a09aa4c9d0..3b75931bd569 100644
--- a/io_uring/cmd_net.c
+++ b/io_uring/cmd_net.c
@@ -127,7 +127,7 @@ static int io_uring_cmd_timestamp(struct socket *sock,
if (!unlikely(skb_queue_empty(&list))) {
scoped_guard(spinlock_irqsave, &q->lock)
- skb_queue_splice(q, &list);
+ skb_queue_splice(&list, q);
}
return -EAGAIN;
}
diff --git a/io_uring/query.c b/io_uring/query.c
index 645301bd2c82..cf02893ba911 100644
--- a/io_uring/query.c
+++ b/io_uring/query.c
@@ -20,6 +20,8 @@ static ssize_t io_query_ops(void *data)
e->ring_setup_flags = IORING_SETUP_FLAGS;
e->enter_flags = IORING_ENTER_FLAGS;
e->sqe_flags = SQE_VALID_FLAGS;
+ e->nr_query_opcodes = __IO_URING_QUERY_MAX;
+ e->__pad = 0;
return sizeof(*e);
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 2602d76d5ff0..0010c4992490 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -943,8 +943,8 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
struct req_iterator rq_iter;
struct io_mapped_ubuf *imu;
struct io_rsrc_node *node;
- struct bio_vec bv, *bvec;
- u16 nr_bvecs;
+ struct bio_vec bv;
+ unsigned int nr_bvecs = 0;
int ret = 0;
io_ring_submit_lock(ctx, issue_flags);
@@ -965,8 +965,11 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
goto unlock;
}
- nr_bvecs = blk_rq_nr_phys_segments(rq);
- imu = io_alloc_imu(ctx, nr_bvecs);
+ /*
+ * blk_rq_nr_phys_segments() may overestimate the number of bvecs
+ * but avoids needing to iterate over the bvecs
+ */
+ imu = io_alloc_imu(ctx, blk_rq_nr_phys_segments(rq));
if (!imu) {
kfree(node);
ret = -ENOMEM;
@@ -977,16 +980,15 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
imu->len = blk_rq_bytes(rq);
imu->acct_pages = 0;
imu->folio_shift = PAGE_SHIFT;
- imu->nr_bvecs = nr_bvecs;
refcount_set(&imu->refs, 1);
imu->release = release;
imu->priv = rq;
imu->is_kbuf = true;
imu->dir = 1 << rq_data_dir(rq);
- bvec = imu->bvec;
rq_for_each_bvec(bv, rq, rq_iter)
- *bvec++ = bv;
+ imu->bvec[nr_bvecs++] = bv;
+ imu->nr_bvecs = nr_bvecs;
node->buf = imu;
data->nodes[index] = node;
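The io_uring/rsrc.c hunk sizes the bvec array from blk_rq_nr_phys_segments() (a possible overestimate) and only records the real count after filling it. A hedged, generic C sketch of that allocate-by-estimate, count-while-filling pattern; the struct and helper names here are made up for illustration, not io_uring APIs:

#include <stdio.h>
#include <stdlib.h>

struct seg {
        size_t off;
        size_t len;
};

/*
 * Allocate for a possibly overestimated segment count, copy while
 * counting the entries actually kept, and store the real count only
 * at the end.
 */
static struct seg *collect_segs(const struct seg *src, size_t n_src,
                                size_t estimate, size_t *out_nr)
{
        struct seg *dst = calloc(estimate, sizeof(*dst));
        size_t nr = 0;

        if (!dst)
                return NULL;

        for (size_t i = 0; i < n_src && nr < estimate; i++)
                if (src[i].len)                 /* skip empty segments */
                        dst[nr++] = src[i];

        *out_nr = nr;                           /* actual count, not the estimate */
        return dst;
}

int main(void)
{
        const struct seg in[] = { { 0, 512 }, { 512, 0 }, { 512, 1024 } };
        size_t nr = 0;
        struct seg *segs = collect_segs(in, 3, 3, &nr);

        if (segs)
                printf("kept %zu of 3 candidate segments\n", nr);
        free(segs);
        return 0;
}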
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 5b2241a5813c..abe68ba9c9dc 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -463,7 +463,10 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
void io_readv_writev_cleanup(struct io_kiocb *req)
{
+ struct io_async_rw *rw = req->async_data;
+
lockdep_assert_held(&req->ctx->uring_lock);
+ io_vec_free(&rw->vec);
io_rw_recycle(req, 0);
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 3e830fd31f5f..0785eb739eb8 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -4167,7 +4167,8 @@ release_prog:
}
/**
- * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
+ * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
+ * mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
@@ -4176,15 +4177,17 @@ release_prog:
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
-__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
- void *map__map, bpf_task_work_callback_t callback,
- void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
+ struct bpf_task_work *tw, void *map__map,
+ bpf_task_work_callback_t callback,
+ void *aux__prog)
{
return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
}
/**
- * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
+ * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
+ * mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
@@ -4193,9 +4196,10 @@ __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct b
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
-__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
- void *map__map, bpf_task_work_callback_t callback,
- void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
+ struct bpf_task_work *tw, void *map__map,
+ bpf_task_work_callback_t callback,
+ void *aux__prog)
{
return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
}
@@ -4374,9 +4378,9 @@ BTF_ID_FLAGS(func, bpf_strnstr);
#if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
#endif
-BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_stream_vprintk_impl, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
diff --git a/kernel/bpf/stream.c b/kernel/bpf/stream.c
index eb6c5a21c2ef..ff16c631951b 100644
--- a/kernel/bpf/stream.c
+++ b/kernel/bpf/stream.c
@@ -355,7 +355,8 @@ __bpf_kfunc_start_defs();
* Avoid using enum bpf_stream_id so that kfunc users don't have to pull in the
* enum in headers.
*/
-__bpf_kfunc int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, u32 len__sz, void *aux__prog)
+__bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
+ u32 len__sz, void *aux__prog)
{
struct bpf_bprintf_data data = {
.get_bin_args = true,
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 5949095e51c3..f2cb0b097093 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -479,11 +479,6 @@ again:
* BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
* trampoline again, and retry register.
*/
- /* reset fops->func and fops->trampoline for re-register */
- tr->fops->func = NULL;
- tr->fops->trampoline = 0;
-
- /* free im memory and reallocate later */
bpf_tramp_image_free(im);
goto again;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ff40e5e65c43..fbe4bb91c564 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8866,7 +8866,7 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
struct bpf_verifier_state *cur)
{
struct bpf_func_state *fold, *fcur;
- int i, fr;
+ int i, fr, num_slots;
reset_idmap_scratch(env);
for (fr = old->curframe; fr >= 0; fr--) {
@@ -8879,7 +8879,9 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
&fcur->regs[i],
&env->idmap_scratch);
- for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) {
+ num_slots = min(fold->allocated_stack / BPF_REG_SIZE,
+ fcur->allocated_stack / BPF_REG_SIZE);
+ for (i = 0; i < num_slots; i++) {
if (!is_spilled_reg(&fold->stack[i]) ||
!is_spilled_reg(&fcur->stack[i]))
continue;
@@ -12259,8 +12261,8 @@ enum special_kfunc_type {
KF_bpf_res_spin_lock_irqsave,
KF_bpf_res_spin_unlock_irqrestore,
KF___bpf_trap,
- KF_bpf_task_work_schedule_signal,
- KF_bpf_task_work_schedule_resume,
+ KF_bpf_task_work_schedule_signal_impl,
+ KF_bpf_task_work_schedule_resume_impl,
};
BTF_ID_LIST(special_kfunc_list)
@@ -12331,13 +12333,13 @@ BTF_ID(func, bpf_res_spin_unlock)
BTF_ID(func, bpf_res_spin_lock_irqsave)
BTF_ID(func, bpf_res_spin_unlock_irqrestore)
BTF_ID(func, __bpf_trap)
-BTF_ID(func, bpf_task_work_schedule_signal)
-BTF_ID(func, bpf_task_work_schedule_resume)
+BTF_ID(func, bpf_task_work_schedule_signal_impl)
+BTF_ID(func, bpf_task_work_schedule_resume_impl)
static bool is_task_work_add_kfunc(u32 func_id)
{
- return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
- func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
+ return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
+ func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
}
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
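The widen_imprecise_scalars() change clamps the stack-slot walk to the smaller of the two frames' allocated stacks so neither array is read out of bounds. A minimal generic sketch of the same clamp, using invented names rather than verifier internals:

#include <stdio.h>

#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Walk two arrays in lockstep, but only over the range both cover. */
static int count_matches(const int *a, int a_len, const int *b, int b_len)
{
        int n = MIN(a_len, b_len);
        int matches = 0;

        for (int i = 0; i < n; i++)
                if (a[i] == b[i])
                        matches++;
        return matches;
}

int main(void)
{
        int old_slots[] = { 1, 2, 3, 4 };
        int cur_slots[] = { 1, 2 };

        printf("%d matching slots\n",
               count_matches(old_slots, 4, cur_slots, 2));
        return 0;
}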
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 3b1c43382eec..99dac1aa972a 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -373,7 +373,7 @@ static int __crash_shrink_memory(struct resource *old_res,
old_res->start = 0;
old_res->end = 0;
} else {
- crashk_res.end = ram_res->start - 1;
+ old_res->end = ram_res->start - 1;
}
crash_free_reserved_phys_range(ram_res->start, ram_res->end);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1fd347da9026..2c35acc2722b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11901,7 +11901,7 @@ static int cpu_clock_event_add(struct perf_event *event, int flags)
static void cpu_clock_event_del(struct perf_event *event, int flags)
{
- cpu_clock_event_stop(event, flags);
+ cpu_clock_event_stop(event, PERF_EF_UPDATE);
}
static void cpu_clock_event_read(struct perf_event *event)
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 53166ef86ba4..26e45f86b955 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -821,8 +821,7 @@ int hibernate(void)
goto Restore;
ksys_sync_helper();
- if (filesystem_freeze_enabled)
- filesystems_freeze();
+ filesystems_freeze(filesystem_freeze_enabled);
error = freeze_processes();
if (error)
@@ -928,8 +927,7 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data)
if (error)
goto restore;
- if (filesystem_freeze_enabled)
- filesystems_freeze();
+ filesystems_freeze(filesystem_freeze_enabled);
error = freeze_processes();
if (error)
@@ -1079,8 +1077,7 @@ static int software_resume(void)
if (error)
goto Restore;
- if (filesystem_freeze_enabled)
- filesystems_freeze();
+ filesystems_freeze(filesystem_freeze_enabled);
pm_pr_dbg("Preparing processes for hibernation restore.\n");
error = freeze_processes();
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index b4ca17c2fecf..3d4ebedad69f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -375,8 +375,7 @@ static int suspend_prepare(suspend_state_t state)
if (error)
goto Restore;
- if (filesystem_freeze_enabled)
- filesystems_freeze();
+ filesystems_freeze(filesystem_freeze_enabled);
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
trace_suspend_resume(TPS("freeze_processes"), 0, false);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 0beff7eeaaba..70ae21f7370d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -635,7 +635,7 @@ struct cmp_data {
};
/* Indicates the image size after compression */
-static atomic_t compressed_size = ATOMIC_INIT(0);
+static atomic64_t compressed_size = ATOMIC_INIT(0);
/*
* Compression function that runs in its own thread.
@@ -664,7 +664,7 @@ static int compress_threadfn(void *data)
d->ret = crypto_acomp_compress(d->cr);
d->cmp_len = d->cr->dlen;
- atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
+ atomic64_add(d->cmp_len, &compressed_size);
atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
@@ -689,14 +689,14 @@ static int save_compressed_image(struct swap_map_handle *handle,
ktime_t start;
ktime_t stop;
size_t off;
- unsigned thr, run_threads, nr_threads;
+ unsigned int thr, run_threads, nr_threads;
unsigned char *page = NULL;
struct cmp_data *data = NULL;
struct crc_data *crc = NULL;
hib_init_batch(&hb);
- atomic_set(&compressed_size, 0);
+ atomic64_set(&compressed_size, 0);
/*
* We'll limit the number of threads for compression to limit memory
@@ -877,11 +877,14 @@ out_finish:
stop = ktime_get();
if (!ret)
ret = err2;
- if (!ret)
+ if (!ret) {
+ swsusp_show_speed(start, stop, nr_to_write, "Wrote");
+ pr_info("Image size after compression: %lld kbytes\n",
+ (atomic64_read(&compressed_size) / 1024));
pr_info("Image saving done\n");
- swsusp_show_speed(start, stop, nr_to_write, "Wrote");
- pr_info("Image size after compression: %d kbytes\n",
- (atomic_read(&compressed_size) / 1024));
+ } else {
+ pr_err("Image saving failed: %d\n", ret);
+ }
out_clean:
hib_finish_batch(&hb);
@@ -899,7 +902,8 @@ out_clean:
}
vfree(data);
}
- if (page) free_page((unsigned long)page);
+ if (page)
+ free_page((unsigned long)page);
return ret;
}
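The kernel/power/swap.c hunk replaces a read-then-set update of compressed_size with atomic64_add(). A userspace C11 analogy (stdatomic stands in for the kernel's atomic64_t; this is a sketch, not kernel code) of why the read-modify-write form matters and why the counter moved to 64 bits:

#include <stdatomic.h>
#include <stdio.h>

static atomic_llong compressed_size;    /* stands in for atomic64_t */

/* Old pattern, translated: read, add, store. Two threads doing this
 * concurrently can both read the same old value, so one update is lost. */
static void add_racy(long long len)
{
        long long cur = atomic_load(&compressed_size);

        atomic_store(&compressed_size, cur + len);
}

/* New pattern: a single atomic read-modify-write, so concurrent updates
 * cannot be lost, and the 64-bit counter does not wrap once the
 * compressed image exceeds 2 GiB. */
static void add_safe(long long len)
{
        atomic_fetch_add(&compressed_size, len);
}

int main(void)
{
        add_racy(4096);
        add_safe(4096);
        printf("total: %lld bytes\n", (long long)atomic_load(&compressed_size));
        return 0;
}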
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index ecb251e883ea..979484dab2d3 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -25,7 +25,7 @@ static struct scx_sched __rcu *scx_root;
* guarantee system safety. Maintain a dedicated task list which contains every
* task between its fork and eventual free.
*/
-static DEFINE_SPINLOCK(scx_tasks_lock);
+static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
static LIST_HEAD(scx_tasks);
/* ops enable/disable */
@@ -476,7 +476,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
- spin_lock_irq(&scx_tasks_lock);
+ raw_spin_lock_irq(&scx_tasks_lock);
iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
list_add(&iter->cursor.tasks_node, &scx_tasks);
@@ -507,14 +507,14 @@ static void scx_task_iter_unlock(struct scx_task_iter *iter)
__scx_task_iter_rq_unlock(iter);
if (iter->list_locked) {
iter->list_locked = false;
- spin_unlock_irq(&scx_tasks_lock);
+ raw_spin_unlock_irq(&scx_tasks_lock);
}
}
static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
{
if (!iter->list_locked) {
- spin_lock_irq(&scx_tasks_lock);
+ raw_spin_lock_irq(&scx_tasks_lock);
iter->list_locked = true;
}
}
@@ -2940,9 +2940,9 @@ void scx_post_fork(struct task_struct *p)
}
}
- spin_lock_irq(&scx_tasks_lock);
+ raw_spin_lock_irq(&scx_tasks_lock);
list_add_tail(&p->scx.tasks_node, &scx_tasks);
- spin_unlock_irq(&scx_tasks_lock);
+ raw_spin_unlock_irq(&scx_tasks_lock);
percpu_up_read(&scx_fork_rwsem);
}
@@ -2966,9 +2966,9 @@ void sched_ext_free(struct task_struct *p)
{
unsigned long flags;
- spin_lock_irqsave(&scx_tasks_lock, flags);
+ raw_spin_lock_irqsave(&scx_tasks_lock, flags);
list_del_init(&p->scx.tasks_node);
- spin_unlock_irqrestore(&scx_tasks_lock, flags);
+ raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
/*
* @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
@@ -4276,7 +4276,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
size_t avail, used;
bool idle;
- rq_lock(rq, &rf);
+ rq_lock_irqsave(rq, &rf);
idle = list_empty(&rq->scx.runnable_list) &&
rq->curr->sched_class == &idle_sched_class;
@@ -4345,7 +4345,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
scx_dump_task(&s, &dctx, p, ' ');
next:
- rq_unlock(rq, &rf);
+ rq_unlock_irqrestore(rq, &rf);
}
dump_newline(&s);
@@ -4479,8 +4479,11 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
goto err_free_gdsqs;
sch->helper = kthread_run_worker(0, "sched_ext_helper");
- if (!sch->helper)
+ if (IS_ERR(sch->helper)) {
+ ret = PTR_ERR(sch->helper);
goto err_free_pcpu;
+ }
+
sched_set_fifo(sch->helper->task);
atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
@@ -5321,8 +5324,8 @@ void __init init_sched_ext_class(void)
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
- init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
- init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
+ rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
+ rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
if (cpu_online(cpu))
cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
@@ -6401,7 +6404,7 @@ __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
guard(rcu)();
- sch = rcu_dereference(sch);
+ sch = rcu_dereference(scx_root);
if (unlikely(!sch))
return;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index aa3120104a51..56e17b625c72 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -475,12 +475,6 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
if (!kc->timer_create)
return -EOPNOTSUPP;
- new_timer = alloc_posix_timer();
- if (unlikely(!new_timer))
- return -EAGAIN;
-
- spin_lock_init(&new_timer->it_lock);
-
/* Special case for CRIU to restore timers with a given timer ID. */
if (unlikely(current->signal->timer_create_restore_ids)) {
if (copy_from_user(&req_id, created_timer_id, sizeof(req_id)))
@@ -490,6 +484,12 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
return -EINVAL;
}
+ new_timer = alloc_posix_timer();
+ if (unlikely(!new_timer))
+ return -EAGAIN;
+
+ spin_lock_init(&new_timer->it_lock);
+
/*
* Add the timer to the hash table. The timer is not yet valid
* after insertion, but has a unique ID allocated.
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c527b421c865..466e083c8272 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1152,16 +1152,15 @@ static bool report_idle_softirq(void)
return false;
}
- if (ratelimit >= 10)
- return false;
-
/* On RT, softirq handling may be waiting on some lock */
if (local_bh_blocked())
return false;
- pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
- pending);
- ratelimit++;
+ if (ratelimit < 10) {
+ pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
+ pending);
+ ratelimit++;
+ }
return true;
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3a4d3b2e3f74..08e0943b54da 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -3060,29 +3060,32 @@ static const struct attribute_group aux_clock_enable_attr_group = {
static int __init tk_aux_sysfs_init(void)
{
struct kobject *auxo, *tko = kobject_create_and_add("time", kernel_kobj);
+ int ret = -ENOMEM;
if (!tko)
- return -ENOMEM;
+ return ret;
auxo = kobject_create_and_add("aux_clocks", tko);
- if (!auxo) {
- kobject_put(tko);
- return -ENOMEM;
- }
+ if (!auxo)
+ goto err_clean;
for (int i = 0; i < MAX_AUX_CLOCKS; i++) {
char id[2] = { [0] = '0' + i, };
struct kobject *clk = kobject_create_and_add(id, auxo);
if (!clk)
- return -ENOMEM;
-
- int ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
+ goto err_clean;
+ ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
if (ret)
- return ret;
+ goto err_clean;
}
return 0;
+
+err_clean:
+ kobject_put(auxo);
+ kobject_put(tko);
+ return ret;
}
late_initcall(tk_aux_sysfs_init);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 553fa469d7cc..d5ebb1d927ea 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1458,10 +1458,11 @@ static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
base = lock_timer_base(timer, &flags);
- if (base->running_timer != timer)
+ if (base->running_timer != timer) {
ret = detach_if_pending(timer, base, true);
- if (shutdown)
- timer->function = NULL;
+ if (shutdown)
+ timer->function = NULL;
+ }
raw_spin_unlock_irqrestore(&base->lock, flags);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 42bd2ba68a82..59cfacb8a5bb 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1971,7 +1971,8 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
*/
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
struct ftrace_hash *old_hash,
- struct ftrace_hash *new_hash)
+ struct ftrace_hash *new_hash,
+ bool update_target)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec, *end = NULL;
@@ -2006,10 +2007,13 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
if (rec->flags & FTRACE_FL_DISABLED)
continue;
- /* We need to update only differences of filter_hash */
+ /*
+ * Unless we are updating the target of a direct function,
+ * we only need to update differences of filter_hash
+ */
in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
- if (in_old == in_new)
+ if (!update_target && (in_old == in_new))
continue;
if (in_new) {
@@ -2020,7 +2024,16 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
if (is_ipmodify)
goto rollback;
- FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
+ /*
+ * If this is called by __modify_ftrace_direct()
+ * then it is only changing where the direct
+ * pointer is jumping to, and the record already
+ * points to a direct trampoline. If it isn't,
+ * then it is a bug to update ipmodify on a direct
+ * caller.
+ */
+ FTRACE_WARN_ON(!update_target &&
+ (rec->flags & FTRACE_FL_DIRECT));
/*
* Another ops with IPMODIFY is already
@@ -2076,7 +2089,7 @@ static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
if (ftrace_hash_empty(hash))
hash = NULL;
- return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
+ return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash, false);
}
/* Disabling always succeeds */
@@ -2087,7 +2100,7 @@ static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
if (ftrace_hash_empty(hash))
hash = NULL;
- __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
+ __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH, false);
}
static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
@@ -2101,7 +2114,7 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
if (ftrace_hash_empty(new_hash))
new_hash = NULL;
- return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
+ return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash, false);
}
static void print_ip_ins(const char *fmt, const unsigned char *p)
@@ -5953,6 +5966,17 @@ static void register_ftrace_direct_cb(struct rcu_head *rhp)
free_ftrace_hash(fhp);
}
+static void reset_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+ struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+ remove_direct_functions_hash(hash, addr);
+
+ /* cleanup for possible another register call */
+ ops->func = NULL;
+ ops->trampoline = 0;
+}
+
/**
* register_ftrace_direct - Call a custom trampoline directly
* for multiple functions registered in @ops
@@ -6048,6 +6072,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
ops->direct_call = addr;
err = register_ftrace_function_nolock(ops);
+ if (err)
+ reset_direct(ops, addr);
out_unlock:
mutex_unlock(&direct_mutex);
@@ -6080,7 +6106,6 @@ EXPORT_SYMBOL_GPL(register_ftrace_direct);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
bool free_filters)
{
- struct ftrace_hash *hash = ops->func_hash->filter_hash;
int err;
if (check_direct_multi(ops))
@@ -6090,13 +6115,9 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
mutex_lock(&direct_mutex);
err = unregister_ftrace_function(ops);
- remove_direct_functions_hash(hash, addr);
+ reset_direct(ops, addr);
mutex_unlock(&direct_mutex);
- /* cleanup for possible another register call */
- ops->func = NULL;
- ops->trampoline = 0;
-
if (free_filters)
ftrace_free_filter(ops);
return err;
@@ -6106,7 +6127,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
static int
__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
- struct ftrace_hash *hash;
+ struct ftrace_hash *hash = ops->func_hash->filter_hash;
struct ftrace_func_entry *entry, *iter;
static struct ftrace_ops tmp_ops = {
.func = ftrace_stub,
@@ -6127,12 +6148,20 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
return err;
/*
+ * Call __ftrace_hash_update_ipmodify() here, so that we can call
+ * ops->ops_func for the ops. This is needed because the above
+ * register_ftrace_function_nolock() worked on tmp_ops.
+ */
+ err = __ftrace_hash_update_ipmodify(ops, hash, hash, true);
+ if (err)
+ goto out;
+
+ /*
* Now the ftrace_ops_list_func() is called to do the direct callers.
* We can safely change the direct functions attached to each entry.
*/
mutex_lock(&ftrace_lock);
- hash = ops->func_hash->filter_hash;
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
@@ -6147,6 +6176,7 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
mutex_unlock(&ftrace_lock);
+out:
/* Removing the tmp_ops will add the updated direct callers to the functions */
unregister_ftrace_function(&tmp_ops);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d1e527cf2aae..304e93597126 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8781,8 +8781,18 @@ static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
put_snapshot_map(iter->tr);
}
+static int tracing_buffers_may_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ /*
+ * Trace buffer mappings require the complete buffer including
+ * the meta page. Partial mappings are not supported.
+ */
+ return -EINVAL;
+}
+
static const struct vm_operations_struct tracing_buffers_vmops = {
.close = tracing_buffers_mmap_close,
+ .may_split = tracing_buffers_may_split,
};
static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3034e294d50d..713cc94caa02 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -445,8 +445,7 @@ config FRAME_WARN
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 2048 if PARISC
default 1536 if (!64BIT && XTENSA)
- default 1280 if KASAN && !64BIT
- default 1024 if !64BIT
+ default 1280 if !64BIT
default 2048 if 64BIT
help
Tell the compiler to warn at build time for stack frames larger than this.
diff --git a/lib/crypto/tests/sha256_kunit.c b/lib/crypto/tests/sha256_kunit.c
index dcedfca06df6..5dccdee79693 100644
--- a/lib/crypto/tests/sha256_kunit.c
+++ b/lib/crypto/tests/sha256_kunit.c
@@ -68,6 +68,7 @@ static void test_sha256_finup_2x(struct kunit *test)
rand_bytes(data1_buf, max_data_len);
rand_bytes(data2_buf, max_data_len);
rand_bytes(salt, sizeof(salt));
+ memset(ctx, 0, sizeof(*ctx));
for (size_t i = 0; i < 500; i++) {
size_t salt_len = rand_length(sizeof(salt));
diff --git a/lib/test_kho.c b/lib/test_kho.c
index 60cd899ea745..fff018e5548d 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -301,6 +301,9 @@ static int __init kho_test_init(void)
phys_addr_t fdt_phys;
int err;
+ if (!kho_is_enabled())
+ return 0;
+
err = kho_retrieve_subtree(KHO_TEST_FDT, &fdt_phys);
if (!err)
return kho_test_restore(fdt_phys);
diff --git a/mm/Kconfig b/mm/Kconfig
index 0e26f4fc8717..ca3f146bc705 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -908,6 +908,13 @@ config PAGE_MAPCOUNT
config PGTABLE_HAS_HUGE_LEAVES
def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
+#
+# We can end up creating gigantic folio.
+#
+config HAVE_GIGANTIC_FOLIOS
+ def_bool (HUGETLB_PAGE && ARCH_HAS_GIGANTIC_PAGE) || \
+ (ZONE_DEVICE && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+
# TODO: Allow to be enabled without THP
config ARCH_SUPPORTS_HUGE_PFNMAP
def_bool n
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f1e7e283a51..024b71da5224 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
unsigned long addr, unsigned int nr_pages,
unsigned long *rss, unsigned short *mmap_miss,
- bool can_map_large)
+ pgoff_t file_end)
{
+ struct address_space *mapping = folio->mapping;
unsigned int ref_from_caller = 1;
vm_fault_t ret = 0;
struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
unsigned long addr0;
/*
- * Map the large folio fully where possible.
+ * Map the large folio fully where possible:
*
- * The folio must not cross VMA or page table boundary.
+ * - The folio is fully within the size of the file or belongs
+ * to shmem/tmpfs;
+ * - The folio doesn't cross VMA boundary;
+ * - The folio doesn't cross page table boundary;
*/
addr0 = addr - start * PAGE_SIZE;
- if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+ if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+ folio_within_vma(folio, vmf->vma) &&
(addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
vmf->pte -= start;
page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long rss = 0;
unsigned int nr_pages = 0, folio_type;
unsigned short mmap_miss = 0, mmap_miss_saved;
- bool can_map_large;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
end_pgoff = min(end_pgoff, file_end);
/*
- * Do not allow to map with PTEs beyond i_size and with PMD
- * across i_size to preserve SIGBUS semantics.
+ * Do not allow to map with PMD across i_size to preserve
+ * SIGBUS semantics.
*
* Make an exception for shmem/tmpfs that for long time
* intentionally mapped with PMDs across i_size.
*/
- can_map_large = shmem_mapping(mapping) ||
- file_end >= folio_next_index(folio);
-
- if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+ if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+ filemap_map_pmd(vmf, folio, start_pgoff)) {
ret = VM_FAULT_NOPAGE;
goto out;
}
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
else
ret |= filemap_map_folio_range(vmf, folio,
xas.xa_index - folio->index, addr,
- nr_pages, &rss, &mmap_miss,
- can_map_large);
+ nr_pages, &rss, &mmap_miss, file_end);
folio_unlock(folio);
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 323654fb4f8c..6cba1cb14b23 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3522,7 +3522,8 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
/* order-1 is not supported for anonymous THP. */
VM_WARN_ONCE(warns && new_order == 1,
"Cannot split to order-1 folio");
- return new_order != 1;
+ if (new_order == 1)
+ return false;
} else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
!mapping_large_folio_support(folio->mapping)) {
/*
@@ -3553,7 +3554,8 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
if (folio_test_anon(folio)) {
VM_WARN_ONCE(warns && new_order == 1,
"Cannot split to order-1 folio");
- return new_order != 1;
+ if (new_order == 1)
+ return false;
} else if (new_order) {
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
!mapping_large_folio_support(folio->mapping)) {
@@ -3617,6 +3619,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
if (folio != page_folio(split_at) || folio != page_folio(lock_at))
return -EINVAL;
+ /*
+ * Folios that just got truncated cannot get split. Signal to the
+ * caller that there was a race.
+ *
+ * TODO: this will also currently refuse shmem folios that are in the
+ * swapcache.
+ */
+ if (!is_anon && !folio->mapping)
+ return -EBUSY;
+
if (new_order >= folio_order(folio))
return -EINVAL;
@@ -3657,18 +3669,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
gfp_t gfp;
mapping = folio->mapping;
-
- /* Truncated ? */
- /*
- * TODO: add support for large shmem folio in swap cache.
- * When shmem is in swap cache, mapping is NULL and
- * folio_test_swapcache() is true.
- */
- if (!mapping) {
- ret = -EBUSY;
- goto out;
- }
-
min_order = mapping_min_folio_order(folio->mapping);
if (new_order < min_order) {
ret = -EINVAL;
diff --git a/mm/memblock.c b/mm/memblock.c
index e23e16618e9b..f0f2dc66e9a2 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1826,7 +1826,8 @@ phys_addr_t __init_memblock memblock_reserved_kern_size(phys_addr_t limit, int n
*/
unsigned long __init memblock_estimated_nr_free_pages(void)
{
- return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ return PHYS_PFN(memblock_phys_mem_size() -
+ memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE, NUMA_NO_NODE));
}
/* lowest address */
diff --git a/mm/memfd.c b/mm/memfd.c
index 1d109c1acf21..a405eaa451ee 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
NULL,
gfp_mask);
if (folio) {
+ u32 hash;
+
+ /*
+ * Zero the folio to prevent information leaks to userspace.
+ * Use folio_zero_user() which is optimized for huge/gigantic
+ * pages. Pass 0 as addr_hint since this is not a faulting path
+ * and we don't have a user virtual address yet.
+ */
+ folio_zero_user(folio, 0);
+
+ /*
+ * Mark the folio uptodate before adding to page cache,
+ * as required by filemap.c and other hugetlb paths.
+ */
+ __folio_mark_uptodate(folio);
+
+ /*
+ * Serialize hugepage allocation and instantiation to prevent
+ * races with concurrent allocations, as required by all other
+ * callers of hugetlb_add_to_page_cache().
+ */
+ hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
idx);
+
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
if (err) {
folio_put(folio);
goto err_unresv;
diff --git a/mm/mempool.c b/mm/mempool.c
index 1c38e873e546..d7bbf1189db9 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -68,10 +68,20 @@ static void check_element(mempool_t *pool, void *element)
} else if (pool->free == mempool_free_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_local_page((struct page *)element);
- __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
- kunmap_local(addr);
+#ifdef CONFIG_HIGHMEM
+ for (int i = 0; i < (1 << order); i++) {
+ struct page *page = (struct page *)element;
+ void *addr = kmap_local_page(page + i);
+
+ __check_element(pool, addr, PAGE_SIZE);
+ kunmap_local(addr);
+ }
+#else
+ void *addr = page_address((struct page *)element);
+
+ __check_element(pool, addr, PAGE_SIZE << order);
+#endif
}
}
@@ -97,10 +107,20 @@ static void poison_element(mempool_t *pool, void *element)
} else if (pool->alloc == mempool_alloc_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_local_page((struct page *)element);
- __poison_element(addr, 1UL << (PAGE_SHIFT + order));
- kunmap_local(addr);
+#ifdef CONFIG_HIGHMEM
+ for (int i = 0; i < (1 << order); i++) {
+ struct page *page = (struct page *)element;
+ void *addr = kmap_local_page(page + i);
+
+ __poison_element(addr, PAGE_SIZE);
+ kunmap_local(addr);
+ }
+#else
+ void *addr = page_address((struct page *)element);
+
+ __poison_element(addr, PAGE_SIZE << order);
+#endif
}
}
#else /* CONFIG_SLUB_DEBUG_ON */
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 0a0db5849b8e..42e3dde73e74 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -241,6 +241,7 @@ retry:
if (PTR_ERR(vma) == -EAGAIN) {
count_vm_vma_lock_event(VMA_LOCK_MISS);
/* The area was replaced with another one */
+ mas_set(&mas, address);
goto retry;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 600d9e981c23..ed82ee55e66a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1822,14 +1822,9 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
* If memory tags should be zeroed
* (which happens only when memory should be initialized as well).
*/
- if (zero_tags) {
- /* Initialize both memory and memory tags. */
- for (i = 0; i != 1 << order; ++i)
- tag_clear_highpage(page + i);
+ if (zero_tags)
+ init = !tag_clear_highpages(page, 1 << order);
- /* Take note that memory was initialized by the loop above. */
- init = false;
- }
if (!should_skip_kasan_unpoison(gfp_flags) &&
kasan_unpoison_pages(page, order, init)) {
/* Take note that memory was initialized by KASAN. */
diff --git a/mm/shmem.c b/mm/shmem.c
index 58701d14dd96..5a3f0f754dc0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -131,8 +131,7 @@ struct shmem_options {
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
-#define SHMEM_SEEN_NOSWAP 16
-#define SHMEM_SEEN_QUOTA 32
+#define SHMEM_SEEN_QUOTA 16
};
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -4680,7 +4679,6 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
"Turning off swap in unprivileged tmpfs mounts unsupported");
}
ctx->noswap = true;
- ctx->seen |= SHMEM_SEEN_NOSWAP;
break;
case Opt_quota:
if (fc->user_ns != &init_user_ns)
@@ -4830,14 +4828,15 @@ static int shmem_reconfigure(struct fs_context *fc)
err = "Current inum too high to switch to 32-bit inums";
goto out;
}
- if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
+
+ /*
+ * "noswap" doesn't use fsparam_flag_no, i.e. there's no "swap"
+ * counterpart for (re-)enabling swap.
+ */
+ if (ctx->noswap && !sbinfo->noswap) {
err = "Cannot disable swap on remount";
goto out;
}
- if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
- err = "Cannot enable swap on remount if it was disabled on first mount";
- goto out;
- }
if (ctx->seen & SHMEM_SEEN_QUOTA &&
!sb_any_quota_loaded(fc->root->d_sb)) {
diff --git a/mm/slub.c b/mm/slub.c
index 1bf65c421325..a0b905c2a557 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6336,8 +6336,6 @@ next_remote_batch:
if (unlikely(!slab_free_hook(s, p[i], init, false))) {
p[i] = p[--size];
- if (!size)
- goto flush_remote;
continue;
}
@@ -6352,6 +6350,9 @@ next_remote_batch:
i++;
}
+ if (!size)
+ goto flush_remote;
+
next_batch:
if (!local_trylock(&s->cpu_sheaves->lock))
goto fallback;
@@ -6406,6 +6407,9 @@ do_free:
goto next_batch;
}
+ if (remote_nr)
+ goto flush_remote;
+
return;
no_empty:
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b13e9c4baa90..f4980dde5394 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -748,6 +748,8 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
blk_start_plug(&plug);
for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
+ struct swap_info_struct *si = NULL;
+
if (!pte++) {
pte = pte_offset_map(vmf->pmd, addr);
if (!pte)
@@ -761,8 +763,19 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
continue;
pte_unmap(pte);
pte = NULL;
+ /*
+ * Readahead entry may come from a device that we are not
+ * holding a reference to, try to grab a reference, or skip.
+ */
+ if (swp_type(entry) != swp_type(targ_entry)) {
+ si = get_swap_device(entry);
+ if (!si)
+ continue;
+ }
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
+ if (si)
+ put_swap_device(si);
if (!folio)
continue;
if (page_allocated) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 10760240a3a2..a1b4b9d80e3b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2005,10 +2005,8 @@ swp_entry_t get_swap_page_of_type(int type)
local_lock(&percpu_swap_cluster.lock);
offset = cluster_alloc_swap_entry(si, 0, 1);
local_unlock(&percpu_swap_cluster.lock);
- if (offset) {
+ if (offset)
entry = swp_entry(si->type, offset);
- atomic_long_dec(&nr_swap_pages);
- }
}
put_swap_device(si);
}
diff --git a/net/atm/common.c b/net/atm/common.c
index cecc71a8bee1..fe77f51f6ce1 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -881,7 +881,7 @@ out_atmproc_exit:
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
- atmsvc_exit();
+ atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3418d7b964a1..8ccec73dce45 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3832,13 +3832,14 @@ static void hci_tx_work(struct work_struct *work)
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr;
- struct hci_conn *conn;
__u16 handle, flags;
+ int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ACL packet too small");
- goto drop;
+ kfree_skb(skb);
+ return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3850,36 +3851,27 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.acl_rx++;
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- hci_dev_unlock(hdev);
-
- if (conn) {
- hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
-
- /* Send to upper protocol */
- l2cap_recv_acldata(conn, skb, flags);
- return;
- } else {
+ err = l2cap_recv_acldata(hdev, handle, skb, flags);
+ if (err == -ENOENT)
bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
handle);
- }
-
-drop:
- kfree_skb(skb);
+ else if (err)
+ bt_dev_dbg(hdev, "ACL packet recv for handle %d failed: %d",
+ handle, err);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr;
- struct hci_conn *conn;
__u16 handle, flags;
+ int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "SCO packet too small");
- goto drop;
+ kfree_skb(skb);
+ return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3891,34 +3883,28 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.sco_rx++;
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- hci_dev_unlock(hdev);
+ hci_skb_pkt_status(skb) = flags & 0x03;
- if (conn) {
- /* Send to upper protocol */
- hci_skb_pkt_status(skb) = flags & 0x03;
- sco_recv_scodata(conn, skb);
- return;
- } else {
+ err = sco_recv_scodata(hdev, handle, skb);
+ if (err == -ENOENT)
bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
handle);
- }
-
-drop:
- kfree_skb(skb);
+ else if (err)
+ bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d",
+ handle, err);
}
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_iso_hdr *hdr;
- struct hci_conn *conn;
__u16 handle, flags;
+ int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ISO packet too small");
- goto drop;
+ kfree_skb(skb);
+ return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3928,22 +3914,13 @@ static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
handle, flags);
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- hci_dev_unlock(hdev);
-
- if (!conn) {
+ err = iso_recv(hdev, handle, skb, flags);
+ if (err == -ENOENT)
bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
handle);
- goto drop;
- }
-
- /* Send to upper protocol */
- iso_recv(conn, skb, flags);
- return;
-
-drop:
- kfree_skb(skb);
+ else if (err)
+ bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d",
+ handle, err);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
@@ -4121,7 +4098,7 @@ static void hci_rx_work(struct work_struct *work)
}
}
-static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
int err;
@@ -4133,16 +4110,19 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
if (!hdev->sent_cmd) {
skb_queue_head(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
- return;
+ return -EINVAL;
}
if (hci_skb_opcode(skb) != HCI_OP_NOP) {
err = hci_send_frame(hdev, skb);
if (err < 0) {
hci_cmd_sync_cancel_sync(hdev, -err);
- return;
+ return err;
}
atomic_dec(&hdev->cmd_cnt);
+ } else {
+ err = -ENODATA;
+ kfree_skb(skb);
}
if (hdev->req_status == HCI_REQ_PEND &&
@@ -4150,12 +4130,15 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
}
+
+ return err;
}
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
+ int err;
BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
@@ -4166,7 +4149,9 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- hci_send_cmd_sync(hdev, skb);
+ err = hci_send_cmd_sync(hdev, skb);
+ if (err)
+ return;
rcu_read_lock();
if (test_bit(HCI_RESET, &hdev->flags) ||
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ba9f48771e11..4e7bf63af9c5 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1311,7 +1311,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
goto done;
}
+ hci_dev_lock(hdev);
mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
err = hci_dev_open(hdev->id);
if (err) {
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 243505b89733..5859ec1c04dd 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -2314,14 +2314,31 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
iso_conn_del(hcon, bt_to_errno(reason));
}
-void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb, u16 flags)
{
- struct iso_conn *conn = hcon->iso_data;
+ struct hci_conn *hcon;
+ struct iso_conn *conn;
struct skb_shared_hwtstamps *hwts;
__u16 pb, ts, len, sn;
- if (!conn)
- goto drop;
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ kfree_skb(skb);
+ return -ENOENT;
+ }
+
+ conn = iso_conn_hold_unless_zero(hcon->iso_data);
+ hcon = NULL;
+
+ hci_dev_unlock(hdev);
+
+ if (!conn) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
pb = hci_iso_flags_pb(flags);
ts = hci_iso_flags_ts(flags);
@@ -2377,7 +2394,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hci_skb_pkt_status(skb) = flags & 0x03;
hci_skb_pkt_seqnum(skb) = sn;
iso_recv_frame(conn, skb);
- return;
+ goto done;
}
if (pb == ISO_SINGLE) {
@@ -2455,6 +2472,9 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
drop:
kfree_skb(skb);
+done:
+ iso_conn_put(conn);
+ return 0;
}
static struct hci_cb iso_cb = {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 35c57657bcf4..07b493331fd7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7510,13 +7510,24 @@ struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
return c;
}
-void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
+ struct sk_buff *skb, u16 flags)
{
+ struct hci_conn *hcon;
struct l2cap_conn *conn;
int len;
- /* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
- hci_dev_lock(hcon->hdev);
+ /* Lock hdev for the hci_conn lookup, and for the l2cap_data vs. l2cap_conn_del race */
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ kfree_skb(skb);
+ return -ENOENT;
+ }
+
+ hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);
conn = hcon->l2cap_data;
@@ -7524,12 +7535,13 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
conn = l2cap_conn_add(hcon);
conn = l2cap_conn_hold_unless_zero(conn);
+ hcon = NULL;
- hci_dev_unlock(hcon->hdev);
+ hci_dev_unlock(hdev);
if (!conn) {
kfree_skb(skb);
- return;
+ return -EINVAL;
}
BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
@@ -7643,6 +7655,7 @@ drop:
unlock:
mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
+ return 0;
}
static struct hci_cb l2cap_cb = {
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 7afe65e7ff37..87ba90336e80 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1458,22 +1458,39 @@ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
sco_conn_del(hcon, bt_to_errno(reason));
}
-void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb)
{
- struct sco_conn *conn = hcon->sco_data;
+ struct hci_conn *hcon;
+ struct sco_conn *conn;
- if (!conn)
- goto drop;
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ kfree_skb(skb);
+ return -ENOENT;
+ }
+
+ conn = sco_conn_hold_unless_zero(hcon->sco_data);
+ hcon = NULL;
+
+ hci_dev_unlock(hdev);
+
+ if (!conn) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
BT_DBG("conn %p len %u", conn, skb->len);
- if (skb->len) {
+ if (skb->len)
sco_recv_frame(conn, skb);
- return;
- }
+ else
+ kfree_skb(skb);
-drop:
- kfree_skb(skb);
+ sco_conn_put(conn);
+ return 0;
}
static struct hci_cb sco_cb = {
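The Bluetooth recv paths above switch from using a bare hci_conn pointer to looking the connection up by handle under hci_dev_lock() and then taking a reference with *_conn_hold_unless_zero(). A self-contained C11 sketch of the hold-unless-zero refcount pattern itself, with invented names standing in for the Bluetooth structures:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        atomic_int refs;
};

/* Only take a reference if the object has not already dropped to zero,
 * i.e. is not in the middle of being torn down. */
static bool conn_hold_unless_zero(struct conn *c)
{
        int old = atomic_load(&c->refs);

        while (old != 0)
                if (atomic_compare_exchange_weak(&c->refs, &old, old + 1))
                        return true;
        return false;           /* teardown in progress, caller bails out */
}

static void conn_put(struct conn *c)
{
        if (atomic_fetch_sub(&c->refs, 1) == 1)
                printf("last reference dropped, object can be freed\n");
}

int main(void)
{
        struct conn c = { .refs = 1 };

        if (conn_hold_unless_zero(&c)) {
                /* ... deliver the frame to the connection ... */
                conn_put(&c);
        }
        conn_put(&c);
        return 0;
}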
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 45512b2ba951..3a1ce04a7a53 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2136,7 +2136,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
u8 *pkax, *pkbx, *na, *nb, confirm_hint;
- u32 passkey;
+ u32 passkey = 0;
int err;
bt_dev_dbg(hcon->hdev, "conn %p", conn);
@@ -2188,24 +2188,6 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
-
- /* Only Just-Works pairing requires extra checks */
- if (smp->method != JUST_WORKS)
- goto mackey_and_ltk;
-
- /* If there already exists long term key in local host, leave
- * the decision to user space since the remote device could
- * be legitimate or malicious.
- */
- if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
- hcon->role)) {
- /* Set passkey to 0. The value can be any number since
- * it'll be ignored anyway.
- */
- passkey = 0;
- confirm_hint = 1;
- goto confirm;
- }
}
mackey_and_ltk:
@@ -2226,11 +2208,12 @@ mackey_and_ltk:
if (err)
return SMP_UNSPECIFIED;
- confirm_hint = 0;
-
-confirm:
- if (smp->method == JUST_WORKS)
- confirm_hint = 1;
+ /* Always require user confirmation for Just-Works pairing to prevent
+ * impersonation attacks, or in case of a legitimate device that is
+ * repairing use the confirmation as acknowledgment to proceed with the
+ * creation of new keys.
+ */
+ confirm_hint = smp->method == JUST_WORKS ? 1 : 0;
err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
hcon->dst_type, passkey, confirm_hint);
diff --git a/net/can/raw.c b/net/can/raw.c
index f36a83d3447c..be1ef7cf4204 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -892,20 +892,58 @@ static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
}
}
-static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
+static inline bool raw_dev_cc_enabled(struct net_device *dev,
+ struct can_priv *priv)
{
- /* Classical CAN -> no checks for flags and device capabilities */
- if (can_is_can_skb(skb))
+ /* The CANXL-only mode disables error-signalling on the CAN bus
+ * which is needed to send CAN CC/FD frames
+ */
+ if (priv)
+ return !can_dev_in_xl_only_mode(priv);
+
+ /* virtual CAN interfaces always support CAN CC */
+ return true;
+}
+
+static inline bool raw_dev_fd_enabled(struct net_device *dev,
+ struct can_priv *priv)
+{
+ /* check FD ctrlmode on real CAN interfaces */
+ if (priv)
+ return (priv->ctrlmode & CAN_CTRLMODE_FD);
+
+ /* check MTU for virtual CAN FD interfaces */
+ return (READ_ONCE(dev->mtu) >= CANFD_MTU);
+}
+
+static inline bool raw_dev_xl_enabled(struct net_device *dev,
+ struct can_priv *priv)
+{
+ /* check XL ctrlmode on real CAN interfaces */
+ if (priv)
+ return (priv->ctrlmode & CAN_CTRLMODE_XL);
+
+ /* check MTU for virtual CAN XL interfaces */
+ return can_is_canxl_dev_mtu(READ_ONCE(dev->mtu));
+}
+
+static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can_priv *priv = safe_candev_priv(dev);
+
+ /* Classical CAN */
+ if (can_is_can_skb(skb) && raw_dev_cc_enabled(dev, priv))
return CAN_MTU;
- /* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
+ /* CAN FD */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
- (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
+ raw_dev_fd_enabled(dev, priv))
return CANFD_MTU;
- /* CAN XL -> needs to be enabled and a CAN XL device */
+ /* CAN XL */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
- can_is_canxl_dev_mtu(mtu))
+ raw_dev_xl_enabled(dev, priv))
return CANXL_MTU;
return 0;
@@ -961,7 +999,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
err = -EINVAL;
/* check for valid CAN (CC/FD/XL) frame content */
- txmtu = raw_check_txframe(ro, skb, READ_ONCE(dev->mtu));
+ txmtu = raw_check_txframe(ro, skb, dev);
if (!txmtu)
goto free_skb;
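For context, the helpers above gate transmission on the device's ctrlmode (real interfaces) or its MTU (virtual interfaces); the socket itself still has to opt in to the larger frame formats. A minimal userspace sketch, assuming a CAN FD capable interface named can0 (CAN_RAW_XL_FRAMES is the analogous knob for CAN XL):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct canfd_frame cf = { .can_id = 0x123, .len = 64 };
	struct ifreq ifr = { 0 };
	int s, on = 1;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	/* opt in to CAN FD frames on this socket (matches ro->fd_frames above) */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &on, sizeof(on));

	strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name) - 1);
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;
	addr.can_ifindex = ifr.ifr_ifindex;

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* a canfd_frame write must be exactly CANFD_MTU bytes long */
	if (write(s, &cf, sizeof(cf)) != sizeof(cf))
		perror("write");

	close(s);
	return 0;
}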
diff --git a/net/core/dev.c b/net/core/dev.c
index 69515edd17bc..9094c0fb8c68 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4069,17 +4069,23 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
-static void qdisc_pkt_len_init(struct sk_buff *skb)
+static void qdisc_pkt_len_segs_init(struct sk_buff *skb)
{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u16 gso_segs;
qdisc_skb_cb(skb)->pkt_len = skb->len;
+ if (!shinfo->gso_size) {
+ qdisc_skb_cb(skb)->pkt_segs = 1;
+ return;
+ }
+
+ qdisc_skb_cb(skb)->pkt_segs = gso_segs = shinfo->gso_segs;
/* To get more precise estimation of bytes sent on wire,
* we add to pkt_len the headers size of all segments
*/
- if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
- u16 gso_segs = shinfo->gso_segs;
+ if (skb_transport_header_was_set(skb)) {
unsigned int hdr_len;
/* mac layer + network layer */
@@ -4112,6 +4118,8 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
if (payload <= 0)
return;
gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
+ shinfo->gso_segs = gso_segs;
+ qdisc_skb_cb(skb)->pkt_segs = gso_segs;
}
qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
}
@@ -4133,7 +4141,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
{
- struct sk_buff *next, *to_free = NULL;
+ struct sk_buff *next, *to_free = NULL, *to_free2 = NULL;
spinlock_t *root_lock = qdisc_lock(q);
struct llist_node *ll_list, *first_n;
unsigned long defer_count = 0;
@@ -4152,9 +4160,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (unlikely(!nolock_qdisc_is_empty(q))) {
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
__qdisc_run(q);
- qdisc_run_end(q);
+ to_free2 = qdisc_run_end(q);
- goto no_lock_out;
+ goto free_skbs;
}
qdisc_bstats_cpu_update(q, skb);
@@ -4162,18 +4170,14 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
!nolock_qdisc_is_empty(q))
__qdisc_run(q);
- qdisc_run_end(q);
- return NET_XMIT_SUCCESS;
+ to_free2 = qdisc_run_end(q);
+ rc = NET_XMIT_SUCCESS;
+ goto free_skbs;
}
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
- qdisc_run(q);
-
-no_lock_out:
- if (unlikely(to_free))
- kfree_skb_list_reason(to_free,
- tcf_get_drop_reason(to_free));
- return rc;
+ to_free2 = qdisc_run(q);
+ goto free_skbs;
}
/* Open code llist_add(&skb->ll_node, &q->defer_list) + queue limit.
@@ -4186,7 +4190,7 @@ no_lock_out:
do {
if (first_n && !defer_count) {
defer_count = atomic_long_inc_return(&q->defer_count);
- if (unlikely(defer_count > q->limit)) {
+ if (unlikely(defer_count > READ_ONCE(q->limit))) {
kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_DROP);
return NET_XMIT_DROP;
}
@@ -4231,26 +4235,28 @@ no_lock_out:
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock, true))
__qdisc_run(q);
- qdisc_run_end(q);
+ to_free2 = qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
int count = 0;
llist_for_each_entry_safe(skb, next, ll_list, ll_node) {
prefetch(next);
+ prefetch(&next->priority);
skb_mark_not_on_list(skb);
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
count++;
}
- qdisc_run(q);
+ to_free2 = qdisc_run(q);
if (count != 1)
rc = NET_XMIT_SUCCESS;
}
unlock:
spin_unlock(root_lock);
- if (unlikely(to_free))
- kfree_skb_list_reason(to_free,
- tcf_get_drop_reason(to_free));
+
+free_skbs:
+ tcf_kfree_skb_list(to_free);
+ tcf_kfree_skb_list(to_free2);
return rc;
}
@@ -4355,7 +4361,7 @@ static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
return ret;
tc_skb_cb(skb)->mru = 0;
- tc_skb_cb(skb)->post_ct = false;
+ qdisc_skb_cb(skb)->post_ct = false;
tcf_set_drop_reason(skb, *drop_reason);
mini_qdisc_bstats_cpu_update(miniq, skb);
@@ -4426,7 +4432,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
*pt_prev = NULL;
}
- qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_pkt_len_segs_init(skb);
tcx_set_ingress(skb, true);
if (static_branch_unlikely(&tcx_needed_key)) {
@@ -4737,7 +4743,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
skb_update_prio(skb);
- qdisc_pkt_len_init(skb);
+ qdisc_pkt_len_segs_init(skb);
tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
@@ -5743,8 +5749,9 @@ static __latent_entropy void net_tx_action(void)
rcu_read_lock();
while (head) {
- struct Qdisc *q = head;
spinlock_t *root_lock = NULL;
+ struct sk_buff *to_free;
+ struct Qdisc *q = head;
head = head->next_sched;
@@ -5771,9 +5778,10 @@ static __latent_entropy void net_tx_action(void)
}
clear_bit(__QDISC_STATE_SCHED, &q->state);
- qdisc_run(q);
+ to_free = qdisc_run(q);
if (root_lock)
spin_unlock(root_lock);
+ tcf_kfree_skb_list(to_free);
}
rcu_read_unlock();
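A small userspace sketch of the wire-length estimate that qdisc_pkt_len_segs_init() performs for GSO packets, i.e. pkt_len = skb->len + (gso_segs - 1) * hdr_len; the header size and MSS below are assumed example values, not taken from this patch:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 65226;	/* assumed total skb length */
	unsigned int hdr_len = 66;	/* assumed eth + IPv4 + TCP headers */
	unsigned int gso_size = 1448;	/* assumed MSS */
	unsigned int payload = skb_len - hdr_len;
	unsigned int gso_segs = (payload + gso_size - 1) / gso_size;
	unsigned int pkt_len = skb_len + (gso_segs - 1) * hdr_len;

	printf("segs=%u estimated wire bytes=%u\n", gso_segs, pkt_len);
	return 0;
}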
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b3ce0fb24a69..53a53357cfef 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -249,10 +249,11 @@ int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg)
*
* Helper for calling the default hardware provider timestamping.
*
- * Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), and
- * there only exists a phydev->mii_ts->hwtstamp() method. So this will return
- * -EOPNOTSUPP for phylib for now, which is still more accurate than letting
- * the netdev handle the GET request.
+ * Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), but
+ * phydev->mii_ts has both hwtstamp_get() and hwtstamp_set() methods. So this
+ * will return -EOPNOTSUPP for phylib only if hwtstamp_get() is not
+ * implemented, which for now is still more accurate than letting the netdev
+ * handle the GET request.
*/
int dev_get_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg)
@@ -443,6 +444,9 @@ static int generic_hwtstamp_ioctl_lower(struct net_device *dev, int cmd,
struct ifreq ifrr;
int err;
+ if (!kernel_cfg->ifr)
+ return -EINVAL;
+
strscpy_pad(ifrr.ifr_name, dev->name, IFNAMSIZ);
ifrr.ifr_ifru = kernel_cfg->ifr->ifr_ifru;
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 1d04754bc756..ec4217d6c0b4 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -97,9 +97,9 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
index = offset / PAGE_SIZE;
niov = &owner->area.niovs[index];
- niov->pp_magic = 0;
- niov->pp = NULL;
- atomic_long_set(&niov->pp_ref_count, 0);
+ niov->desc.pp_magic = 0;
+ niov->desc.pp = NULL;
+ atomic_long_set(&niov->desc.pp_ref_count, 0);
return niov;
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index adcfef55a66f..dfad7c03b809 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -1223,15 +1223,13 @@ static void __init netns_ipv4_struct_check(void)
sysctl_tcp_wmem);
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
sysctl_ip_fwd_use_pmtu);
- CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);
-
- /* TXRX readonly hotpath cache lines */
- CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
- sysctl_tcp_moderate_rcvbuf);
- CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);
/* RX readonly hotpath cache line */
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
+ sysctl_tcp_moderate_rcvbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
+ sysctl_tcp_rcvbuf_low_rtt);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
sysctl_ip_early_demux);
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
sysctl_tcp_early_demux);
@@ -1241,7 +1239,6 @@ static void __init netns_ipv4_struct_check(void)
sysctl_tcp_reordering);
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
sysctl_tcp_rmem);
- CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
}
#endif
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index ff20435c45d2..ba673e81716f 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index cf3fad74511f..cffc08517a41 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_NETDEV_GEN_H
#define _LINUX_NETDEV_GEN_H
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f34372666e67..a00808f7be6a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -223,9 +223,9 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
skb_panic(skb, sz, addr, __func__);
}
-#define NAPI_SKB_CACHE_SIZE 64
-#define NAPI_SKB_CACHE_BULK 16
-#define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
+#define NAPI_SKB_CACHE_SIZE 128
+#define NAPI_SKB_CACHE_BULK 32
+#define NAPI_SKB_CACHE_FREE 32
struct napi_alloc_cache {
local_lock_t bh_lock;
@@ -280,17 +280,18 @@ EXPORT_SYMBOL(__netdev_alloc_frag_align);
*/
static u32 skbuff_cache_size __read_mostly;
-static struct sk_buff *napi_skb_cache_get(void)
+static struct sk_buff *napi_skb_cache_get(bool alloc)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
local_lock_nested_bh(&napi_alloc_cache.bh_lock);
if (unlikely(!nc->skb_count)) {
- nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
- GFP_ATOMIC | __GFP_NOWARN,
- NAPI_SKB_CACHE_BULK,
- nc->skb_cache);
+ if (alloc)
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ GFP_ATOMIC | __GFP_NOWARN,
+ NAPI_SKB_CACHE_BULK,
+ nc->skb_cache);
if (unlikely(!nc->skb_count)) {
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
return NULL;
@@ -298,6 +299,8 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
+ if (nc->skb_count)
+ prefetch(nc->skb_cache[nc->skb_count - 1]);
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
kasan_mempool_unpoison_object(skb, skbuff_cache_size);
@@ -530,7 +533,7 @@ static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = napi_skb_cache_get();
+ skb = napi_skb_cache_get(true);
if (unlikely(!skb))
return NULL;
@@ -645,25 +648,38 @@ out:
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int flags, int node)
{
+ struct sk_buff *skb = NULL;
struct kmem_cache *cache;
- struct sk_buff *skb;
bool pfmemalloc;
u8 *data;
- cache = (flags & SKB_ALLOC_FCLONE)
- ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
-
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
- /* Get the HEAD */
- if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
- likely(node == NUMA_NO_NODE || node == numa_mem_id()))
- skb = napi_skb_cache_get();
- else
+ if (flags & SKB_ALLOC_FCLONE) {
+ cache = net_hotdata.skbuff_fclone_cache;
+ goto fallback;
+ }
+ cache = net_hotdata.skbuff_cache;
+ if (unlikely(node != NUMA_NO_NODE && node != numa_mem_id()))
+ goto fallback;
+
+ if (flags & SKB_ALLOC_NAPI) {
+ skb = napi_skb_cache_get(true);
+ if (unlikely(!skb))
+ return NULL;
+ } else if (!in_hardirq() && !irqs_disabled()) {
+ local_bh_disable();
+ skb = napi_skb_cache_get(false);
+ local_bh_enable();
+ }
+
+ if (!skb) {
+fallback:
skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
- if (unlikely(!skb))
- return NULL;
+ if (unlikely(!skb))
+ return NULL;
+ }
prefetchw(skb);
/* We do our best to align skb_shared_info on a separate cache
@@ -1431,7 +1447,6 @@ void __consume_stateless_skb(struct sk_buff *skb)
static void napi_skb_cache_put(struct sk_buff *skb)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- u32 i;
if (!kasan_mempool_poison_object(skb))
return;
@@ -1440,13 +1455,16 @@ static void napi_skb_cache_put(struct sk_buff *skb)
nc->skb_cache[nc->skb_count++] = skb;
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
- for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
+ u32 i, remaining = NAPI_SKB_CACHE_SIZE - NAPI_SKB_CACHE_FREE;
+
+ for (i = remaining; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
skbuff_cache_size);
- kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
- nc->skb_cache + NAPI_SKB_CACHE_HALF);
- nc->skb_count = NAPI_SKB_CACHE_HALF;
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache,
+ NAPI_SKB_CACHE_FREE,
+ nc->skb_cache + remaining);
+ nc->skb_count = remaining;
}
local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
}
@@ -1472,7 +1490,7 @@ void napi_skb_free_stolen_head(struct sk_buff *skb)
void napi_consume_skb(struct sk_buff *skb, int budget)
{
/* Zero budget indicate non-NAPI context called us, like netpoll */
- if (unlikely(!budget)) {
+ if (unlikely(!budget || !skb)) {
dev_consume_skb_any(skb);
return;
}
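A toy userspace model of the new cache sizing: with NAPI_SKB_CACHE_SIZE raised to 128, a full cache frees only the 32 most recently stashed skbs (NAPI_SKB_CACHE_FREE) and keeps 96 warm, instead of flushing half as before. The sketch only models the bookkeeping, not the actual slab calls:

#include <stdio.h>

#define NAPI_SKB_CACHE_SIZE	128
#define NAPI_SKB_CACHE_FREE	32

static unsigned int skb_count;

static void cache_put(void)
{
	skb_count++;
	if (skb_count == NAPI_SKB_CACHE_SIZE) {
		unsigned int remaining = NAPI_SKB_CACHE_SIZE - NAPI_SKB_CACHE_FREE;

		/* kmem_cache_free_bulk() would release entries [remaining, SIZE) here */
		skb_count = remaining;
	}
}

int main(void)
{
	for (int i = 0; i < 300; i++)
		cache_put();
	printf("cached skbs after 300 puts: %u\n", skb_count);
	return 0;
}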
diff --git a/net/core/sock.c b/net/core/sock.c
index 3b74fc71f51c..45c98bf524b2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3448,6 +3448,24 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
}
EXPORT_SYMBOL(__sk_mem_reclaim);
+void __sk_charge(struct sock *sk, gfp_t gfp)
+{
+ int amt;
+
+ gfp |= __GFP_NOFAIL;
+ if (mem_cgroup_from_sk(sk)) {
+ /* The socket has not been accepted yet, no need
+ * to look at newsk->sk_wmem_queued.
+ */
+ amt = sk_mem_pages(sk->sk_forward_alloc +
+ atomic_read(&sk->sk_rmem_alloc));
+ if (amt)
+ mem_cgroup_sk_charge(sk, amt, gfp);
+ }
+
+ kmem_cache_charge(sk, gfp);
+}
+
int sk_set_peek_off(struct sock *sk, int val)
{
WRITE_ONCE(sk->sk_peek_off, val);
@@ -4501,14 +4519,14 @@ static int __init sock_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
- CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
- CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_pending_confirm);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_status);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index 5ad435aee29d..f4c61c2b4f22 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/devlink.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -301,12 +302,13 @@ static const struct nla_policy devlink_param_get_dump_nl_policy[DEVLINK_ATTR_DEV
};
/* DEVLINK_CMD_PARAM_SET - do */
-static const struct nla_policy devlink_param_set_nl_policy[DEVLINK_ATTR_PARAM_VALUE_CMODE + 1] = {
+static const struct nla_policy devlink_param_set_nl_policy[DEVLINK_ATTR_PARAM_RESET_DEFAULT + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_PARAM_TYPE] = NLA_POLICY_VALIDATE_FN(NLA_U8, &devlink_attr_param_type_validate),
[DEVLINK_ATTR_PARAM_VALUE_CMODE] = NLA_POLICY_MAX(NLA_U8, 2),
+ [DEVLINK_ATTR_PARAM_RESET_DEFAULT] = { .type = NLA_FLAG, },
};
/* DEVLINK_CMD_REGION_GET - do */
@@ -919,7 +921,7 @@ const struct genl_split_ops devlink_nl_ops[74] = {
.doit = devlink_nl_param_set_doit,
.post_doit = devlink_nl_post_doit,
.policy = devlink_param_set_nl_policy,
- .maxattr = DEVLINK_ATTR_PARAM_VALUE_CMODE,
+ .maxattr = DEVLINK_ATTR_PARAM_RESET_DEFAULT,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/net/devlink/netlink_gen.h b/net/devlink/netlink_gen.h
index 09cc6f264ccf..2817d53a0eba 100644
--- a/net/devlink/netlink_gen.h
+++ b/net/devlink/netlink_gen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/devlink.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_DEVLINK_GEN_H
#define _LINUX_DEVLINK_GEN_H
diff --git a/net/devlink/param.c b/net/devlink/param.c
index 6b233b13b69a..e0ea93eded43 100644
--- a/net/devlink/param.c
+++ b/net/devlink/param.c
@@ -174,11 +174,12 @@ devlink_param_cmode_is_supported(const struct devlink_param *param,
static int devlink_param_get(struct devlink *devlink,
const struct devlink_param *param,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
if (!param->get)
return -EOPNOTSUPP;
- return param->get(devlink, param->id, ctx);
+ return param->get(devlink, param->id, ctx, extack);
}
static int devlink_param_set(struct devlink *devlink,
@@ -191,68 +192,121 @@ static int devlink_param_set(struct devlink *devlink,
return param->set(devlink, param->id, ctx, extack);
}
-static int
-devlink_nl_param_value_fill_one(struct sk_buff *msg,
- enum devlink_param_type type,
- enum devlink_param_cmode cmode,
- union devlink_param_value val)
+static int devlink_param_get_default(struct devlink *devlink,
+ const struct devlink_param *param,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct nlattr *param_value_attr;
+ if (!param->get_default)
+ return -EOPNOTSUPP;
- param_value_attr = nla_nest_start_noflag(msg,
- DEVLINK_ATTR_PARAM_VALUE);
- if (!param_value_attr)
- goto nla_put_failure;
+ return param->get_default(devlink, param->id, ctx, extack);
+}
- if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
- goto value_nest_cancel;
+static int devlink_param_reset_default(struct devlink *devlink,
+ const struct devlink_param *param,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ if (!param->reset_default)
+ return -EOPNOTSUPP;
+ return param->reset_default(devlink, param->id, cmode, extack);
+}
+
+static int
+devlink_nl_param_value_put(struct sk_buff *msg, enum devlink_param_type type,
+ int nla_type, union devlink_param_value val,
+ bool flag_as_u8)
+{
switch (type) {
case DEVLINK_PARAM_TYPE_U8:
- if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
- goto value_nest_cancel;
+ if (nla_put_u8(msg, nla_type, val.vu8))
+ return -EMSGSIZE;
break;
case DEVLINK_PARAM_TYPE_U16:
- if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
- goto value_nest_cancel;
+ if (nla_put_u16(msg, nla_type, val.vu16))
+ return -EMSGSIZE;
break;
case DEVLINK_PARAM_TYPE_U32:
- if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
- goto value_nest_cancel;
+ if (nla_put_u32(msg, nla_type, val.vu32))
+ return -EMSGSIZE;
break;
case DEVLINK_PARAM_TYPE_U64:
- if (devlink_nl_put_u64(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
- val.vu64))
- goto value_nest_cancel;
+ if (devlink_nl_put_u64(msg, nla_type, val.vu64))
+ return -EMSGSIZE;
break;
case DEVLINK_PARAM_TYPE_STRING:
- if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
- val.vstr))
- goto value_nest_cancel;
+ if (nla_put_string(msg, nla_type, val.vstr))
+ return -EMSGSIZE;
break;
case DEVLINK_PARAM_TYPE_BOOL:
- if (val.vbool &&
- nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
- goto value_nest_cancel;
+ /* default values of type bool are encoded with u8, so that
+ * false can be distinguished from not present
+ */
+ if (flag_as_u8) {
+ if (nla_put_u8(msg, nla_type, val.vbool))
+ return -EMSGSIZE;
+ } else {
+ if (val.vbool && nla_put_flag(msg, nla_type))
+ return -EMSGSIZE;
+ }
break;
}
+ return 0;
+}
+
+static int
+devlink_nl_param_value_fill_one(struct sk_buff *msg,
+ enum devlink_param_type type,
+ enum devlink_param_cmode cmode,
+ union devlink_param_value val,
+ union devlink_param_value default_val,
+ bool has_default)
+{
+ struct nlattr *param_value_attr;
+ int err = -EMSGSIZE;
+
+ param_value_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_PARAM_VALUE);
+ if (!param_value_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
+ goto value_nest_cancel;
+
+ err = devlink_nl_param_value_put(msg, type,
+ DEVLINK_ATTR_PARAM_VALUE_DATA,
+ val, false);
+ if (err)
+ goto value_nest_cancel;
+
+ if (has_default) {
+ err = devlink_nl_param_value_put(msg, type,
+ DEVLINK_ATTR_PARAM_VALUE_DEFAULT,
+ default_val, true);
+ if (err)
+ goto value_nest_cancel;
+ }
nla_nest_end(msg, param_value_attr);
return 0;
value_nest_cancel:
nla_nest_cancel(msg, param_value_attr);
-nla_put_failure:
- return -EMSGSIZE;
+ return err;
}
static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
unsigned int port_index,
struct devlink_param_item *param_item,
enum devlink_command cmd,
- u32 portid, u32 seq, int flags)
+ u32 portid, u32 seq, int flags,
+ struct netlink_ext_ack *extack)
{
+ union devlink_param_value default_value[DEVLINK_PARAM_CMODE_MAX + 1];
union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+ bool default_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
const struct devlink_param *param = param_item->param;
struct devlink_param_gset_ctx ctx;
@@ -273,12 +327,26 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
param_value[i] = param_item->driverinit_value;
else
return -EOPNOTSUPP;
+
+ if (param_item->driverinit_value_valid) {
+ default_value[i] = param_item->driverinit_default;
+ default_value_set[i] = true;
+ }
} else {
ctx.cmode = i;
- err = devlink_param_get(devlink, param, &ctx);
+ err = devlink_param_get(devlink, param, &ctx, extack);
if (err)
return err;
param_value[i] = ctx.val;
+
+ err = devlink_param_get_default(devlink, param, &ctx,
+ extack);
+ if (!err) {
+ default_value[i] = ctx.val;
+ default_value_set[i] = true;
+ } else if (err != -EOPNOTSUPP) {
+ return err;
+ }
}
param_value_set[i] = true;
}
@@ -315,7 +383,9 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
if (!param_value_set[i])
continue;
err = devlink_nl_param_value_fill_one(msg, param->type,
- i, param_value[i]);
+ i, param_value[i],
+ default_value[i],
+ default_value_set[i]);
if (err)
goto values_list_nest_cancel;
}
@@ -357,7 +427,7 @@ static void devlink_param_notify(struct devlink *devlink,
if (!msg)
return;
err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
- 0, 0, 0);
+ 0, 0, 0, NULL);
if (err) {
nlmsg_free(msg);
return;
@@ -400,7 +470,8 @@ static int devlink_nl_param_get_dump_one(struct sk_buff *msg,
err = devlink_nl_param_fill(msg, devlink, 0, param_item,
DEVLINK_CMD_PARAM_GET,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
+ cb->nlh->nlmsg_seq, flags,
+ cb->extack);
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
@@ -509,8 +580,8 @@ int devlink_nl_param_get_doit(struct sk_buff *skb,
return -ENOMEM;
err = devlink_nl_param_fill(msg, devlink, 0, param_item,
- DEVLINK_CMD_PARAM_GET,
- info->snd_portid, info->snd_seq, 0);
+ DEVLINK_CMD_PARAM_GET, info->snd_portid,
+ info->snd_seq, 0, info->extack);
if (err) {
nlmsg_free(msg);
return err;
@@ -531,6 +602,7 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
struct devlink_param_item *param_item;
const struct devlink_param *param;
union devlink_param_value value;
+ bool reset_default;
int err = 0;
param_item = devlink_param_get_from_info(params, info);
@@ -542,13 +614,18 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return err;
if (param_type != param->type)
return -EINVAL;
- err = devlink_param_value_get_from_info(param, info, &value);
- if (err)
- return err;
- if (param->validate) {
- err = param->validate(devlink, param->id, value, info->extack);
+
+ reset_default = info->attrs[DEVLINK_ATTR_PARAM_RESET_DEFAULT];
+ if (!reset_default) {
+ err = devlink_param_value_get_from_info(param, info, &value);
if (err)
return err;
+ if (param->validate) {
+ err = param->validate(devlink, param->id, value,
+ info->extack);
+ if (err)
+ return err;
+ }
}
if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE))
@@ -558,6 +635,15 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return -EOPNOTSUPP;
if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+ if (reset_default) {
+ if (!param_item->driverinit_value_valid) {
+ NL_SET_ERR_MSG(info->extack,
+ "Default value not available");
+ return -EOPNOTSUPP;
+ }
+ value = param_item->driverinit_default;
+ }
+
param_item->driverinit_value_new = value;
param_item->driverinit_value_new_valid = true;
} else {
@@ -565,7 +651,12 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return -EOPNOTSUPP;
ctx.val = value;
ctx.cmode = cmode;
- err = devlink_param_set(devlink, param, &ctx, info->extack);
+ if (reset_default)
+ err = devlink_param_reset_default(devlink, param, cmode,
+ info->extack);
+ else
+ err = devlink_param_set(devlink, param, &ctx,
+ info->extack);
if (err)
return err;
}
@@ -813,6 +904,7 @@ void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
param_item->driverinit_value = init_val;
param_item->driverinit_value_valid = true;
+ param_item->driverinit_default = init_val;
devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
}
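A hedged driver-side sketch of the new default handling; the callback signatures mirror what devlink_param_get_default() and devlink_param_reset_default() invoke above, but the .get_default/.reset_default wiring, the foo_* helpers and the FOO_* constants are illustrative assumptions, not taken from an existing driver:

static int foo_param_get(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx,
			 struct netlink_ext_ack *extack)
{
	ctx->val.vu32 = foo_read_limit(devlink);	/* hypothetical helper */
	return 0;
}

static int foo_param_set(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx,
			 struct netlink_ext_ack *extack)
{
	return foo_write_limit(devlink, ctx->val.vu32);	/* hypothetical helper */
}

static int foo_param_get_default(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{
	ctx->val.vu32 = FOO_LIMIT_DEFAULT;	/* hypothetical constant */
	return 0;
}

static int foo_param_reset_default(struct devlink *devlink, u32 id,
				   enum devlink_param_cmode cmode,
				   struct netlink_ext_ack *extack)
{
	return foo_write_limit(devlink, FOO_LIMIT_DEFAULT);
}

static const struct devlink_param foo_param = {
	.id = FOO_PARAM_ID_LIMIT,	/* hypothetical id */
	.name = "foo_limit",
	.type = DEVLINK_PARAM_TYPE_U32,
	.supported_cmodes = BIT(DEVLINK_PARAM_CMODE_RUNTIME),
	.get = foo_param_get,
	.set = foo_param_set,
	.get_default = foo_param_get_default,
	.reset_default = foo_param_reset_default,
};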
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
index 264fb82cba19..d157a8419bca 100644
--- a/net/devlink/rate.c
+++ b/net/devlink/rate.c
@@ -828,13 +828,15 @@ void devl_rate_nodes_destroy(struct devlink *devlink)
if (!devlink_rate->parent)
continue;
- refcount_dec(&devlink_rate->parent->refcnt);
if (devlink_rate_is_leaf(devlink_rate))
ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
NULL, NULL);
else if (devlink_rate_is_node(devlink_rate))
ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
NULL, NULL);
+
+ refcount_dec(&devlink_rate->parent->refcnt);
+ devlink_rate->parent = NULL;
}
list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
if (devlink_rate_is_node(devlink_rate)) {
diff --git a/net/dsa/conduit.c b/net/dsa/conduit.c
index 4ae255cfb23f..a1b044467bd6 100644
--- a/net/dsa/conduit.c
+++ b/net/dsa/conduit.c
@@ -26,7 +26,7 @@ static int dsa_conduit_get_regs_len(struct net_device *dev)
int ret = 0;
int len;
- if (ops->get_regs_len) {
+ if (ops && ops->get_regs_len) {
netdev_lock_ops(dev);
len = ops->get_regs_len(dev);
netdev_unlock_ops(dev);
@@ -59,7 +59,7 @@ static void dsa_conduit_get_regs(struct net_device *dev,
int port = cpu_dp->index;
int len;
- if (ops->get_regs_len && ops->get_regs) {
+ if (ops && ops->get_regs_len && ops->get_regs) {
netdev_lock_ops(dev);
len = ops->get_regs_len(dev);
if (len < 0) {
@@ -87,30 +87,56 @@ static void dsa_conduit_get_regs(struct net_device *dev,
}
}
+static ssize_t dsa_conduit_append_port_stats(struct dsa_switch *ds, int port,
+ u64 *data, size_t start)
+{
+ int count;
+
+ if (!ds->ops->get_sset_count)
+ return 0;
+
+ count = ds->ops->get_sset_count(ds, port, ETH_SS_STATS);
+ if (count < 0)
+ return count;
+
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, port, data + start);
+
+ return count;
+}
+
static void dsa_conduit_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
- uint64_t *data)
+ u64 *data)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
- int port = cpu_dp->index;
- int count = 0;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ int count, mcount = 0;
- if (ops->get_sset_count && ops->get_ethtool_stats) {
+ if (ops && ops->get_sset_count && ops->get_ethtool_stats) {
netdev_lock_ops(dev);
- count = ops->get_sset_count(dev, ETH_SS_STATS);
+ mcount = ops->get_sset_count(dev, ETH_SS_STATS);
ops->get_ethtool_stats(dev, stats, data);
netdev_unlock_ops(dev);
}
- if (ds->ops->get_ethtool_stats)
- ds->ops->get_ethtool_stats(ds, port, data + count);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ count = dsa_conduit_append_port_stats(dp->ds, dp->index,
+ data, mcount);
+ if (count < 0)
+ return;
+
+ mcount += count;
+ }
}
static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
struct ethtool_stats *stats,
- uint64_t *data)
+ u64 *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -118,11 +144,11 @@ static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
int port = cpu_dp->index;
int count = 0;
- if (dev->phydev && !ops->get_ethtool_phy_stats) {
+ if (dev->phydev && (!ops || !ops->get_ethtool_phy_stats)) {
count = phy_ethtool_get_sset_count(dev->phydev);
if (count >= 0)
phy_ethtool_get_stats(dev->phydev, stats, data);
- } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
+ } else if (ops && ops->get_sset_count && ops->get_ethtool_phy_stats) {
netdev_lock_ops(dev);
count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
ops->get_ethtool_phy_stats(dev, stats, data);
@@ -136,45 +162,82 @@ static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}
+static void dsa_conduit_append_port_sset_count(struct dsa_switch *ds, int port,
+ int sset, int *count)
+{
+ if (ds->ops->get_sset_count)
+ *count += ds->ops->get_sset_count(ds, port, sset);
+}
+
static int dsa_conduit_get_sset_count(struct net_device *dev, int sset)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
int count = 0;
netdev_lock_ops(dev);
if (sset == ETH_SS_PHY_STATS && dev->phydev &&
- !ops->get_ethtool_phy_stats)
+ (!ops || !ops->get_ethtool_phy_stats))
count = phy_ethtool_get_sset_count(dev->phydev);
- else if (ops->get_sset_count)
+ else if (ops && ops->get_sset_count)
count = ops->get_sset_count(dev, sset);
netdev_unlock_ops(dev);
if (count < 0)
count = 0;
- if (ds->ops->get_sset_count)
- count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ dsa_conduit_append_port_sset_count(dp->ds, dp->index, sset,
+ &count);
+ }
return count;
}
-static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset,
- uint8_t *data)
+static ssize_t dsa_conduit_append_port_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data,
+ size_t start)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
- const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
- int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
- int mcount = 0, count, i;
- uint8_t pfx[4];
- uint8_t *ndata;
+ u8 pfx[8], *ndata;
+ int count, i;
+
+ if (!ds->ops->get_strings)
+ return 0;
- snprintf(pfx, sizeof(pfx), "p%.2d", port);
+ snprintf(pfx, sizeof(pfx), "s%.2d_p%.2d", ds->index, port);
/* We do not want to be NULL-terminated, since this is a prefix */
pfx[sizeof(pfx) - 1] = '_';
+ ndata = data + start * len;
+ /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
+ * the output after to prepend our CPU port prefix we
+ * constructed earlier
+ */
+ ds->ops->get_strings(ds, port, stringset, ndata);
+ count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ memmove(ndata + (i * len + sizeof(pfx)),
+ ndata + i * len, len - sizeof(pfx));
+ memcpy(ndata + i * len, pfx, sizeof(pfx));
+ }
+
+ return count;
+}
+
+static void dsa_conduit_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ int count, mcount = 0;
netdev_lock_ops(dev);
if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
@@ -192,21 +255,17 @@ static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset,
}
netdev_unlock_ops(dev);
- if (ds->ops->get_strings) {
- ndata = data + mcount * len;
- /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
- * the output after to prepend our CPU port prefix we
- * constructed earlier
- */
- ds->ops->get_strings(ds, port, stringset, ndata);
- count = ds->ops->get_sset_count(ds, port, stringset);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ count = dsa_conduit_append_port_strings(dp->ds, dp->index,
+ stringset, data,
+ mcount);
if (count < 0)
return;
- for (i = 0; i < count; i++) {
- memmove(ndata + (i * len + sizeof(pfx)),
- ndata + i * len, len - sizeof(pfx));
- memcpy(ndata + i * len, pfx, sizeof(pfx));
- }
+
+ mcount += count;
}
}
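The string handling above changes the per-port stat prefix from "pNN_" to "sNN_pNN_" so counters from every switch in the tree can be told apart. A small userspace sketch of the prefix construction; the switch index and port number are example values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char pfx[8];
	int sw_index = 0, port = 4;

	snprintf(pfx, sizeof(pfx), "s%.2d_p%.2d", sw_index, port);
	/* deliberately not NUL-terminated: the last byte becomes the separator */
	pfx[sizeof(pfx) - 1] = '_';

	/* prints e.g. "s00_p04_tx_packets" */
	printf("%.*s%s\n", (int)sizeof(pfx), pfx, "tx_packets");
	return 0;
}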
diff --git a/net/dsa/devlink.c b/net/dsa/devlink.c
index f41f9fc2194e..ed342f345692 100644
--- a/net/dsa/devlink.c
+++ b/net/dsa/devlink.c
@@ -182,7 +182,8 @@ static const struct devlink_ops dsa_devlink_ops = {
};
int dsa_devlink_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 43e211e611b1..13a63b48b7ee 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -154,9 +154,9 @@ EXPORT_SYMBOL(eth_get_headlen);
*/
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
- unsigned short _service_access_point;
const unsigned short *sap;
const struct ethhdr *eth;
+ __be16 res;
skb->dev = dev;
skb_reset_mac_header(skb);
@@ -181,15 +181,15 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
* the protocol design and runs IPX over 802.3 without an 802.2 LLC
* layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
* won't work for fault tolerant netware but does for the rest.
+ * We use skb->dev as temporary storage to not hit
+ * CONFIG_STACKPROTECTOR_STRONG=y costs on some platforms.
*/
- sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point);
- if (sap && *sap == 0xFFFF)
- return htons(ETH_P_802_3);
+ sap = skb_header_pointer(skb, 0, sizeof(*sap), &skb->dev);
+ res = (sap && *sap == 0xFFFF) ? htons(ETH_P_802_3) : htons(ETH_P_802_2);
- /*
- * Real 802.2 LLC
- */
- return htons(ETH_P_802_2);
+ /* restore skb->dev in case it was mangled by skb_header_pointer(). */
+ skb->dev = dev;
+ return res;
}
EXPORT_SYMBOL(eth_type_trans);
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 55223ebc2a7e..369c05cf8163 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -233,6 +233,10 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(800000, DR4_2, Full),
__DEFINE_LINK_MODE_NAME(800000, SR4, Full),
__DEFINE_LINK_MODE_NAME(800000, VR4, Full),
+ __DEFINE_LINK_MODE_NAME(1600000, CR8, Full),
+ __DEFINE_LINK_MODE_NAME(1600000, KR8, Full),
+ __DEFINE_LINK_MODE_NAME(1600000, DR8, Full),
+ __DEFINE_LINK_MODE_NAME(1600000, DR8_2, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -422,6 +426,10 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(800000, DR4_2, Full),
__DEFINE_LINK_MODE_PARAMS(800000, SR4, Full),
__DEFINE_LINK_MODE_PARAMS(800000, VR4, Full),
+ __DEFINE_LINK_MODE_PARAMS(1600000, CR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(1600000, KR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(1600000, DR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(1600000, DR8_2, Full),
};
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
EXPORT_SYMBOL_GPL(link_mode_params);
diff --git a/net/handshake/genl.c b/net/handshake/genl.c
index f55d14d7b726..870612609491 100644
--- a/net/handshake/genl.c
+++ b/net/handshake/genl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/handshake.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/net/handshake/genl.h b/net/handshake/genl.h
index ae72a596f6cc..8d3e18672daf 100644
--- a/net/handshake/genl.h
+++ b/net/handshake/genl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/handshake.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_HANDSHAKE_GEN_H
#define _LINUX_HANDSHAKE_GEN_H
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index a31b94ce8968..08d811f11896 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -756,23 +756,8 @@ EXPORT_SYMBOL(inet_stream_connect);
void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
{
if (mem_cgroup_sockets_enabled) {
- gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
-
mem_cgroup_sk_alloc(newsk);
-
- if (mem_cgroup_from_sk(newsk)) {
- int amt;
-
- /* The socket has not been accepted yet, no need
- * to look at newsk->sk_wmem_queued.
- */
- amt = sk_mem_pages(newsk->sk_forward_alloc +
- atomic_read(&newsk->sk_rmem_alloc));
- if (amt)
- mem_cgroup_sk_charge(newsk, amt, gfp);
- }
-
- kmem_cache_charge(newsk, gfp);
+ __sk_charge(newsk, GFP_KERNEL);
}
sock_rps_record_flow(newsk);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index e0d94270da28..05828d4cb6cd 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -122,8 +122,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
- : htons(ETH_P_IP);
+ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
+ XFRM_MODE_SKB_CB(skb)->protocol);
+ __be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
+ : htons(ETH_P_IP);
return skb_eth_gso_segment(skb, features, type);
}
diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c
index 506260b4a4dc..7a99639204b1 100644
--- a/net/ipv4/fou_nl.c
+++ b/net/ipv4/fou_nl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/net/ipv4/fou_nl.h b/net/ipv4/fou_nl.h
index 63a6c4ed803d..438342dc8507 100644
--- a/net/ipv4/fou_nl.h
+++ b/net/ipv4/fou_nl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_FOU_GEN_H
#define _LINUX_FOU_GEN_H
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b4eae731c9ba..97d57c52b9ad 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -737,9 +737,9 @@ void inet_csk_init_xmit_timers(struct sock *sk,
{
struct inet_connection_sock *icsk = inet_csk(sk);
- timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
+ timer_setup(&sk->tcp_retransmit_timer, retransmit_handler, 0);
timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
- timer_setup(&sk->sk_timer, keepalive_handler, 0);
+ timer_setup(&icsk->icsk_keepalive_timer, keepalive_handler, 0);
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
@@ -750,9 +750,9 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
smp_store_release(&icsk->icsk_pending, 0);
smp_store_release(&icsk->icsk_ack.pending, 0);
- sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer(sk, &sk->tcp_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
- sk_stop_timer(sk, &sk->sk_timer);
+ sk_stop_timer(sk, &icsk->icsk_keepalive_timer);
}
void inet_csk_clear_xmit_timers_sync(struct sock *sk)
@@ -765,9 +765,9 @@ void inet_csk_clear_xmit_timers_sync(struct sock *sk)
smp_store_release(&icsk->icsk_pending, 0);
smp_store_release(&icsk->icsk_ack.pending, 0);
- sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer_sync(sk, &sk->tcp_retransmit_timer);
sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
- sk_stop_timer_sync(sk, &sk->sk_timer);
+ sk_stop_timer_sync(sk, &icsk->icsk_keepalive_timer);
}
struct dst_entry *inet_csk_route_req(const struct sock *sk,
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f0b6c5a411a2..3f5b1418a610 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -287,17 +287,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 1;
r->idiag_retrans = READ_ONCE(icsk->icsk_retransmits);
r->idiag_expires =
- jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies);
+ jiffies_delta_to_msecs(tcp_timeout_expires(sk) - jiffies);
} else if (icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
r->idiag_retrans = READ_ONCE(icsk->icsk_probes_out);
r->idiag_expires =
- jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies);
- } else if (timer_pending(&sk->sk_timer)) {
+ jiffies_delta_to_msecs(tcp_timeout_expires(sk) - jiffies);
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
r->idiag_timer = 2;
r->idiag_retrans = READ_ONCE(icsk->icsk_probes_out);
r->idiag_expires =
- jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
+ jiffies_delta_to_msecs(icsk->icsk_keepalive_timer.expires - jiffies);
}
if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 35367f8e2da3..a1a50a5c80dc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1343,6 +1343,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dou8vec_minmax,
},
{
+ .procname = "tcp_rcvbuf_low_rtt",
+ .data = &init_net.ipv4.sysctl_tcp_rcvbuf_low_rtt,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
.procname = "tcp_tso_win_divisor",
.data = &init_net.ipv4.sysctl_tcp_tso_win_divisor,
.maxlen = sizeof(u8),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dee578aad690..f035440c475a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2587,7 +2587,7 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
if (err)
goto out;
- atomic_long_inc(&niov->pp_ref_count);
+ atomic_long_inc(&niov->desc.pp_ref_count);
tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
sent += copy;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9df5d7515605..198f8a0d37be 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -896,6 +896,7 @@ void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
const struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
u32 rcvwin, rcvbuf, cap, oldval;
+ u32 rtt_threshold, rtt_us;
u64 grow;
oldval = tp->rcvq_space.space;
@@ -908,10 +909,19 @@ void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
/* DRS is always one RTT late. */
rcvwin = newval << 1;
- /* slow start: allow the sender to double its rate. */
- grow = (u64)rcvwin * (newval - oldval);
- do_div(grow, oldval);
- rcvwin += grow << 1;
+ rtt_us = tp->rcv_rtt_est.rtt_us >> 3;
+ rtt_threshold = READ_ONCE(net->ipv4.sysctl_tcp_rcvbuf_low_rtt);
+ if (rtt_us < rtt_threshold) {
+ /* For small RTT, we set @grow to rcvwin * rtt_us/rtt_threshold.
+ * It might take a few additional ms to reach 'line rate',
+ * but will avoid sk_rcvbuf inflation and poor cache use.
+ */
+ grow = div_u64((u64)rcvwin * rtt_us, rtt_threshold);
+ } else {
+ /* slow start: allow the sender to double its rate. */
+ grow = div_u64(((u64)rcvwin << 1) * (newval - oldval), oldval);
+ }
+ rcvwin += grow;
if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
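A userspace sketch of the two growth branches: below the tcp_rcvbuf_low_rtt threshold (1 ms by default, per the tcp_ipv4.c hunk further down) the window grows in proportion to the measured RTT, otherwise the pre-existing slow-start style doubling applies. The byte counts are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t oldval = 100000, newval = 150000;	/* assumed rcvq_space samples */
	uint64_t rcvwin = newval << 1;			/* DRS is one RTT late */
	uint64_t rtt_us = 200, rtt_threshold = 1000;	/* 0.2 ms RTT, 1 ms default threshold */
	uint64_t grow;

	if (rtt_us < rtt_threshold)
		grow = rcvwin * rtt_us / rtt_threshold;
	else
		grow = (rcvwin << 1) * (newval - oldval) / oldval;

	printf("rcvwin grows from %llu to %llu\n",
	       (unsigned long long)rcvwin,
	       (unsigned long long)(rcvwin + grow));
	return 0;
}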
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7d9fec2950b..f8a9596e8f4d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2869,13 +2869,13 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk_timeout(icsk);
+ timer_expires = tcp_timeout_expires(sk);
} else if (icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk_timeout(icsk);
- } else if (timer_pending(&sk->sk_timer)) {
+ timer_expires = tcp_timeout_expires(sk);
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sk->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
@@ -3566,6 +3566,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_adv_win_scale = 1;
net->ipv4.sysctl_tcp_frto = 2;
net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
+ net->ipv4.sysctl_tcp_rcvbuf_low_rtt = USEC_PER_MSEC;
/* This limits the percentage of the congestion window which we
* will allow a single TSO frame to consume. Building TSO frames
* which are too large can cause TCP streams to be bursty.
@@ -3593,7 +3594,7 @@ static int __net_init tcp_sk_init(struct net *net)
sizeof(init_net.ipv4.sysctl_tcp_wmem));
}
net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
- net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
+ net->ipv4.sysctl_tcp_comp_sack_slack_ns = 10 * NSEC_PER_USEC;
net->ipv4.sysctl_tcp_comp_sack_nr = 44;
net->ipv4.sysctl_tcp_comp_sack_rtt_percent = 33;
net->ipv4.sysctl_tcp_backlog_ack_defer = 1;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d8f4d813e8dd..bd5462154f97 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -337,7 +337,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
- tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
tw->tw_mark = sk->sk_mark;
tw->tw_priority = READ_ONCE(sk->sk_priority);
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 2cb93da93abc..fdda18b1abda 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -282,33 +282,6 @@ struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
return NULL;
}
-struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
-{
- unsigned int thlen, hlen, off;
- struct tcphdr *th;
-
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*th);
- th = skb_gro_header(skb, hlen, off);
- if (unlikely(!th))
- return NULL;
-
- thlen = th->doff * 4;
- if (thlen < sizeof(*th))
- return NULL;
-
- hlen = off + thlen;
- if (!skb_gro_may_pull(skb, hlen)) {
- th = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!th))
- return NULL;
- }
-
- skb_gro_pull(skb, thlen);
-
- return th;
-}
-
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct tcphdr *th)
{
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0672c3d8f4f1..160080c9021d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -510,7 +510,7 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
* and tp->rcv_tstamp might very well have been written recently.
* rcv_delta can thus be negative.
*/
- rcv_delta = icsk_timeout(icsk) - tp->rcv_tstamp;
+ rcv_delta = tcp_timeout_expires(sk) - tp->rcv_tstamp;
if (rcv_delta <= timeout)
return false;
@@ -697,9 +697,9 @@ void tcp_write_timer_handler(struct sock *sk)
!icsk->icsk_pending)
return;
- if (time_after(icsk_timeout(icsk), jiffies)) {
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
- icsk_timeout(icsk));
+ if (time_after(tcp_timeout_expires(sk), jiffies)) {
+ sk_reset_timer(sk, &sk->tcp_retransmit_timer,
+ tcp_timeout_expires(sk));
return;
}
tcp_mstamp_refresh(tcp_sk(sk));
@@ -725,12 +725,10 @@ void tcp_write_timer_handler(struct sock *sk)
static void tcp_write_timer(struct timer_list *t)
{
- struct inet_connection_sock *icsk =
- timer_container_of(icsk, t, icsk_retransmit_timer);
- struct sock *sk = &icsk->icsk_inet.sk;
+ struct sock *sk = timer_container_of(sk, t, tcp_retransmit_timer);
/* Avoid locking the socket when there is no pending event. */
- if (!smp_load_acquire(&icsk->icsk_pending))
+ if (!smp_load_acquire(&inet_csk(sk)->icsk_pending))
goto out;
bh_lock_sock(sk);
@@ -755,12 +753,12 @@ void tcp_syn_ack_timeout(const struct request_sock *req)
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
- sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
+ sk_reset_timer(sk, &inet_csk(sk)->icsk_keepalive_timer, jiffies + len);
}
static void tcp_delete_keepalive_timer(struct sock *sk)
{
- sk_stop_timer(sk, &sk->sk_timer);
+ sk_stop_timer(sk, &inet_csk(sk)->icsk_keepalive_timer);
}
void tcp_set_keepalive(struct sock *sk, int val)
@@ -777,8 +775,9 @@ EXPORT_IPV6_MOD_GPL(tcp_set_keepalive);
static void tcp_keepalive_timer(struct timer_list *t)
{
- struct sock *sk = timer_container_of(sk, t, sk_timer);
- struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk =
+ timer_container_of(icsk, t, icsk_keepalive_timer);
+ struct sock *sk = &icsk->icsk_inet.sk;
struct tcp_sock *tp = tcp_sk(sk);
u32 elapsed;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 40e9c336f6c5..b66217d1b2f8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1324,7 +1324,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
__in6_ifa_put(ifp);
}
- if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
+ if (!(ifp->flags & IFA_F_NOPREFIXROUTE))
action = check_cleanup_prefix_route(ifp, &expires);
list_del_rcu(&ifp->if_list);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 7b41fb4f00b5..22410243ebe8 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -158,8 +158,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- __be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
- : htons(ETH_P_IPV6);
+ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
+ XFRM_MODE_SKB_CB(skb)->protocol);
+ __be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP)
+ : htons(ETH_P_IPV6);
return skb_eth_gso_segment(skb, features, type);
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 02c16909f618..2111af022d94 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1138,6 +1138,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
fib6_set_expires(iter, rt->expires);
fib6_add_gc_list(iter);
}
+ if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT))) {
+ iter->fib6_flags &= ~RTF_ADDRCONF;
+ iter->fib6_flags &= ~RTF_PREFIX_RT;
+ }
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 08113f430124..280fe5978559 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2163,13 +2163,13 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk_timeout(icsk);
+ timer_expires = tcp_timeout_expires(sp);
} else if (icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk_timeout(icsk);
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = tcp_timeout_expires(sp);
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a4f1df92417d..1e62fbc22cb7 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -10,8 +10,7 @@
* Ursula Braun <ursula.braun@de.ibm.com>
*/
-#define KMSG_COMPONENT "af_iucv"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "af_iucv: " fmt
#include <linux/filter.h>
#include <linux/module.h>
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 008be0abe3a5..da2af413c89d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -20,8 +20,7 @@
* CP Programming Service, IBM document # SC24-5760
*/
-#define KMSG_COMPONENT "iucv"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "iucv: " fmt
#include <linux/kernel_stat.h>
#include <linux/export.h>
diff --git a/net/kcm/Kconfig b/net/kcm/Kconfig
index 16f39f2565d9..66660a06cacf 100644
--- a/net/kcm/Kconfig
+++ b/net/kcm/Kconfig
@@ -7,5 +7,5 @@ config AF_KCM
select STREAM_PARSER
help
KCM (Kernel Connection Multiplexor) sockets provide a method
- for multiplexing messages of a message based application
- protocol over kernel connectons (e.g. TCP connections).
+ for multiplexing messages of a message-based application
+ protocol over kernel connections (e.g. TCP connections).
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 2ebde0352245..571200433aa9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3903,6 +3903,8 @@ static int __init ipsec_pfkey_init(void)
{
int err = proto_register(&key_proto, 0);
+ pr_warn_once("PFKEY is deprecated and scheduled to be removed in 2027, "
+ "please contact the netdev mailing list\n");
if (err != 0)
goto out;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index c4f4a57cd67c..687c1366a4d0 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1246,9 +1246,9 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
else
l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
- /* Reset skb netfilter state */
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ /* Reset control buffer */
+ memset(skb->cb, 0, sizeof(skb->cb));
+
nf_reset_ct(skb);
/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 4d314e062ba9..2ac4011a953f 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -623,6 +623,7 @@ static int mctp_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
skb->protocol = htons(ETH_P_MCTP);
skb->pkt_type = PACKET_OUTGOING;
+ skb->dev = dst->dev->dev;
if (skb->len > dst->mtu) {
kfree_skb(skb);
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index b9e451197902..82ec15bcfd7f 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -32,7 +32,8 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
/* dequeue the skb from sk receive queue */
__skb_unlink(skb, &ssk->sk_receive_queue);
skb_ext_reset(skb);
- skb_orphan(skb);
+
+ mptcp_subflow_lend_fwdmem(subflow, skb);
/* We copy the fastopen data, but that don't belong to the mptcp sequence
* space, need to offset it in the subflow sequence, see mptcp_subflow_get_map_offset()
@@ -50,6 +51,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
mptcp_data_lock(sk);
DEBUG_NET_WARN_ON_ONCE(sock_owned_by_user_nocheck(sk));
+ mptcp_borrow_fwdmem(sk, skb);
skb_set_owner_r(skb, sk);
__skb_queue_tail(&sk->sk_receive_queue, skb);
mptcp_sk(sk)->bytes_received += skb->len;
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 171643815076..f23fda0c55a7 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -71,7 +71,6 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPFastcloseRx", MPTCP_MIB_MPFASTCLOSERX),
SNMP_MIB_ITEM("MPRstTx", MPTCP_MIB_MPRSTTX),
SNMP_MIB_ITEM("MPRstRx", MPTCP_MIB_MPRSTRX),
- SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED),
SNMP_MIB_ITEM("SubflowStale", MPTCP_MIB_SUBFLOWSTALE),
SNMP_MIB_ITEM("SubflowRecover", MPTCP_MIB_SUBFLOWRECOVER),
SNMP_MIB_ITEM("SndWndShared", MPTCP_MIB_SNDWNDSHARED),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index a1d3e9369fbb..812218b5ed2b 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -70,7 +70,6 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_MPFASTCLOSERX, /* Received a MP_FASTCLOSE */
MPTCP_MIB_MPRSTTX, /* Transmit a MP_RST */
MPTCP_MIB_MPRSTRX, /* Received a MP_RST */
- MPTCP_MIB_RCVPRUNED, /* Incoming packet dropped due to memory limit */
MPTCP_MIB_SUBFLOWSTALE, /* Subflows entered 'stale' status */
MPTCP_MIB_SUBFLOWRECOVER, /* Subflows returned to active status after being stale */
MPTCP_MIB_SNDWNDSHARED, /* Subflow snd wnd is overridden by msk's one */
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index ac974299de71..136c2d05c0ee 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -195,7 +195,8 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_info *info = _info;
- r->idiag_rqueue = sk_rmem_alloc_get(sk);
+ r->idiag_rqueue = sk_rmem_alloc_get(sk) +
+ READ_ONCE(mptcp_sk(sk)->backlog_len);
r->idiag_wqueue = sk_wmem_alloc_get(sk);
if (inet_sk_state_load(sk) == TCP_LISTEN) {
diff --git a/net/mptcp/mptcp_pm_gen.c b/net/mptcp/mptcp_pm_gen.c
index dcffd847af33..c180930a8e0e 100644
--- a/net/mptcp/mptcp_pm_gen.c
+++ b/net/mptcp/mptcp_pm_gen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/mptcp_pm.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/net/mptcp/mptcp_pm_gen.h b/net/mptcp/mptcp_pm_gen.h
index e24258f6f819..b9280bcb43f5 100644
--- a/net/mptcp/mptcp_pm_gen.h
+++ b/net/mptcp/mptcp_pm_gen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/mptcp_pm.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_MPTCP_PM_GEN_H
#define _LINUX_MPTCP_PM_GEN_H
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 1103b3341a70..f24ae7d40e88 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -838,8 +838,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
opts->suboptions = 0;
+ /* Force later mptcp_write_options(), but do not use any actual
+ * option space.
+ */
if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
- return false;
+ return true;
if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
@@ -1041,6 +1044,31 @@ static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
WRITE_ONCE(msk->snd_una, new_snd_una);
}
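+/* Propagate msk-level receive window growth performed by other subflows to
+ * this subflow's TCP-level window on the incoming path.
+ */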
+static void rwin_update(struct mptcp_sock *msk, struct sock *ssk,
+ struct sk_buff *skb)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct tcp_sock *tp = tcp_sk(ssk);
+ u64 mptcp_rcv_wnd;
+
+ /* Avoid touching extra cachelines if TCP is going to accept this
+ * skb without filling the TCP-level window even with a possibly
+ * outdated mptcp-level rwin.
+ */
+ if (!skb->len || skb->len < tcp_receive_window(tp))
+ return;
+
+ mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent);
+ if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent))
+ return;
+
+ /* Some other subflow grew the mptcp-level rwin since rcv_wup,
+ * resync.
+ */
+ tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent;
+ subflow->rcv_wnd_sent = mptcp_rcv_wnd;
+}
+
static void ack_update_msk(struct mptcp_sock *msk,
struct sock *ssk,
struct mptcp_options_received *mp_opt)
@@ -1208,6 +1236,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
*/
if (mp_opt.use_ack)
ack_update_msk(msk, sk, &mp_opt);
+ rwin_update(msk, sk, skb);
/* Zero-data-length packets are dropped by the caller and not
* propagated to the MPTCP layer, so the skb extension does not
@@ -1294,6 +1323,10 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
if (rcv_wnd_new != rcv_wnd_old) {
raise_win:
+ /* The msk-level rcv wnd is after the tcp level one,
+ * sync the latter.
+ */
+ rcv_wnd_new = rcv_wnd_old;
win = rcv_wnd_old - ack_seq;
tp->rcv_wnd = min_t(u64, win, U32_MAX);
new_win = tp->rcv_wnd;
@@ -1317,6 +1350,21 @@ raise_win:
update_wspace:
WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
+ subflow->rcv_wnd_sent = rcv_wnd_new;
+}
+
+static void mptcp_track_rwin(struct tcp_sock *tp)
+{
+ const struct sock *ssk = (const struct sock *)tp;
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk;
+
+ if (!ssk)
+ return;
+
+ subflow = mptcp_subflow_ctx(ssk);
+ msk = mptcp_sk(subflow->conn);
+ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
}
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
@@ -1611,6 +1659,10 @@ mp_rst:
opts->reset_transient,
opts->reset_reason);
return;
+ } else if (unlikely(!opts->suboptions)) {
+ /* Fallback to TCP */
+ mptcp_track_rwin(tp);
+ return;
}
if (OPTION_MPTCP_PRIO & opts->suboptions) {
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 2ff1b9499568..e2040c327af6 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -18,6 +18,7 @@ struct mptcp_pm_add_entry {
u8 retrans_times;
struct timer_list add_timer;
struct mptcp_sock *sock;
+ struct rcu_head rcu;
};
static DEFINE_SPINLOCK(mptcp_pm_list_lock);
@@ -155,7 +156,7 @@ bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
entry = mptcp_pm_del_add_timer(msk, addr, false);
ret = entry;
- kfree(entry);
+ kfree_rcu(entry, rcu);
return ret;
}
@@ -345,22 +346,27 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
- struct timer_list *add_timer = NULL;
+ bool stop_timer = false;
+
+ rcu_read_lock();
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
if (entry && (!check_id || entry->addr.id == addr->id)) {
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
- add_timer = &entry->add_timer;
+ stop_timer = true;
}
if (!check_id && entry)
list_del(&entry->list);
spin_unlock_bh(&msk->pm.lock);
- /* no lock, because sk_stop_timer_sync() is calling timer_delete_sync() */
- if (add_timer)
- sk_stop_timer_sync(sk, add_timer);
+ /* Note: entry might have been removed by another thread.
+ * We hold rcu_read_lock() to ensure it is not freed under us.
+ */
+ if (stop_timer)
+ sk_stop_timer_sync(sk, &entry->add_timer);
+ rcu_read_unlock();
return entry;
}
@@ -415,7 +421,7 @@ static void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
list_for_each_entry_safe(entry, tmp, &free_list, list) {
sk_stop_timer_sync(sk, &entry->add_timer);
- kfree(entry);
+ kfree_rcu(entry, rcu);
}
}
@@ -588,6 +594,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
const struct mptcp_subflow_context *subflow)
{
+ struct sock *sk = (struct sock *)msk;
struct mptcp_pm_data *pm = &msk->pm;
bool update_subflows;
@@ -611,7 +618,8 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
/* Even if this subflow is not really established, tell the PM to try
* to pick the next ones, if possible.
*/
- if (mptcp_pm_nl_check_work_pending(msk))
+ if (mptcp_is_fully_established(sk) &&
+ mptcp_pm_nl_check_work_pending(msk))
mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
spin_unlock_bh(&pm->lock);
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index 598f01a573c1..57570a44e418 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -337,6 +337,8 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
struct mptcp_pm_local local;
mptcp_mpc_endpoint_setup(msk);
+ if (!mptcp_is_fully_established(sk))
+ return;
pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
msk->pm.local_addr_used, endp_subflow_max,
@@ -680,7 +682,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
{
- if (rm_id && WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
+ if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
u8 limit_add_addr_accepted =
mptcp_pm_get_limit_add_addr_accepted(msk);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 4cd5df01446e..e212c1374bd0 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -61,11 +61,13 @@ static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
+ unsigned short family = READ_ONCE(sk->sk_family);
+
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- if (sk->sk_prot == &tcpv6_prot)
+ if (family == AF_INET6)
return &inet6_stream_ops;
#endif
- WARN_ON_ONCE(sk->sk_prot != &tcp_prot);
+ WARN_ON_ONCE(family != AF_INET);
return &inet_stream_ops;
}
@@ -76,6 +78,13 @@ bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
if (__mptcp_check_fallback(msk))
return true;
+ /* The caller may not be holding the msk socket lock, but
+ * in the fallback case only the current subflow is touching
+ * the OoO queue.
+ */
+ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
+ return false;
+
spin_lock_bh(&msk->fallback_lock);
if (!msk->allow_infinite_fallback) {
spin_unlock_bh(&msk->fallback_lock);
@@ -349,7 +358,7 @@ end:
static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
int copy_len)
{
- const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
/* the skb map_seq accounts for the skb offset:
@@ -374,11 +383,7 @@ static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
struct mptcp_sock *msk = mptcp_sk(sk);
struct sk_buff *tail;
- /* try to fetch required memory from subflow */
- if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
- goto drop;
- }
+ mptcp_borrow_fwdmem(sk, skb);
if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
/* in sequence */
@@ -400,16 +405,13 @@ static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
* will retransmit as needed, if needed.
*/
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
-drop:
mptcp_drop(sk, skb);
return false;
}
static void mptcp_stop_rtx_timer(struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
mptcp_sk(sk)->timer_ival = 0;
}
@@ -515,7 +517,7 @@ static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subfl
const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
- icsk_timeout(inet_csk(ssk)) - jiffies : 0;
+ tcp_timeout_expires(ssk) - jiffies : 0;
}
static void mptcp_set_timeout(struct sock *sk)
@@ -655,8 +657,50 @@ static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
}
}
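+/* Defer an skb to the msk backlog when it cannot be moved to the msk receive
+ * queue right away, coalescing with the backlog tail when possible.
+ */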
+static void __mptcp_add_backlog(struct sock *sk,
+ struct mptcp_subflow_context *subflow,
+ struct sk_buff *skb)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *tail = NULL;
+ struct sock *ssk = skb->sk;
+ bool fragstolen;
+ int delta;
+
+ if (unlikely(sk->sk_state == TCP_CLOSE)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+ return;
+ }
+
+ /* Try to coalesce with the last skb in our backlog */
+ if (!list_empty(&msk->backlog_list))
+ tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
+
+ if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
+ ssk == tail->sk &&
+ __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
+ skb->truesize -= delta;
+ kfree_skb_partial(skb, fragstolen);
+ __mptcp_subflow_lend_fwdmem(subflow, delta);
+ goto account;
+ }
+
+ list_add_tail(&skb->list, &msk->backlog_list);
+ mptcp_subflow_lend_fwdmem(subflow, skb);
+ delta = skb->truesize;
+
+account:
+ WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
+
+ /* Possibly not accept()ed yet, keep track of memory not CG
+ * accounted, mptcp_graft_subflows() will handle it.
+ */
+ if (!mem_cgroup_from_sk(ssk))
+ msk->backlog_unaccounted += delta;
+}
+
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
- struct sock *ssk)
+ struct sock *ssk, bool own_msk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = (struct sock *)msk;
@@ -672,9 +716,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct sk_buff *skb;
bool fin;
- if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
- break;
-
/* try to move as much data as available */
map_remaining = subflow->map_data_len -
mptcp_subflow_get_map_offset(subflow);
@@ -701,8 +742,13 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
size_t len = skb->len - offset;
mptcp_init_skb(ssk, skb, offset, len);
- skb_orphan(skb);
- ret = __mptcp_move_skb(sk, skb) || ret;
+
+ if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
+ mptcp_subflow_lend_fwdmem(subflow, skb);
+ ret |= __mptcp_move_skb(sk, skb);
+ } else {
+ __mptcp_add_backlog(sk, subflow, skb);
+ }
seq += len;
if (unlikely(map_remaining < len)) {
@@ -821,7 +867,7 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
struct sock *sk = (struct sock *)msk;
bool moved;
- moved = __mptcp_move_skbs_from_subflow(msk, ssk);
+ moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
__mptcp_ofo_queue(msk);
if (unlikely(ssk->sk_err))
__mptcp_subflow_error_report(sk, ssk);
@@ -836,31 +882,26 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
return moved;
}
-static void __mptcp_data_ready(struct sock *sk, struct sock *ssk)
-{
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- /* Wake-up the reader only for in-sequence data */
- if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
- sk->sk_data_ready(sk);
-}
-
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct mptcp_sock *msk = mptcp_sk(sk);
/* The peer can send data while we are shutting down this
- * subflow at msk destruction time, but we must avoid enqueuing
+ * subflow at subflow destruction time, but we must avoid enqueuing
* more data to the msk receive queue
*/
- if (unlikely(subflow->disposable))
+ if (unlikely(subflow->closing))
return;
mptcp_data_lock(sk);
- if (!sock_owned_by_user(sk))
- __mptcp_data_ready(sk, ssk);
- else
- __set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags);
+ if (!sock_owned_by_user(sk)) {
+ /* Wake-up the reader only for in-sequence data */
+ if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
+ sk->sk_data_ready(sk);
+ } else {
+ __mptcp_move_skbs_from_subflow(msk, ssk, false);
+ }
mptcp_data_unlock(sk);
}
@@ -886,12 +927,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
mptcp_subflow_joined(msk, ssk);
spin_unlock_bh(&msk->fallback_lock);
- /* attach to msk socket only after we are sure we will deal with it
- * at close time
- */
- if (sk->sk_socket && !ssk->sk_socket)
- mptcp_sock_graft(ssk, sk->sk_socket);
-
mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
mptcp_sockopt_sync_locked(msk, ssk);
mptcp_stop_tout_timer(sk);
@@ -917,12 +952,11 @@ static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list
static bool mptcp_rtx_timer_pending(struct sock *sk)
{
- return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
+ return timer_pending(&sk->mptcp_retransmit_timer);
}
static void mptcp_reset_rtx_timer(struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
unsigned long tout;
/* prevent rescheduling on close */
@@ -930,19 +964,24 @@ static void mptcp_reset_rtx_timer(struct sock *sk)
return;
tout = mptcp_sk(sk)->timer_ival;
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
+ sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
}
bool mptcp_schedule_work(struct sock *sk)
{
- if (inet_sk_state_load(sk) != TCP_CLOSE &&
- schedule_work(&mptcp_sk(sk)->work)) {
- /* each subflow already holds a reference to the sk, and the
- * workqueue is invoked by a subflow, so sk can't go away here.
- */
- sock_hold(sk);
+ if (inet_sk_state_load(sk) == TCP_CLOSE)
+ return false;
+
+ /* Get a reference on this socket, mptcp_worker() will release it.
+ * As mptcp_worker() might complete before us, we can not avoid
+ * a sock_hold()/sock_put() if schedule_work() returns false.
+ */
+ sock_hold(sk);
+
+ if (schedule_work(&mptcp_sk(sk)->work))
return true;
- }
+
+ sock_put(sk);
return false;
}
@@ -2100,60 +2139,80 @@ new_measure:
msk->rcvq_space.time = mstamp;
}
-static struct mptcp_subflow_context *
-__mptcp_first_ready_from(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
+static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
{
- struct mptcp_subflow_context *start_subflow = subflow;
+ struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ bool moved = false;
+
+ *delta = 0;
+ while (1) {
+ /* If the msk recvbuf is full, stop; don't drop */
+ if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
+ break;
+
+ prefetch(skb->next);
+ list_del(&skb->list);
+ *delta += skb->truesize;
- while (!READ_ONCE(subflow->data_avail)) {
- subflow = mptcp_next_subflow(msk, subflow);
- if (subflow == start_subflow)
- return NULL;
+ moved |= __mptcp_move_skb(sk, skb);
+ if (list_empty(skbs))
+ break;
+
+ skb = list_first_entry(skbs, struct sk_buff, list);
}
- return subflow;
+
+ __mptcp_ofo_queue(msk);
+ if (moved)
+ mptcp_check_data_fin((struct sock *)msk);
+ return moved;
}
-static bool __mptcp_move_skbs(struct sock *sk)
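+/* Grab the whole backlog for spooling; return false when nothing is queued
+ * or the msk receive buffer is already full.
+ */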
+static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
{
- struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- bool ret = false;
- if (list_empty(&msk->conn_list))
+ /* After CG initialization, subflows should never add skbs before
+ * gaining the CG themselves.
+ */
+ DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
+ mem_cgroup_from_sk(sk));
+
+ /* Don't spool the backlog if the rcvbuf is full. */
+ if (list_empty(&msk->backlog_list) ||
+ sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
return false;
- subflow = list_first_entry(&msk->conn_list,
- struct mptcp_subflow_context, node);
- for (;;) {
- struct sock *ssk;
- bool slowpath;
+ INIT_LIST_HEAD(skbs);
+ list_splice_init(&msk->backlog_list, skbs);
+ return true;
+}
- /*
- * As an optimization avoid traversing the subflows list
- * and ev. acquiring the subflow socket lock before baling out
- */
- if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
- break;
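+/* Update the backlog accounting after a spool attempt: subtract the bytes
+ * actually moved and put any leftover skbs back on the backlog list.
+ */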
+static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
+ struct list_head *skbs)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
- subflow = __mptcp_first_ready_from(msk, subflow);
- if (!subflow)
- break;
+ WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
+ list_splice(skbs, &msk->backlog_list);
+}
- ssk = mptcp_subflow_tcp_sock(subflow);
- slowpath = lock_sock_fast(ssk);
- ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret;
- if (unlikely(ssk->sk_err))
- __mptcp_error_report(sk);
- unlock_sock_fast(ssk, slowpath);
+static bool mptcp_move_skbs(struct sock *sk)
+{
+ struct list_head skbs;
+ bool enqueued = false;
+ u32 moved;
- subflow = mptcp_next_subflow(msk, subflow);
- }
+ mptcp_data_lock(sk);
+ while (mptcp_can_spool_backlog(sk, &skbs)) {
+ mptcp_data_unlock(sk);
+ enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
- __mptcp_ofo_queue(msk);
- if (ret)
- mptcp_check_data_fin((struct sock *)msk);
- return ret;
+ mptcp_data_lock(sk);
+ mptcp_backlog_spooled(sk, moved, &skbs);
+ }
+ mptcp_data_unlock(sk);
+ return enqueued;
}
static unsigned int mptcp_inq_hint(const struct sock *sk)
@@ -2219,7 +2278,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
copied += bytes_read;
- if (skb_queue_empty(&sk->sk_receive_queue) && __mptcp_move_skbs(sk))
+ if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
continue;
/* only the MPTCP socket status is relevant here. The exit
@@ -2292,9 +2351,7 @@ out_err:
static void mptcp_retransmit_timer(struct timer_list *t)
{
- struct inet_connection_sock *icsk = timer_container_of(icsk, t,
- icsk_retransmit_timer);
- struct sock *sk = &icsk->icsk_inet.sk;
+ struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer);
struct mptcp_sock *msk = mptcp_sk(sk);
bh_lock_sock(sk);
@@ -2312,7 +2369,9 @@ static void mptcp_retransmit_timer(struct timer_list *t)
static void mptcp_tout_timer(struct timer_list *t)
{
- struct sock *sk = timer_container_of(sk, t, sk_timer);
+ struct inet_connection_sock *icsk =
+ timer_container_of(icsk, t, mptcp_tout_timer);
+ struct sock *sk = &icsk->icsk_inet.sk;
mptcp_schedule_work(sk);
sock_put(sk);
@@ -2398,7 +2457,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
/* flags for __mptcp_close_ssk() */
#define MPTCP_CF_PUSH BIT(1)
-#define MPTCP_CF_FASTCLOSE BIT(2)
/* be sure to send a reset only if the caller asked for it, also
* clean completely the subflow status when the subflow reaches
@@ -2409,7 +2467,7 @@ static void __mptcp_subflow_disconnect(struct sock *ssk,
unsigned int flags)
{
if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
- (flags & MPTCP_CF_FASTCLOSE)) {
+ subflow->send_fastclose) {
/* The MPTCP code never wait on the subflow sockets, TCP-level
* disconnect should never fail
*/
@@ -2434,6 +2492,25 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
{
struct mptcp_sock *msk = mptcp_sk(sk);
bool dispose_it, need_push = false;
+ int fwd_remaining;
+
+ /* Do not pass RX data to the msk, even if the subflow socket is not
+ * going to be freed (i.e. even for the first subflow on graceful
+ * subflow close.
+ */
+ lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ subflow->closing = 1;
+
+ /* Borrow the fwd allocated page left-over; fwd memory for the subflow
+ * could be negative at this point, but will reach zero soon - when
+ * the data allocated using such a fragment is freed.
+ */
+ if (subflow->lent_mem_frag) {
+ fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
+ sk_forward_alloc_add(sk, fwd_remaining);
+ sk_forward_alloc_add(ssk, -fwd_remaining);
+ subflow->lent_mem_frag = 0;
+ }
/* If the first subflow moved to a close state before accept, e.g. due
* to an incoming reset or listener shutdown, the subflow socket is
@@ -2445,7 +2522,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
/* ensure later check in mptcp_worker() will dispose the msk */
sock_set_flag(sk, SOCK_DEAD);
mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
- lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
mptcp_subflow_drop_ctx(ssk);
goto out_release;
}
@@ -2454,16 +2530,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
if (dispose_it)
list_del(&subflow->node);
- lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
-
- if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
- /* be sure to force the tcp_close path
- * to generate the egress reset
- */
- ssk->sk_lingertime = 0;
- sock_set_flag(ssk, SOCK_LINGER);
- subflow->send_fastclose = 1;
- }
+ if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
+ tcp_set_state(ssk, TCP_CLOSE);
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
if (!dispose_it) {
@@ -2524,6 +2592,9 @@ out:
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow)
{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *skb;
+
/* The first subflow can already be closed and still in the list */
if (subflow->close_event_done)
return;
@@ -2533,6 +2604,17 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
if (sk->sk_state == TCP_ESTABLISHED)
mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
+ /* Remove any reference from the backlog to this ssk; backlog skbs consume
+ * space in the msk receive queue, no need to touch sk->sk_rmem_alloc
+ */
+ list_for_each_entry(skb, &msk->backlog_list, list) {
+ if (skb->sk != ssk)
+ continue;
+
+ atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
+ skb->sk = NULL;
+ }
+
/* subflow aborted before reaching the fully_established status
* attempt the creation of the next subflow
*/
@@ -2559,7 +2641,8 @@ static void __mptcp_close_subflow(struct sock *sk)
if (ssk_state != TCP_CLOSE &&
(ssk_state != TCP_CLOSE_WAIT ||
- inet_sk_state_load(sk) != TCP_ESTABLISHED))
+ inet_sk_state_load(sk) != TCP_ESTABLISHED ||
+ __mptcp_check_fallback(msk)))
continue;
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2658,7 +2741,7 @@ static void __mptcp_retrans(struct sock *sk)
}
if (!mptcp_send_head(sk))
- return;
+ goto clear_scheduled;
goto reset_timer;
}
@@ -2689,7 +2772,7 @@ static void __mptcp_retrans(struct sock *sk)
if (__mptcp_check_fallback(msk)) {
spin_unlock_bh(&msk->fallback_lock);
release_sock(ssk);
- return;
+ goto clear_scheduled;
}
while (info.sent < info.limit) {
@@ -2721,6 +2804,15 @@ reset_timer:
if (!mptcp_rtx_timer_pending(sk))
mptcp_reset_rtx_timer(sk);
+
+clear_scheduled:
+ /* If no rtx data was available or in case of fallback, there
+ * could be left-over scheduled subflows; clear them all
+ * or later xmit could use bad ones
+ */
+ mptcp_for_each_subflow(msk, subflow)
+ if (READ_ONCE(subflow->scheduled))
+ mptcp_subflow_set_scheduled(subflow, false);
}
/* schedule the timeout timer for the relevant event: either close timeout
@@ -2742,7 +2834,7 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
*/
timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
- sk_reset_timer(sk, &sk->sk_timer, timeout);
+ sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout);
}
static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
@@ -2761,15 +2853,57 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
unlock_sock_fast(ssk, slow);
}
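+/* Free every skb still sitting in the msk backlog, reclaiming the forward
+ * memory borrowed from the subflows.
+ */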
+static void mptcp_backlog_purge(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *tmp, *skb;
+ LIST_HEAD(backlog);
+
+ mptcp_data_lock(sk);
+ list_splice_init(&msk->backlog_list, &backlog);
+ msk->backlog_len = 0;
+ mptcp_data_unlock(sk);
+
+ list_for_each_entry_safe(skb, tmp, &backlog, list) {
+ mptcp_borrow_fwdmem(sk, skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+ }
+ sk_mem_reclaim(sk);
+}
+
static void mptcp_do_fastclose(struct sock *sk)
{
struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
mptcp_set_state(sk, TCP_CLOSE);
- mptcp_for_each_subflow_safe(msk, subflow, tmp)
- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
- subflow, MPTCP_CF_FASTCLOSE);
+ mptcp_backlog_purge(sk);
+
+ /* Explicitly send the fastclose reset as needed */
+ if (__mptcp_check_fallback(msk))
+ return;
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+
+ /* Some subflow socket states don't allow/need a reset. */
+ if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ goto unlock;
+
+ subflow->send_fastclose = 1;
+
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+ * issue in __tcp_select_window(), see tcp_disconnect().
+ */
+ inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
+
+ tcp_send_active_reset(ssk, ssk->sk_allocation,
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE);
+unlock:
+ release_sock(ssk);
+ }
}
static void mptcp_worker(struct work_struct *work)
@@ -2796,7 +2930,11 @@ static void mptcp_worker(struct work_struct *work)
__mptcp_close_subflow(sk);
if (mptcp_close_tout_expired(sk)) {
+ struct mptcp_subflow_context *subflow, *tmp;
+
mptcp_do_fastclose(sk);
+ mptcp_for_each_subflow_safe(msk, subflow, tmp)
+ __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
mptcp_close_wake_up(sk);
}
@@ -2824,11 +2962,13 @@ static void __mptcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&msk->conn_list);
INIT_LIST_HEAD(&msk->join_list);
INIT_LIST_HEAD(&msk->rtx_queue);
+ INIT_LIST_HEAD(&msk->backlog_list);
INIT_WORK(&msk->work, mptcp_worker);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
msk->timer_ival = TCP_RTO_MIN;
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
+ msk->backlog_len = 0;
WRITE_ONCE(msk->first, NULL);
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -2845,8 +2985,8 @@ static void __mptcp_init_sock(struct sock *sk)
spin_lock_init(&msk->fallback_lock);
/* re-use the csk retrans timer for MPTCP-level retrans */
- timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
- timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
+ timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0);
+ timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0);
}
static void mptcp_ca_reset(struct sock *sk)
@@ -3048,7 +3188,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
might_sleep();
mptcp_stop_rtx_timer(sk);
- sk_stop_timer(sk, &sk->sk_timer);
+ sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
msk->pm.status = 0;
mptcp_release_sched(msk);
@@ -3199,6 +3339,28 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}
+static void mptcp_destroy_common(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct sock *sk = (struct sock *)msk;
+
+ __mptcp_clear_xmit(sk);
+ mptcp_backlog_purge(sk);
+
+ /* join list will be eventually flushed (with rst) at sock lock release time */
+ mptcp_for_each_subflow_safe(msk, subflow, tmp)
+ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
+
+ __skb_queue_purge(&sk->sk_receive_queue);
+ skb_rbtree_purge(&msk->out_of_order_queue);
+
+ /* move all the rx fwd alloc into the sk_mem_reclaim_final in
+ * inet_sock_destruct(), which will dispose of it
+ */
+ mptcp_token_destroy(msk);
+ mptcp_pm_destroy(msk);
+}
+
static int mptcp_disconnect(struct sock *sk, int flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -3221,7 +3383,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
/* msk->subflow is still intact, the following will not free the first
* subflow
*/
- mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
+ mptcp_do_fastclose(sk);
+ mptcp_destroy_common(msk);
/* The first subflow is already in TCP_CLOSE status, the following
* can't overlap with a fallback anymore
@@ -3250,6 +3413,9 @@ static int mptcp_disconnect(struct sock *sk, int flags)
msk->bytes_retrans = 0;
msk->rcvspace_init = 0;
+ /* for fallback's sake */
+ WRITE_ONCE(msk->ack_seq, 0);
+
WRITE_ONCE(sk->sk_shutdown, 0);
sk_error_report(sk);
return 0;
@@ -3400,34 +3566,13 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}
-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
-{
- struct mptcp_subflow_context *subflow, *tmp;
- struct sock *sk = (struct sock *)msk;
-
- __mptcp_clear_xmit(sk);
-
- /* join list will be eventually flushed (with rst) at sock lock release time */
- mptcp_for_each_subflow_safe(msk, subflow, tmp)
- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
-
- __skb_queue_purge(&sk->sk_receive_queue);
- skb_rbtree_purge(&msk->out_of_order_queue);
-
- /* move all the rx fwd alloc into the sk_mem_reclaim_final in
- * inet_sock_destruct() will dispose it
- */
- mptcp_token_destroy(msk);
- mptcp_pm_destroy(msk);
-}
-
static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
/* allow the following to close even the initial subflow */
msk->free_first = 1;
- mptcp_destroy_common(msk, 0);
+ mptcp_destroy_common(msk);
sk_sockets_allocated_dec(sk);
}
@@ -3449,8 +3594,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
BIT(MPTCP_RETRANSMIT) | \
- BIT(MPTCP_FLUSH_JOIN_LIST) | \
- BIT(MPTCP_DEQUEUE))
+ BIT(MPTCP_FLUSH_JOIN_LIST))
/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
@@ -3460,9 +3604,12 @@ static void mptcp_release_cb(struct sock *sk)
for (;;) {
unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
- struct list_head join_list;
+ struct list_head join_list, skbs;
+ bool spool_bl;
+ u32 moved;
- if (!flags)
+ spool_bl = mptcp_can_spool_backlog(sk, &skbs);
+ if (!flags && !spool_bl)
break;
INIT_LIST_HEAD(&join_list);
@@ -3484,7 +3631,7 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_push_pending(sk, 0);
if (flags & BIT(MPTCP_RETRANSMIT))
__mptcp_retrans(sk);
- if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk)) {
+ if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
/* notify ack seq update */
mptcp_cleanup_rbuf(msk, 0);
sk->sk_data_ready(sk);
@@ -3492,6 +3639,8 @@ static void mptcp_release_cb(struct sock *sk)
cond_resched();
spin_lock_bh(&sk->sk_lock.slock);
+ if (spool_bl)
+ mptcp_backlog_spooled(sk, moved, &skbs);
}
if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
@@ -3617,6 +3766,23 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
+/* Can be called without holding the msk socket lock; use the callback lock
+ * to avoid {READ_,WRITE_}ONCE annotations on sk_socket.
+ */
+static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
+{
+ struct socket *sock;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ sock = sk->sk_socket;
+ write_unlock_bh(&sk->sk_callback_lock);
+ if (sock) {
+ mptcp_sock_graft(ssk, sock);
+ __mptcp_inherit_cgrp_data(sk, ssk);
+ __mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC);
+ }
+}
+
bool mptcp_finish_join(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -3632,7 +3798,9 @@ bool mptcp_finish_join(struct sock *ssk)
return false;
}
- /* active subflow, already present inside the conn_list */
+ /* Active subflow, already present inside the conn_list; is grafted
+ * either by __mptcp_subflow_connect() or accept.
+ */
if (!list_empty(&subflow->node)) {
spin_lock_bh(&msk->fallback_lock);
if (!msk->allow_subflows) {
@@ -3659,11 +3827,17 @@ bool mptcp_finish_join(struct sock *ssk)
if (ret) {
sock_hold(ssk);
list_add_tail(&subflow->node, &msk->conn_list);
+ mptcp_sock_check_graft(parent, ssk);
}
} else {
sock_hold(ssk);
list_add_tail(&subflow->node, &msk->join_list);
__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
+
+ /* In case of later failures, __mptcp_flush_join_list() will
+ * properly orphan the ssk via mptcp_close_ssk().
+ */
+ mptcp_sock_check_graft(parent, ssk);
}
mptcp_data_unlock(parent);
@@ -3724,7 +3898,7 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
return -EINVAL;
lock_sock(sk);
- if (__mptcp_move_skbs(sk))
+ if (mptcp_move_skbs(sk))
mptcp_cleanup_rbuf(msk, 0);
*karg = mptcp_inq_hint(sk);
release_sock(sk);
@@ -3924,6 +4098,69 @@ unlock:
return err;
}
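+/* After accept(): attach the existing subflows to the new msk socket and
+ * settle cgroup/memcg accounting for data already backlogged.
+ */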
+static void mptcp_graft_subflows(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (mem_cgroup_sockets_enabled) {
+ LIST_HEAD(join_list);
+
+ /* Subflows joining after __inet_accept() will get the
+ * mem CG properly initialized at mptcp_finish_join() time,
+ * but subflows pending in join_list need explicit
+ * initialization before flushing `backlog_unaccounted`
+ * or MPTCP can later unexpectedly observe unaccounted memory.
+ */
+ mptcp_data_lock(sk);
+ list_splice_init(&msk->join_list, &join_list);
+ mptcp_data_unlock(sk);
+
+ __mptcp_flush_join_list(sk, &join_list);
+ }
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+
+ /* Set ssk->sk_socket of accept()ed flows to mptcp socket.
+ * This is needed so NOSPACE flag can be set from tcp stack.
+ */
+ if (!ssk->sk_socket)
+ mptcp_sock_graft(ssk, sk->sk_socket);
+
+ if (!mem_cgroup_sk_enabled(sk))
+ goto unlock;
+
+ __mptcp_inherit_cgrp_data(sk, ssk);
+ __mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);
+
+unlock:
+ release_sock(ssk);
+ }
+
+ if (mem_cgroup_sk_enabled(sk)) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
+ int amt;
+
+ /* Account the backlog memory; prior accept() is aware of
+ * fwd and rmem only.
+ */
+ mptcp_data_lock(sk);
+ amt = sk_mem_pages(sk->sk_forward_alloc +
+ msk->backlog_unaccounted +
+ atomic_read(&sk->sk_rmem_alloc)) -
+ sk_mem_pages(sk->sk_forward_alloc +
+ atomic_read(&sk->sk_rmem_alloc));
+ msk->backlog_unaccounted = 0;
+ mptcp_data_unlock(sk);
+
+ if (amt)
+ mem_cgroup_sk_charge(sk, amt, gfp);
+ }
+}
+
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
struct proto_accept_arg *arg)
{
@@ -3971,26 +4208,17 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
msk = mptcp_sk(newsk);
msk->in_accept_queue = 0;
- /* set ssk->sk_socket of accept()ed flows to mptcp socket.
- * This is needed so NOSPACE flag can be set from tcp stack.
- */
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- if (!ssk->sk_socket)
- mptcp_sock_graft(ssk, newsock);
- }
-
+ mptcp_graft_subflows(newsk);
mptcp_rps_record_subflows(msk);
/* Do late cleanup for the first subflow as necessary. Also
* deal with bad peers not doing a complete shutdown.
*/
if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
- __mptcp_close_ssk(newsk, msk->first,
- mptcp_subflow_ctx(msk->first), 0);
if (unlikely(list_is_singular(&msk->conn_list)))
mptcp_set_state(newsk, TCP_CLOSE);
+ mptcp_close_ssk(newsk, msk->first,
+ mptcp_subflow_ctx(msk->first));
}
} else {
tcpfallback:
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 9a3429175758..9c0d17876b22 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -124,7 +124,6 @@
#define MPTCP_FLUSH_JOIN_LIST 5
#define MPTCP_SYNC_STATE 6
#define MPTCP_SYNC_SNDBUF 7
-#define MPTCP_DEQUEUE 8
struct mptcp_skb_cb {
u64 map_seq;
@@ -357,6 +356,10 @@ struct mptcp_sock {
* allow_infinite_fallback and
* allow_join
*/
+
+ struct list_head backlog_list; /* protected by the data lock */
+ u32 backlog_len;
+ u32 backlog_unaccounted;
};
#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
@@ -407,6 +410,7 @@ static inline int mptcp_space_from_win(const struct sock *sk, int win)
static inline int __mptcp_space(const struct sock *sk)
{
return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(mptcp_sk(sk)->backlog_len) -
sk_rmem_alloc_get(sk));
}
@@ -509,6 +513,7 @@ struct mptcp_subflow_context {
u64 remote_key;
u64 idsn;
u64 map_seq;
+ u64 rcv_wnd_sent;
u32 snd_isn;
u32 token;
u32 rel_write_seq;
@@ -535,16 +540,18 @@ struct mptcp_subflow_context {
send_infinite_map : 1,
remote_key_valid : 1, /* received the peer key from */
disposable : 1, /* ctx can be free at ulp release time */
+ closing : 1, /* must not pass rx data to msk anymore */
stale : 1, /* unable to snd/rcv data, do not use for xmit */
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
close_event_done : 1, /* has done the post-closed part */
mpc_drop : 1, /* the MPC option has been dropped in a rtx */
- __unused : 9;
+ __unused : 8;
bool data_avail;
bool scheduled;
bool pm_listener; /* a listener managed by the kernel PM? */
bool fully_established; /* path validated */
+ u32 lent_mem_frag;
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
@@ -644,6 +651,42 @@ mptcp_send_active_reset_reason(struct sock *sk)
tcp_send_active_reset(sk, GFP_ATOMIC, reason);
}
+/* Make the fwd mem carried by the given skb available to the msk.
+ * To be paired with a previous mptcp_subflow_lend_fwdmem() before freeing
+ * the skb or setting the skb ownership.
+ */
+static inline void mptcp_borrow_fwdmem(struct sock *sk, struct sk_buff *skb)
+{
+ struct sock *ssk = skb->sk;
+
+ /* The subflow just lent the skb fwd memory; if the subflow meanwhile
+ * closed, mptcp_close_ssk() already released the ssk rcv memory.
+ */
+ DEBUG_NET_WARN_ON_ONCE(skb->destructor);
+ sk_forward_alloc_add(sk, skb->truesize);
+ if (!ssk)
+ return;
+
+ atomic_sub(skb->truesize, &ssk->sk_rmem_alloc);
+ skb->sk = NULL;
+}
+
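+/* Track the sub-page amount of fwd memory the subflow has lent to the msk,
+ * so that the left-over of the last page can be settled at close time.
+ */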
+static inline void
+__mptcp_subflow_lend_fwdmem(struct mptcp_subflow_context *subflow, int size)
+{
+ int frag = (subflow->lent_mem_frag + size) & (PAGE_SIZE - 1);
+
+ subflow->lent_mem_frag = frag;
+}
+
+static inline void
+mptcp_subflow_lend_fwdmem(struct mptcp_subflow_context *subflow,
+ struct sk_buff *skb)
+{
+ __mptcp_subflow_lend_fwdmem(subflow, skb->truesize);
+ skb->destructor = NULL;
+}
+
static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
@@ -706,6 +749,9 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
return ret;
}
+void __mptcp_inherit_memcg(struct sock *sk, struct sock *ssk, gfp_t gfp);
+void __mptcp_inherit_cgrp_data(struct sock *sk, struct sock *ssk);
+
int mptcp_is_enabled(const struct net *net);
unsigned int mptcp_get_add_addr_timeout(const struct net *net);
int mptcp_is_checksum_enabled(const struct net *net);
@@ -846,7 +892,7 @@ static inline void mptcp_stop_tout_timer(struct sock *sk)
if (!inet_csk(sk)->icsk_mtup.probe_timestamp)
return;
- sk_stop_timer(sk, &sk->sk_timer);
+ sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
inet_csk(sk)->icsk_mtup.probe_timestamp = 0;
}
@@ -976,8 +1022,6 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
local_bh_enable();
}
-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
-
#define MPTCP_TOKEN_MAX_RETRIES 4
void __init mptcp_token_init(void);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 30961b3d1702..86ce58ae533d 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -491,6 +491,9 @@ static void subflow_set_remote_key(struct mptcp_sock *msk,
mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
subflow->iasn++;
+ /* for fallback's sake */
+ subflow->map_seq = subflow->iasn;
+
WRITE_ONCE(msk->remote_key, subflow->remote_key);
WRITE_ONCE(msk->ack_seq, subflow->iasn);
WRITE_ONCE(msk->can_ack, true);
@@ -1285,6 +1288,7 @@ static bool subflow_is_done(const struct sock *sk)
/* sched mptcp worker for subflow cleanup if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
+ const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = (struct sock *)msk;
if (likely(ssk->sk_state != TCP_CLOSE &&
@@ -1303,7 +1307,8 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
*/
if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
msk->first == ssk &&
- mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
+ mptcp_update_rcv_data_fin(msk, subflow->map_seq +
+ subflow->map_data_len, true))
mptcp_schedule_work(sk);
}
@@ -1433,9 +1438,12 @@ reset:
skb = skb_peek(&ssk->sk_receive_queue);
subflow->map_valid = 1;
- subflow->map_seq = READ_ONCE(msk->ack_seq);
subflow->map_data_len = skb->len;
subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+ subflow->map_seq = __mptcp_expand_seq(subflow->map_seq,
+ subflow->iasn +
+ TCP_SKB_CB(skb)->seq -
+ subflow->ssn_offset - 1);
WRITE_ONCE(subflow->data_avail, true);
return true;
}
@@ -1712,21 +1720,35 @@ err_out:
return err;
}
-static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
+void __mptcp_inherit_memcg(struct sock *sk, struct sock *ssk, gfp_t gfp)
+{
+ /* Only if the msk has been accepted already (and not orphaned). */
+ if (!mem_cgroup_sockets_enabled || !sk->sk_socket)
+ return;
+
+ mem_cgroup_sk_inherit(sk, ssk);
+ __sk_charge(ssk, gfp);
+}
+
+void __mptcp_inherit_cgrp_data(struct sock *sk, struct sock *ssk)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
- struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
- *child_skcd = &child->sk_cgrp_data;
+ struct sock_cgroup_data *sk_cd = &sk->sk_cgrp_data,
+ *ssk_cd = &ssk->sk_cgrp_data;
/* only the additional subflows created by kworkers have to be modified */
- if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
- cgroup_id(sock_cgroup_ptr(child_skcd))) {
- cgroup_sk_free(child_skcd);
- *child_skcd = *parent_skcd;
- cgroup_sk_clone(child_skcd);
+ if (cgroup_id(sock_cgroup_ptr(sk_cd)) !=
+ cgroup_id(sock_cgroup_ptr(ssk_cd))) {
+ cgroup_sk_free(ssk_cd);
+ *ssk_cd = *sk_cd;
+ cgroup_sk_clone(ssk_cd);
}
#endif /* CONFIG_SOCK_CGROUP_DATA */
+}
+
+static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
+{
+ __mptcp_inherit_cgrp_data(parent, child);
if (mem_cgroup_sockets_enabled)
mem_cgroup_sk_inherit(parent, child);
}
@@ -2144,6 +2166,10 @@ void __init mptcp_subflow_init(void)
tcp_prot_override = tcp_prot;
tcp_prot_override.release_cb = tcp_release_cb_override;
tcp_prot_override.diag_destroy = tcp_abort_override;
+#ifdef CONFIG_BPF_SYSCALL
+ /* Disable sockmap processing for subflows */
+ tcp_prot_override.psock_update_sk_prot = NULL;
+#endif
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
@@ -2180,6 +2206,10 @@ void __init mptcp_subflow_init(void)
tcpv6_prot_override = tcpv6_prot;
tcpv6_prot_override.release_cb = tcp_release_cb_override;
tcpv6_prot_override.diag_destroy = tcp_abort_override;
+#ifdef CONFIG_BPF_SYSCALL
+ /* Disable sockmap processing for subflows */
+ tcpv6_prot_override.psock_update_sk_prot = NULL;
+#endif
#endif
mptcp_diag_subflow_init(&subflow_ulp_ops);
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index fdacbc3c15be..d54d7da58334 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -13,8 +13,7 @@
* Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 37ebb0cb62b8..50cc492c7553 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -17,8 +17,7 @@
* Changes:
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/interrupt.h>
#include <linux/in.h>
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5ea7ab8bf4dc..90d56f92c0f6 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -19,8 +19,7 @@
* Harald Welte don't use nfcache
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 4c8fa22be88a..068702894377 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -13,8 +13,7 @@
* Changes:
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/init.h>
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 75f4c231f4a0..bb7aca4601ff 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -30,8 +30,7 @@
*
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/ip.h>
#include <linux/slab.h>
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 93a925f1ed9b..77f4f637ff67 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -12,8 +12,7 @@
* get_stats()) do the per cpu summing.
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/kernel.h>
#include <linux/jiffies.h>
diff --git a/net/netfilter/ipvs/ip_vs_fo.c b/net/netfilter/ipvs/ip_vs_fo.c
index ab117e5bc34e..d657b47c6511 100644
--- a/net/netfilter/ipvs/ip_vs_fo.c
+++ b/net/netfilter/ipvs/ip_vs_fo.c
@@ -8,8 +8,7 @@
* Kenny Mathis : added initial functionality based on weight
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 206c6700e200..b315c608fda4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -16,8 +16,7 @@
* Author: Wouter Gadeyne
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 156181a3bacd..e6c8ed0c92f6 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -34,8 +34,7 @@
* me to write this module.
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/ip.h>
#include <linux/slab.h>
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index a021e6aba3d7..a25cf7bb6185 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -32,8 +32,7 @@
*
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/ip.h>
#include <linux/module.h>
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index c2764505e380..38cc38c5d8bb 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -9,8 +9,7 @@
* Wensong Zhang : added any dest with weight=0 is quiesced
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_mh.c b/net/netfilter/ipvs/ip_vs_mh.c
index e3d7f5c879ce..f61f54004c9e 100644
--- a/net/netfilter/ipvs/ip_vs_mh.c
+++ b/net/netfilter/ipvs/ip_vs_mh.c
@@ -17,8 +17,7 @@ https://www.usenix.org/system/files/conference/nsdi16/nsdi16-paper-eisenbud.pdf
*
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/ip.h>
#include <linux/slab.h>
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 08adcb222986..81974f69e5bb 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -30,8 +30,7 @@
* PASV response can not be NAT-ed) but Active FTP should work
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/types.h>
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index ed7f5c889b41..ada158c610ce 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -26,8 +26,7 @@
*
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
index c7708b809700..c5c67df80a0b 100644
--- a/net/netfilter/ipvs/ip_vs_ovf.c
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -12,8 +12,7 @@
* active connections
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 166c669f0763..3035079ebd99 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index e4ce1d9a63f9..85f31d71e29a 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index a9fd1d3fc2cb..fd9dbca24c85 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -8,8 +8,7 @@
* Changes:
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 89602c16f6b6..44e14acc187e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -6,8 +6,7 @@
* Wensong Zhang <wensong@linuxvirtualserver.org>
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/in.h>
#include <linux/ip.h>
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 7da51390cea6..f68a1533ee45 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -13,8 +13,7 @@
* protocol ip_vs_proto_data and is handled by netns
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/kernel.h>
#include <linux/ip.h>
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 68260d91c988..0f0107c80dd2 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -9,8 +9,7 @@
* Network name space (netns) aware.
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/in.h>
#include <linux/ip.h>
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 6baa34dff9f0..4125ee561cdc 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -14,8 +14,7 @@
* Wensong Zhang : added any dest with weight=0 is quiesced
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index d4903723be7e..c6e421c4e299 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -12,8 +12,7 @@
* Changes:
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index a46f99a56618..245a323c84cd 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -30,8 +30,7 @@
*
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 92e77d7a6b50..0e85e07e23b9 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -32,8 +32,7 @@
*
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/ip.h>
#include <linux/slab.h>
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 5a0c6f42bd8f..54dd1514ac45 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -32,8 +32,7 @@
* Persistence support, fwmark and time-out.
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/net/netfilter/ipvs/ip_vs_twos.c b/net/netfilter/ipvs/ip_vs_twos.c
index 8d5419edde50..dbb7f5fd4688 100644
--- a/net/netfilter/ipvs/ip_vs_twos.c
+++ b/net/netfilter/ipvs/ip_vs_twos.c
@@ -4,8 +4,7 @@
* Authors: Darby Payne <darby.payne@applovin.com>
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 9fa500927c0a..9da445ca09a1 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -14,8 +14,7 @@
* Wensong Zhang : added any dest with weight=0 is quiesced
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 85ce0d04afac..99f09cbf2d9b 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -13,8 +13,7 @@
* with weight 0 when all weights are zero
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 95af252b2939..3162ce3c2640 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -21,8 +21,7 @@
* - the only place where we can see skb->sk != NULL
*/
-#define KMSG_COMPONENT "IPVS"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "IPVS: " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
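The IPVS hunks above all make the same mechanical change: the two-step KMSG_COMPONENT/pr_fmt() definition is folded into a single pr_fmt() define. Behaviour is unchanged because pr_fmt() is expanded at every pr_*() call site before the format string reaches printk, so each message still carries the "IPVS: " prefix. A minimal userspace sketch of the mechanism; the pr_info() below is a stand-in (the real kernel macro routes through printk() with a log level):

#include <stdio.h>

#define pr_fmt(fmt) "IPVS: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("scheduler %s registered\n", "wrr");	/* "IPVS: scheduler wrr registered" */
	pr_info("no destination available\n");		/* "IPVS: no destination available" */
	return 0;
}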
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2832e0794197..792ca44a461d 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -572,69 +572,6 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
-static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
- const struct nlattr *a)
-{
- struct nshhdr *nh;
- size_t length;
- int err;
- u8 flags;
- u8 ttl;
- int i;
-
- struct ovs_key_nsh key;
- struct ovs_key_nsh mask;
-
- err = nsh_key_from_nlattr(a, &key, &mask);
- if (err)
- return err;
-
- /* Make sure the NSH base header is there */
- if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
- return -ENOMEM;
-
- nh = nsh_hdr(skb);
- length = nsh_hdr_len(nh);
-
- /* Make sure the whole NSH header is there */
- err = skb_ensure_writable(skb, skb_network_offset(skb) +
- length);
- if (unlikely(err))
- return err;
-
- nh = nsh_hdr(skb);
- skb_postpull_rcsum(skb, nh, length);
- flags = nsh_get_flags(nh);
- flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
- flow_key->nsh.base.flags = flags;
- ttl = nsh_get_ttl(nh);
- ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
- flow_key->nsh.base.ttl = ttl;
- nsh_set_flags_and_ttl(nh, flags, ttl);
- nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
- mask.base.path_hdr);
- flow_key->nsh.base.path_hdr = nh->path_hdr;
- switch (nh->mdtype) {
- case NSH_M_TYPE1:
- for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
- nh->md1.context[i] =
- OVS_MASKED(nh->md1.context[i], key.context[i],
- mask.context[i]);
- }
- memcpy(flow_key->nsh.context, nh->md1.context,
- sizeof(nh->md1.context));
- break;
- case NSH_M_TYPE2:
- memset(flow_key->nsh.context, 0,
- sizeof(flow_key->nsh.context));
- break;
- default:
- return -EINVAL;
- }
- skb_postpush_rcsum(skb, nh, length);
- return 0;
-}
-
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
@@ -1130,10 +1067,6 @@ static int execute_masked_set_action(struct sk_buff *skb,
get_mask(a, struct ovs_key_ethernet *));
break;
- case OVS_KEY_ATTR_NSH:
- err = set_nsh(skb, flow_key, a);
- break;
-
case OVS_KEY_ATTR_IPV4:
err = set_ipv4(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_ipv4 *));
@@ -1170,6 +1103,7 @@ static int execute_masked_set_action(struct sk_buff *skb,
case OVS_KEY_ATTR_CT_LABELS:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
+ case OVS_KEY_ATTR_NSH:
err = -EINVAL;
break;
}
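For context on the removed set_nsh() path: masked set actions rewrite only the bits selected by a mask and keep the remaining bits of the field as found in the packet. A userspace sketch of that idiom follows; if memory serves, the in-tree OVS_MASKED() macro relies on the new value already being pre-masked during netlink validation and therefore skips the explicit "& mask" on it, but the effect is the same.

#include <stdio.h>
#include <stdint.h>

/* take the masked bits from 'new', keep the unmasked bits of 'old' */
#define MASKED(old, new, mask) (((new) & (mask)) | ((old) & ~(mask)))

int main(void)
{
	uint8_t ttl = 0x40, new_ttl = 0xff, mask = 0x0f;

	printf("masked ttl: 0x%02x\n", MASKED(ttl, new_ttl, mask));	/* 0x4f */
	return 0;
}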
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index ad64bb9ab5e2..1cb4f97335d8 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1305,6 +1305,11 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
return 0;
}
+/*
+ * Constructs NSH header 'nh' from attributes of OVS_ACTION_ATTR_PUSH_NSH,
+ * where 'nh' points to a memory block of 'size' bytes. It's assumed that
+ * attributes were previously validated with validate_push_nsh().
+ */
int nsh_hdr_from_nlattr(const struct nlattr *attr,
struct nshhdr *nh, size_t size)
{
@@ -1314,8 +1319,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr,
u8 ttl = 0;
int mdlen = 0;
- /* validate_nsh has check this, so we needn't do duplicate check here
- */
if (size < NSH_BASE_HDR_LEN)
return -ENOBUFS;
@@ -1359,46 +1362,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr,
return 0;
}
-int nsh_key_from_nlattr(const struct nlattr *attr,
- struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
-{
- struct nlattr *a;
- int rem;
-
- /* validate_nsh has check this, so we needn't do duplicate check here
- */
- nla_for_each_nested(a, attr, rem) {
- int type = nla_type(a);
-
- switch (type) {
- case OVS_NSH_KEY_ATTR_BASE: {
- const struct ovs_nsh_key_base *base = nla_data(a);
- const struct ovs_nsh_key_base *base_mask = base + 1;
-
- nsh->base = *base;
- nsh_mask->base = *base_mask;
- break;
- }
- case OVS_NSH_KEY_ATTR_MD1: {
- const struct ovs_nsh_key_md1 *md1 = nla_data(a);
- const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
-
- memcpy(nsh->context, md1->context, sizeof(*md1));
- memcpy(nsh_mask->context, md1_mask->context,
- sizeof(*md1_mask));
- break;
- }
- case OVS_NSH_KEY_ATTR_MD2:
- /* Not supported yet */
- return -ENOTSUPP;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int nsh_key_put_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool is_push_nsh, bool log)
@@ -2839,17 +2802,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
return err;
}
-static bool validate_nsh(const struct nlattr *attr, bool is_mask,
- bool is_push_nsh, bool log)
+static bool validate_push_nsh(const struct nlattr *attr, bool log)
{
struct sw_flow_match match;
struct sw_flow_key key;
- int ret = 0;
ovs_match_init(&match, &key, true, NULL);
- ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
- is_push_nsh, log);
- return !ret;
+ return !nsh_key_put_from_nlattr(attr, &match, false, true, log);
}
/* Return false if there are any non-masked bits set.
@@ -2997,13 +2956,6 @@ static int validate_set(const struct nlattr *a,
break;
- case OVS_KEY_ATTR_NSH:
- if (eth_type != htons(ETH_P_NSH))
- return -EINVAL;
- if (!validate_nsh(nla_data(a), masked, false, log))
- return -EINVAL;
- break;
-
default:
return -EINVAL;
}
@@ -3437,7 +3389,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
}
mac_proto = MAC_PROTO_NONE;
- if (!validate_nsh(nla_data(a), false, true, true))
+ if (!validate_push_nsh(nla_data(a), log))
return -EINVAL;
break;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index fe7f77fc5f18..ff8cdecbe346 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -65,8 +65,6 @@ int ovs_nla_put_actions(const struct nlattr *attr,
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
-int nsh_key_from_nlattr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
- struct ovs_key_nsh *nsh_mask);
int nsh_hdr_from_nlattr(const struct nlattr *attr, struct nshhdr *nh,
size_t size);
diff --git a/net/psp/psp-nl-gen.c b/net/psp/psp-nl-gen.c
index 73f8b06d66f0..22a48d0fa378 100644
--- a/net/psp/psp-nl-gen.c
+++ b/net/psp/psp-nl-gen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/psp.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/net/psp/psp-nl-gen.h b/net/psp/psp-nl-gen.h
index 5bc3b5d5a53e..599c5f1c82f2 100644
--- a/net/psp/psp-nl-gen.h
+++ b/net/psp/psp-nl-gen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/psp.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_PSP_GEN_H
#define _LINUX_PSP_GEN_H
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 396b576390d0..c2b5bc19e091 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -47,12 +47,10 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
filter = rcu_dereference(prog->filter);
if (at_ingress) {
__skb_push(skb, skb->mac_len);
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(filter, skb);
+ filter_res = bpf_prog_run_data_pointers(filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(filter, skb);
+ filter_res = bpf_prog_run_data_pointers(filter, skb);
}
if (unlikely(!skb->tstamp && skb->tstamp_type))
skb->tstamp_type = SKB_CLOCK_REALTIME;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6749a4a9a9cd..2b6ac7069dc1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -948,9 +948,9 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
return err & NF_VERDICT_MASK;
if (action & BIT(NF_NAT_MANIP_SRC))
- tc_skb_cb(skb)->post_ct_snat = 1;
+ qdisc_skb_cb(skb)->post_ct_snat = 1;
if (action & BIT(NF_NAT_MANIP_DST))
- tc_skb_cb(skb)->post_ct_dnat = 1;
+ qdisc_skb_cb(skb)->post_ct_dnat = 1;
return err;
#else
@@ -986,7 +986,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tcf_action_update_bstats(&c->common, skb);
if (clear) {
- tc_skb_cb(skb)->post_ct = false;
+ qdisc_skb_cb(skb)->post_ct = false;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
nf_ct_put(ct);
@@ -1097,7 +1097,7 @@ do_nat:
out_push:
skb_push_rcsum(skb, nh_ofs);
- tc_skb_cb(skb)->post_ct = true;
+ qdisc_skb_cb(skb)->post_ct = true;
tc_skb_cb(skb)->zone = p->zone;
out_clear:
if (defrag)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 7c6975632fc2..1dfdda6c2d4c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -649,9 +649,9 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
memset(&opt, 0, sizeof(opt));
- opt.index = ife->tcf_index,
- opt.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
- opt.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
+ opt.index = ife->tcf_index;
+ opt.refcnt = refcount_read(&ife->tcf_refcnt) - ref;
+ opt.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind;
spin_lock_bh(&ife->tcf_lock);
opt.action = ife->tcf_action;
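The act_ife fix is about statement hygiene: the old lines ended in commas, so the three assignments formed one expression chained with the comma operator rather than three statements. It happens to compute the same values here, but the comma form is easy to misread and behaves differently once control flow is wrapped around it. A small userspace illustration:

#include <stdio.h>

struct opt { int index, refcnt, bindcnt; };

int main(void)
{
	struct opt o = { 0 };

	/* what the old code did: a single statement built with the comma operator */
	o.index = 1, o.refcnt = 2, o.bindcnt = 3;

	/* what was intended: three independent statements; unlike the comma form,
	 * putting an if () in front of the first line would no longer silently
	 * make the other two assignments conditional as well */
	o.index = 1;
	o.refcnt = 2;
	o.bindcnt = 3;

	printf("%d %d %d\n", o.index, o.refcnt, o.bindcnt);
	return 0;
}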
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f751cd5eeac8..ebca4b926dcf 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1872,9 +1872,9 @@ int tcf_classify(struct sk_buff *skb,
}
ext->chain = last_executed_chain;
ext->mru = cb->mru;
- ext->post_ct = cb->post_ct;
- ext->post_ct_snat = cb->post_ct_snat;
- ext->post_ct_dnat = cb->post_ct_dnat;
+ ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+ ext->post_ct_snat = qdisc_skb_cb(skb)->post_ct_snat;
+ ext->post_ct_dnat = qdisc_skb_cb(skb)->post_ct_dnat;
ext->zone = cb->zone;
}
}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 7fbe42f0e5c2..a32754a2658b 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -97,12 +97,10 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
} else if (at_ingress) {
/* It is safe to push/pull even if skb_shared() */
__skb_push(skb, skb->mac_len);
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(prog->filter, skb);
+ filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(prog->filter, skb);
+ filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
}
if (unlikely(!skb->tstamp && skb->tstamp_type))
skb->tstamp_type = SKB_CLOCK_REALTIME;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 099ff6a3e1f5..7669371c1354 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -326,7 +326,7 @@ TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
struct tcf_result *res)
{
struct cls_fl_head *head = rcu_dereference_bh(tp->root);
- bool post_ct = tc_skb_cb(skb)->post_ct;
+ bool post_ct = qdisc_skb_cb(skb)->post_ct;
u16 zone = tc_skb_cb(skb)->zone;
struct fl_flow_key skb_key;
struct fl_flow_mask *mask;
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
index 5337bc462755..2d27f91d8441 100644
--- a/net/sched/em_canid.c
+++ b/net/sched/em_canid.c
@@ -99,6 +99,9 @@ static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
int i;
const struct can_filter *lp;
+ if (!pskb_may_pull(skb, CAN_MTU))
+ return 0;
+
can_id = em_canid_get_id(skb);
if (can_id & CAN_EFF_FLAG) {
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index 64b637f18bc7..48c1bce74f49 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -22,9 +22,12 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct tcf_em_cmp *cmp = (struct tcf_em_cmp *) em->data;
- unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
+ unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer);
u32 val = 0;
+ if (!ptr)
+ return 0;
+ ptr += cmp->off;
if (!tcf_valid_offset(skb, ptr, cmp->align))
return 0;
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 4f9f21a05d5e..c65ffa5fff94 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -42,6 +42,8 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
struct nbyte_data *nbyte = (struct nbyte_data *) em->data;
unsigned char *ptr = tcf_get_base_ptr(skb, nbyte->hdr.layer);
+ if (!ptr)
+ return 0;
ptr += nbyte->hdr.off;
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 6b3d0af72c39..692e2be1793e 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -29,12 +29,19 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
struct tcf_pkt_info *info)
{
struct text_match *tm = EM_TEXT_PRIV(m);
+ unsigned char *ptr;
int from, to;
- from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
+ ptr = tcf_get_base_ptr(skb, tm->from_layer);
+ if (!ptr)
+ return 0;
+ from = ptr - skb->data;
from += tm->from_offset;
- to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
+ ptr = tcf_get_base_ptr(skb, tm->to_layer);
+ if (!ptr)
+ return 0;
+ to = ptr - skb->data;
to += tm->to_offset;
return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
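The em_cmp, em_nbyte and em_text fixes share one theme: tcf_get_base_ptr() can evidently return NULL when the requested header layer is not available, and the old code added an offset to the result before any check, which is undefined pointer arithmetic and can defeat the later tcf_valid_offset() bounds check. A minimal userspace sketch of the safe ordering, with get_base() standing in for tcf_get_base_ptr():

#include <stdio.h>
#include <stddef.h>

static unsigned char *get_base(unsigned char *pkt, int layer)
{
	/* stand-in for tcf_get_base_ptr(): only the link layer is known here */
	return layer == 2 ? pkt : NULL;
}

static int match(unsigned char *pkt, int layer, size_t off)
{
	unsigned char *ptr = get_base(pkt, layer);

	if (!ptr)		/* bail out before any pointer arithmetic */
		return 0;
	ptr += off;
	return *ptr == 0x45;
}

int main(void)
{
	unsigned char pkt[] = { 0x45, 0x00, 0x00, 0x28 };

	printf("layer 2 match: %d\n", match(pkt, 2, 0));	/* 1 */
	printf("layer 3 match: %d\n", match(pkt, 3, 0));	/* 0: header missing */
	return 0;
}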
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 32bacfc314c2..0ea9440f68c6 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1398,15 +1398,15 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
const struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int hdr_len, last_len = 0;
u32 off = skb_network_offset(skb);
+ u16 segs = qdisc_pkt_segs(skb);
u32 len = qdisc_pkt_len(skb);
- u16 segs = 1;
q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
- if (!shinfo->gso_size)
+ if (segs == 1)
return cake_calc_overhead(q, len, off);
- /* borrowed from qdisc_pkt_len_init() */
+ /* borrowed from qdisc_pkt_len_segs_init() */
if (!skb->encapsulation)
hdr_len = skb_transport_offset(skb);
else
@@ -1430,12 +1430,6 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
hdr_len += sizeof(struct udphdr);
}
- if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
- segs = DIV_ROUND_UP(skb->len - hdr_len,
- shinfo->gso_size);
- else
- segs = shinfo->gso_segs;
-
len = shinfo->gso_size + hdr_len;
last_len = skb->len - shinfo->gso_size * (segs - 1);
@@ -1788,7 +1782,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
- if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+ if (qdisc_pkt_segs(skb) > 1 && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
unsigned int slen = 0, numsegs = 0;
@@ -1800,6 +1794,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
cobalt_set_enqueue_time(segs, now);
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
segs);
@@ -2188,7 +2183,7 @@ retry:
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
- kfree_skb_reason(skb, reason);
+ qdisc_dequeue_drop(sch, skb, reason);
if (q->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
@@ -2729,6 +2724,8 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
int i, j, err;
sch->limit = 10240;
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
q->flow_mode = CAKE_FLOW_TRIPLE;
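The cake, qfq, netem, taprio, tbf and dualpi2 hunks stop recomputing the GSO segment count inside each qdisc and instead read a pkt_segs value cached in the qdisc skb control block via qdisc_pkt_segs() (segmented clones are explicitly reset to 1). The arithmetic the removed cake code performed for untrusted SKB_GSO_DODGY packets was a plain round-up division of the payload by the segment size:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len = 4436, hdr_len = 66, gso_size = 1448;

	/* payload split into gso_size chunks; the last chunk may be short */
	unsigned int segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

	printf("segments: %u\n", segs);	/* 4370 bytes / 1448 -> 4 segments */
	return 0;
}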
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index fa0314679e43..c6551578f1cf 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -52,7 +52,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
+ qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -182,6 +182,8 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt,
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
return 0;
}
diff --git a/net/sched/sch_dualpi2.c b/net/sched/sch_dualpi2.c
index 4b975feb52b1..6d7e6389758d 100644
--- a/net/sched/sch_dualpi2.c
+++ b/net/sched/sch_dualpi2.c
@@ -475,6 +475,7 @@ static int dualpi2_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
* (3) Enqueue fragment & set ts in dualpi2_enqueue_skb
*/
qdisc_skb_cb(nskb)->pkt_len = nskb->len;
+ qdisc_skb_cb(nskb)->pkt_segs = 1;
dualpi2_skb_cb(nskb)->classified =
dualpi2_skb_cb(skb)->classified;
dualpi2_skb_cb(nskb)->ect = dualpi2_skb_cb(skb)->ect;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index fee922da2f99..6e5f2f4f2415 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -480,7 +480,10 @@ static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
struct sk_buff *skb)
{
if (skb == flow->head) {
- flow->head = skb->next;
+ struct sk_buff *next = skb->next;
+
+ prefetch(next);
+ flow->head = next;
} else {
rb_erase(&skb->rbnode, &flow->t_root);
skb->dev = qdisc_dev(sch);
@@ -497,6 +500,7 @@ static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
skb_mark_not_on_list(skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
+ qdisc_bstats_update(sch, skb);
}
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
@@ -711,6 +715,7 @@ begin:
goto begin;
}
prefetch(&skb->end);
+ fq_dequeue_skb(sch, f, skb);
if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
INET_ECN_set_ce(skb);
q->stat_ce_mark++;
@@ -718,7 +723,6 @@ begin:
if (--f->qlen == 0)
q->inactive_flows++;
q->band_pkt_count[fq_skb_cb(skb)->band]--;
- fq_dequeue_skb(sch, f, skb);
} else {
head->first = f->next;
/* force a pass through old_flows to prevent starvation */
@@ -776,7 +780,6 @@ begin:
f->time_next_packet = now + len;
}
out:
- qdisc_bstats_update(sch, skb);
return skb;
}
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a14142392939..dc187c7f06b1 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -275,7 +275,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
+ qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -519,6 +519,9 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
sch->flags |= TCQ_F_CAN_BYPASS;
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
return 0;
alloc_failure:
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index eafc316ae319..32a5f3304046 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -429,6 +429,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff *segs;
netdev_features_t features = netif_skb_features(skb);
+ qdisc_skb_cb(skb)->pkt_segs = 1;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 2255355e51d3..d920f57dc6d7 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1250,7 +1250,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
}
- gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ gso_segs = qdisc_pkt_segs(skb);
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 39b735386996..300d577b3286 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -595,6 +595,7 @@ static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
slen += segs->len;
/* FIXME: we should be segmenting to a smaller size
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 4c977f049670..f2340164f579 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -221,6 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
skb_mark_not_on_list(segs);
seg_len = segs->len;
qdisc_skb_cb(segs)->pkt_len = seg_len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
diff --git a/net/shaper/shaper_nl_gen.c b/net/shaper/shaper_nl_gen.c
index 204c8ae8c7b1..e8cccc4c1180 100644
--- a/net/shaper/shaper_nl_gen.c
+++ b/net/shaper/shaper_nl_gen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/net_shaper.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/net/shaper/shaper_nl_gen.h b/net/shaper/shaper_nl_gen.h
index cb7f9026fc23..ec41c90431a4 100644
--- a/net/shaper/shaper_nl_gen.h
+++ b/net/shaper/shaper_nl_gen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/net_shaper.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_NET_SHAPER_GEN_H
#define _LINUX_NET_SHAPER_GEN_H
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index e388de8dca09..f97f77b041d9 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -16,8 +16,7 @@
* based on prototype from Frank Blaschka
*/
-#define KMSG_COMPONENT "smc"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "smc: " fmt
#include <linux/module.h>
#include <linux/socket.h>
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3b44cadaed96..c627efc3698d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -733,8 +733,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
/* ---- Socket is dead now and most probably destroyed ---- */
- if (READ_ONCE(unix_tot_inflight))
- unix_gc(); /* Garbage collect fds */
+ unix_schedule_gc(NULL);
}
struct unix_peercred {
@@ -2099,8 +2098,6 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
if (err < 0)
return err;
- wait_for_unix_gc(scm.fp);
-
if (msg->msg_flags & MSG_OOB) {
err = -EOPNOTSUPP;
goto out;
@@ -2394,8 +2391,6 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
if (err < 0)
return err;
- wait_for_unix_gc(scm.fp);
-
if (msg->msg_flags & MSG_OOB) {
err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
@@ -2943,6 +2938,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
u = unix_sk(sk);
+redo:
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
@@ -2954,7 +2950,6 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
struct sk_buff *skb, *last;
int chunk;
-redo:
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
@@ -3004,7 +2999,6 @@ again:
goto out;
}
- mutex_lock(&u->iolock);
goto redo;
unlock:
unix_state_unlock(sk);
diff --git a/net/unix/af_unix.h b/net/unix/af_unix.h
index 59db179df9bb..c4f1b2da363d 100644
--- a/net/unix/af_unix.h
+++ b/net/unix/af_unix.h
@@ -24,14 +24,12 @@ struct unix_skb_parms {
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
/* GC for SCM_RIGHTS */
-extern unsigned int unix_tot_inflight;
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver);
void unix_del_edges(struct scm_fp_list *fpl);
void unix_update_edges(struct unix_sock *receiver);
int unix_prepare_fpl(struct scm_fp_list *fpl);
void unix_destroy_fpl(struct scm_fp_list *fpl);
-void unix_gc(void);
-void wait_for_unix_gc(struct scm_fp_list *fpl);
+void unix_schedule_gc(struct user_struct *user);
/* SOCK_DIAG */
long unix_inq_len(struct sock *sk);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 65396a4e1b07..78323d43e63e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -121,8 +121,13 @@ static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
return edge->successor->vertex;
}
-static bool unix_graph_maybe_cyclic;
-static bool unix_graph_grouped;
+enum {
+ UNIX_GRAPH_NOT_CYCLIC,
+ UNIX_GRAPH_MAYBE_CYCLIC,
+ UNIX_GRAPH_CYCLIC,
+};
+
+static unsigned char unix_graph_state;
static void unix_update_graph(struct unix_vertex *vertex)
{
@@ -132,8 +137,7 @@ static void unix_update_graph(struct unix_vertex *vertex)
if (!vertex)
return;
- unix_graph_maybe_cyclic = true;
- unix_graph_grouped = false;
+ WRITE_ONCE(unix_graph_state, UNIX_GRAPH_MAYBE_CYCLIC);
}
static LIST_HEAD(unix_unvisited_vertices);
@@ -196,7 +200,6 @@ static void unix_free_vertices(struct scm_fp_list *fpl)
}
static DEFINE_SPINLOCK(unix_gc_lock);
-unsigned int unix_tot_inflight;
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
@@ -222,7 +225,6 @@ void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
} while (i < fpl->count_unix);
receiver->scm_stat.nr_unix_fds += fpl->count_unix;
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
@@ -253,7 +255,6 @@ void unix_del_edges(struct scm_fp_list *fpl)
receiver = fpl->edges[0].successor;
receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
}
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
@@ -299,6 +300,8 @@ int unix_prepare_fpl(struct scm_fp_list *fpl)
if (!fpl->edges)
goto err;
+ unix_schedule_gc(fpl->user);
+
return 0;
err:
@@ -404,9 +407,11 @@ static bool unix_scc_cyclic(struct list_head *scc)
static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
-static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
- struct sk_buff_head *hitlist)
+static unsigned long __unix_walk_scc(struct unix_vertex *vertex,
+ unsigned long *last_index,
+ struct sk_buff_head *hitlist)
{
+ unsigned long cyclic_sccs = 0;
LIST_HEAD(vertex_stack);
struct unix_edge *edge;
LIST_HEAD(edge_stack);
@@ -497,8 +502,8 @@ prev_vertex:
if (unix_vertex_max_scc_index < vertex->scc_index)
unix_vertex_max_scc_index = vertex->scc_index;
- if (!unix_graph_maybe_cyclic)
- unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+ if (unix_scc_cyclic(&scc))
+ cyclic_sccs++;
}
list_del(&scc);
@@ -507,13 +512,17 @@ prev_vertex:
/* Need backtracking ? */
if (!list_empty(&edge_stack))
goto prev_vertex;
+
+ return cyclic_sccs;
}
+static unsigned long unix_graph_cyclic_sccs;
+
static void unix_walk_scc(struct sk_buff_head *hitlist)
{
unsigned long last_index = UNIX_VERTEX_INDEX_START;
+ unsigned long cyclic_sccs = 0;
- unix_graph_maybe_cyclic = false;
unix_vertex_max_scc_index = UNIX_VERTEX_INDEX_START;
/* Visit every vertex exactly once.
@@ -523,18 +532,20 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
struct unix_vertex *vertex;
vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
- __unix_walk_scc(vertex, &last_index, hitlist);
+ cyclic_sccs += __unix_walk_scc(vertex, &last_index, hitlist);
}
list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
- unix_graph_grouped = true;
+ WRITE_ONCE(unix_graph_cyclic_sccs, cyclic_sccs);
+ WRITE_ONCE(unix_graph_state,
+ cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC);
}
static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{
- unix_graph_maybe_cyclic = false;
+ unsigned long cyclic_sccs = unix_graph_cyclic_sccs;
while (!list_empty(&unix_unvisited_vertices)) {
struct unix_vertex *vertex;
@@ -551,34 +562,38 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
scc_dead = unix_vertex_dead(vertex);
}
- if (scc_dead)
+ if (scc_dead) {
+ cyclic_sccs--;
unix_collect_skb(&scc, hitlist);
- else if (!unix_graph_maybe_cyclic)
- unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+ }
list_del(&scc);
}
list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+
+ WRITE_ONCE(unix_graph_cyclic_sccs, cyclic_sccs);
+ WRITE_ONCE(unix_graph_state,
+ cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC);
}
static bool gc_in_progress;
-static void __unix_gc(struct work_struct *work)
+static void unix_gc(struct work_struct *work)
{
struct sk_buff_head hitlist;
struct sk_buff *skb;
spin_lock(&unix_gc_lock);
- if (!unix_graph_maybe_cyclic) {
+ if (unix_graph_state == UNIX_GRAPH_NOT_CYCLIC) {
spin_unlock(&unix_gc_lock);
goto skip_gc;
}
__skb_queue_head_init(&hitlist);
- if (unix_graph_grouped)
+ if (unix_graph_state == UNIX_GRAPH_CYCLIC)
unix_walk_scc_fast(&hitlist);
else
unix_walk_scc(&hitlist);
@@ -595,36 +610,27 @@ skip_gc:
WRITE_ONCE(gc_in_progress, false);
}
-static DECLARE_WORK(unix_gc_work, __unix_gc);
+static DECLARE_WORK(unix_gc_work, unix_gc);
-void unix_gc(void)
-{
- WRITE_ONCE(gc_in_progress, true);
- queue_work(system_dfl_wq, &unix_gc_work);
-}
+#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
-#define UNIX_INFLIGHT_TRIGGER_GC 16000
-#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
-
-void wait_for_unix_gc(struct scm_fp_list *fpl)
+void unix_schedule_gc(struct user_struct *user)
{
- /* If number of inflight sockets is insane,
- * force a garbage collect right now.
- *
- * Paired with the WRITE_ONCE() in unix_inflight(),
- * unix_notinflight(), and __unix_gc().
- */
- if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
- !READ_ONCE(gc_in_progress))
- unix_gc();
+ if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
+ return;
/* Penalise users who want to send AF_UNIX sockets
* but whose sockets have not been received yet.
*/
- if (!fpl || !fpl->count_unix ||
- READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+ if (user &&
+ READ_ONCE(user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
return;
- if (READ_ONCE(gc_in_progress))
+ if (!READ_ONCE(gc_in_progress)) {
+ WRITE_ONCE(gc_in_progress, true);
+ queue_work(system_dfl_wq, &unix_gc_work);
+ }
+
+ if (user && READ_ONCE(unix_graph_cyclic_sccs))
flush_work(&unix_gc_work);
}
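The af_unix garbage-collector rework drops the global unix_tot_inflight counter and the two booleans (maybe-cyclic, grouped) in favour of a tri-state graph state plus a count of cyclic SCCs, and callers now go through unix_schedule_gc(): the release path passes NULL, while senders pass their user struct so that only those with an unusually large in-flight backlog are throttled. A userspace sketch of the scheduling logic alone (the SCC walk is elided; the constants and bodies are illustrative, not the kernel code):

#include <stdio.h>
#include <stdbool.h>

enum { GRAPH_NOT_CYCLIC, GRAPH_MAYBE_CYCLIC, GRAPH_CYCLIC };

#define SANE_USER_QUOTA 1024	/* stand-in for UNIX_INFLIGHT_SANE_USER */

static int graph_state = GRAPH_NOT_CYCLIC;
static unsigned long cyclic_sccs;
static bool gc_queued;

static void schedule_gc(bool from_sender, unsigned long sender_inflight)
{
	if (graph_state == GRAPH_NOT_CYCLIC)
		return;				/* nothing can leak, skip entirely */

	/* only penalise senders sitting on a huge in-flight fd backlog */
	if (from_sender && sender_inflight < SANE_USER_QUOTA)
		return;

	if (!gc_queued) {
		gc_queued = true;		/* kernel: queue_work(system_dfl_wq, ...) */
		printf("gc queued\n");
	}

	/* such a sender additionally waits for the run if cycles exist */
	if (from_sender && cyclic_sccs)
		printf("sender waits for gc to finish\n");
}

int main(void)
{
	schedule_gc(false, 0);			/* graph not cyclic: no-op */

	graph_state = GRAPH_MAYBE_CYCLIC;	/* fd-carrying skbs were queued */
	cyclic_sccs = 1;
	schedule_gc(true, 5000);		/* heavy sender: queue and wait */
	return 0;
}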
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 72bb6b7ed386..adcba1b7bf74 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1661,18 +1661,40 @@ static int vsock_connect(struct socket *sock, struct sockaddr_unsized *addr,
timeout = schedule_timeout(timeout);
lock_sock(sk);
- if (signal_pending(current)) {
- err = sock_intr_errno(timeout);
- sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
- sock->state = SS_UNCONNECTED;
- vsock_transport_cancel_pkt(vsk);
- vsock_remove_connected(vsk);
- goto out_wait;
- } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
- err = -ETIMEDOUT;
+ /* Connection established. Whatever happens to the socket once we
+ * release it is not connect()'s concern, so there is no need to go
+ * into the signal and timeout handling. Call it a day.
+ *
+ * Note that allowing an already established socket to be "reset"
+ * here would be racy and insecure.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED)
+ break;
+
+ /* If the connection was _not_ established and a signal or timeout
+ * arrived, we want the socket's state reset. User space may want
+ * to retry.
+ *
+ * sk_state != TCP_ESTABLISHED implies that socket is not on
+ * vsock_connected_table. We keep the binding and the transport
+ * assigned.
+ */
+ if (signal_pending(current) || timeout == 0) {
+ err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout);
+
+ /* Listener might have already responded with
+ * VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our
+ * sk_state == TCP_SYN_SENT, which hereby we break.
+ * In such case VIRTIO_VSOCK_OP_RST will follow.
+ */
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
+
+ /* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
+ * transport->connect().
+ */
vsock_transport_cancel_pkt(vsk);
+
goto out_wait;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index bcfd400e9cf8..f093c3453f64 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -36,20 +36,13 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET 32
-struct xsk_addr_node {
- u64 addr;
- struct list_head addr_node;
-};
-
-struct xsk_addr_head {
+struct xsk_addrs {
u32 num_descs;
- struct list_head addrs_list;
+ u64 addrs[MAX_SKB_FRAGS + 1];
};
static struct kmem_cache *xsk_tx_generic_cache;
-#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))
-
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -557,29 +550,68 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
return ret;
}
+static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
+{
+ return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
+{
+ return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
+static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
+}
+
+static void xsk_inc_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (!xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ xsk_addr->num_descs++;
+ }
+}
+
+static u32 xsk_get_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb))
+ return 1;
+
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+
+ return xsk_addr->num_descs;
+}
+
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
struct sk_buff *skb)
{
- struct xsk_addr_node *pos, *tmp;
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addrs *xsk_addr;
u32 descs_processed = 0;
unsigned long flags;
- u32 idx;
+ u32 idx, i;
spin_lock_irqsave(&pool->cq_prod_lock, flags);
idx = xskq_get_prod(pool->cq);
- xskq_prod_write_addr(pool->cq, idx,
- (u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
- descs_processed++;
+ if (unlikely(num_descs > 1)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
- if (unlikely(XSKCB(skb)->num_descs > 1)) {
- list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ for (i = 0; i < num_descs; i++) {
xskq_prod_write_addr(pool->cq, idx + descs_processed,
- pos->addr);
+ xsk_addr->addrs[i]);
descs_processed++;
- list_del(&pos->addr_node);
- kmem_cache_free(xsk_tx_generic_cache, pos);
}
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
+ } else {
+ xskq_prod_write_addr(pool->cq, idx,
+ xsk_skb_destructor_get_addr(skb));
+ descs_processed++;
}
xskq_prod_submit_n(pool->cq, descs_processed);
spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
@@ -592,16 +624,6 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
spin_unlock(&pool->cq_cached_prod_lock);
}
-static void xsk_inc_num_desc(struct sk_buff *skb)
-{
- XSKCB(skb)->num_descs++;
-}
-
-static u32 xsk_get_num_desc(struct sk_buff *skb)
-{
- return XSKCB(skb)->num_descs;
-}
-
INDIRECT_CALLABLE_SCOPE
void xsk_destruct_skb(struct sk_buff *skb)
{
@@ -619,27 +641,22 @@ void xsk_destruct_skb(struct sk_buff *skb)
static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
u64 addr)
{
- BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
- INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
skb->dev = xs->dev;
skb->priority = READ_ONCE(xs->sk.sk_priority);
skb->mark = READ_ONCE(xs->sk.sk_mark);
- XSKCB(skb)->num_descs = 0;
skb->destructor = xsk_destruct_skb;
- skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
+ xsk_skb_destructor_set_addr(skb, addr);
}
static void xsk_consume_skb(struct sk_buff *skb)
{
struct xdp_sock *xs = xdp_sk(skb->sk);
u32 num_descs = xsk_get_num_desc(skb);
- struct xsk_addr_node *pos, *tmp;
+ struct xsk_addrs *xsk_addr;
if (unlikely(num_descs > 1)) {
- list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
- list_del(&pos->addr_node);
- kmem_cache_free(xsk_tx_generic_cache, pos);
- }
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
}
skb->destructor = sock_wfree;
@@ -699,7 +716,6 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
- struct xsk_addr_node *xsk_addr;
struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
@@ -725,16 +741,26 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return ERR_PTR(err);
}
} else {
- xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
- if (!xsk_addr)
- return ERR_PTR(-ENOMEM);
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr)
+ return ERR_PTR(-ENOMEM);
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
/* in case of -EOVERFLOW that could happen below,
* xsk_consume_skb() will release this node as whole skb
* would be dropped, which implies freeing all list elements
*/
- xsk_addr->addr = desc->addr;
- list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
len = desc->len;
@@ -811,10 +837,25 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
}
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
- struct xsk_addr_node *xsk_addr;
+ struct xsk_addrs *xsk_addr;
struct page *page;
u8 *vaddr;
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr) {
+ err = -ENOMEM;
+ goto free_err;
+ }
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
+
if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
err = -EOVERFLOW;
goto free_err;
@@ -826,13 +867,6 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
goto free_err;
}
- xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
- if (!xsk_addr) {
- __free_page(page);
- err = -ENOMEM;
- goto free_err;
- }
-
vaddr = kmap_local_page(page);
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
@@ -840,8 +874,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
- xsk_addr->addr = desc->addr;
- list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
}
@@ -1902,7 +1935,7 @@ static int __init xsk_init(void)
goto out_pernet;
xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
- sizeof(struct xsk_addr_node),
+ sizeof(struct xsk_addrs),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!xsk_tx_generic_cache) {
err = -ENOMEM;
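The AF_XDP rework replaces the per-descriptor list that used to hang off skb->cb with a single shinfo destructor_arg that is either an immediate descriptor address or a pointer to a heap-allocated struct xsk_addrs holding all addresses of a multi-descriptor skb. The two cases are told apart by bit 0 of the stored value: a kmem_cache pointer is at least word aligned so its low bit is clear, while the immediate address is stored with the low bit set and masked off again on retrieval. A userspace sketch of the tagging trick (array size and addresses are placeholders):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct xsk_addrs {
	unsigned int num_descs;
	uint64_t addrs[16];	/* the kernel sizes this as MAX_SKB_FRAGS + 1 */
};

/* bit 0 set: immediate address; bit 0 clear: pointer to struct xsk_addrs */
static int is_immediate(void *arg)         { return (uintptr_t)arg & 0x1UL; }
static uint64_t get_immediate(void *arg)   { return (uintptr_t)arg & ~0x1UL; }
static void *make_immediate(uint64_t addr) { return (void *)((uintptr_t)addr | 0x1UL); }

int main(void)
{
	void *arg = make_immediate(0x10000);	/* single-descriptor skb */

	if (is_immediate(arg))
		printf("one descriptor at 0x%llx\n",
		       (unsigned long long)get_immediate(arg));

	struct xsk_addrs *multi = calloc(1, sizeof(*multi));	/* multi-frag skb */
	if (!multi)
		return 1;
	multi->num_descs = 2;
	multi->addrs[0] = 0x10000;
	multi->addrs[1] = 0x20000;
	arg = multi;

	if (!is_immediate(arg))
		printf("%u descriptors via side allocation\n",
		       ((struct xsk_addrs *)arg)->num_descs);

	free(multi);
	return 0;
}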
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index f0157702718f..4a62817a88f8 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -110,14 +110,17 @@ config XFRM_IPCOMP
select CRYPTO_DEFLATE
config NET_KEY
- tristate "PF_KEY sockets"
+ tristate "PF_KEY sockets (deprecated)"
select XFRM_ALGO
help
PF_KEYv2 socket family, compatible to KAME ones.
- They are required if you are going to use IPsec tools ported
- from KAME.
- Say Y unless you know what you are doing.
+ The PF_KEYv2 socket interface is deprecated and
+ scheduled for removal. All maintained IKE daemons
+ no longer need PF_KEY sockets. Please use the netlink
+ interface (XFRM_USER) to configure IPsec.
+
+ If unsure, say N.
config NET_KEY_MIGRATE
bool "PF_KEY MIGRATE"
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 44b9de6e4e77..52ae0e034d29 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -438,7 +438,7 @@ ok:
check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
x->props.mode == XFRM_MODE_TUNNEL;
- switch (x->inner_mode.family) {
+ switch (skb_dst(skb)->ops->family) {
case AF_INET:
/* Check for IPv4 options */
if (ip_hdr(skb)->ihl != 5)
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index c9ddef869aa5..4ed346e682c7 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -505,6 +505,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
async = 1;
dev_put(skb->dev);
seq = XFRM_SKB_CB(skb)->seq.input.low;
+ spin_lock(&x->lock);
goto resume;
}
/* GRO call */
@@ -541,9 +542,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
+
+ nexthdr = x->type_offload->input_tail(x, skb);
}
- goto lock;
+ goto process;
}
family = XFRM_SPI_SKB_CB(skb)->family;
@@ -611,7 +614,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop;
}
-lock:
+process:
+ seq_hi = htonl(xfrm_replay_seqhi(x, seq));
+
+ XFRM_SKB_CB(skb)->seq.input.low = seq;
+ XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
+
spin_lock(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
@@ -638,21 +646,13 @@ lock:
goto drop_unlock;
}
- spin_unlock(&x->lock);
-
if (xfrm_tunnel_check(skb, x, family)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
- goto drop;
+ goto drop_unlock;
}
- seq_hi = htonl(xfrm_replay_seqhi(x, seq));
-
- XFRM_SKB_CB(skb)->seq.input.low = seq;
- XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
-
- if (crypto_done) {
- nexthdr = x->type_offload->input_tail(x, skb);
- } else {
+ if (!crypto_done) {
+ spin_unlock(&x->lock);
dev_hold(skb->dev);
nexthdr = x->type->input(x, skb);
@@ -660,9 +660,9 @@ lock:
return 0;
dev_put(skb->dev);
+ spin_lock(&x->lock);
}
resume:
- spin_lock(&x->lock);
if (nexthdr < 0) {
if (nexthdr == -EBADMSG) {
xfrm_audit_state_icvfail(x, skb,
@@ -676,7 +676,7 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (xfrm_replay_recheck(x, skb, seq)) {
+ if (!crypto_done && xfrm_replay_recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 9077730ff7d0..54222fcbd7fd 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -698,7 +698,7 @@ static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
return;
if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
- switch (x->outer_mode.family) {
+ switch (skb_dst(skb)->ops->family) {
case AF_INET:
xo->inner_ipproto = ip_hdr(skb)->protocol;
break;
@@ -772,8 +772,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
/* Exclusive direct xmit for tunnel mode, as
* some filtering or matching rules may apply
* in transport mode.
+ * Locally generated packets also require
+ * the normal XFRM path for L2 header setup,
+ * as the hardware needs the L2 header to match
+ * for encryption, so skip direct output as well.
*/
- if (x->props.mode == XFRM_MODE_TUNNEL)
+ if (x->props.mode == XFRM_MODE_TUNNEL && !skb->sk)
return xfrm_dev_direct_output(sk, x, skb);
return xfrm_output_resume(sk, skb, 0);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d213ca3653a8..9e14e453b55c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -592,6 +592,7 @@ void xfrm_state_free(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_free);
+static void xfrm_state_delete_tunnel(struct xfrm_state *x);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
if (x->mode_cbs && x->mode_cbs->destroy_state)
@@ -607,6 +608,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
kfree(x->replay_esn);
kfree(x->preplay_esn);
xfrm_unset_type_offload(x);
+ xfrm_state_delete_tunnel(x);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
@@ -806,7 +808,6 @@ void __xfrm_state_destroy(struct xfrm_state *x)
}
EXPORT_SYMBOL(__xfrm_state_destroy);
-static void xfrm_state_delete_tunnel(struct xfrm_state *x);
int __xfrm_state_delete(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -2073,6 +2074,7 @@ static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig,
return x;
error:
+ x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
out:
return NULL;
@@ -2157,11 +2159,15 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
xfrm_state_insert(xc);
} else {
if (xfrm_state_add(xc) < 0)
- goto error;
+ goto error_add;
}
return xc;
+error_add:
+ if (xuo)
+ xfrm_dev_state_delete(xc);
error:
+ xc->km.state = XFRM_STATE_DEAD;
xfrm_state_put(xc);
return NULL;
}
@@ -2191,14 +2197,18 @@ int xfrm_state_update(struct xfrm_state *x)
}
if (x1->km.state == XFRM_STATE_ACQ) {
- if (x->dir && x1->dir != x->dir)
+ if (x->dir && x1->dir != x->dir) {
+ to_put = x1;
goto out;
+ }
__xfrm_state_insert(x);
x = NULL;
} else {
- if (x1->dir != x->dir)
+ if (x1->dir != x->dir) {
+ to_put = x1;
goto out;
+ }
}
err = 0;
@@ -3298,6 +3308,7 @@ out_bydst:
void xfrm_state_fini(struct net *net)
{
unsigned int sz;
+ int i;
flush_work(&net->xfrm.state_hash_work);
xfrm_state_flush(net, 0, false);
@@ -3305,14 +3316,17 @@ void xfrm_state_fini(struct net *net)
WARN_ON(!list_empty(&net->xfrm.state_all));
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ WARN_ON(!hlist_empty(net->xfrm.state_byseq + i));
+ WARN_ON(!hlist_empty(net->xfrm.state_byspi + i));
+ WARN_ON(!hlist_empty(net->xfrm.state_bysrc + i));
+ WARN_ON(!hlist_empty(net->xfrm.state_bydst + i));
+ }
+
sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
- WARN_ON(!hlist_empty(net->xfrm.state_byseq));
xfrm_hash_free(net->xfrm.state_byseq, sz);
- WARN_ON(!hlist_empty(net->xfrm.state_byspi));
xfrm_hash_free(net->xfrm.state_byspi, sz);
- WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
xfrm_hash_free(net->xfrm.state_bysrc, sz);
- WARN_ON(!hlist_empty(net->xfrm.state_bydst));
xfrm_hash_free(net->xfrm.state_bydst, sz);
free_percpu(net->xfrm.state_cache_input);
}
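One detail worth spelling out in the xfrm_state_fini() hunk: state_bydst and the other tables are arrays of state_hmask + 1 hash buckets, so the old WARN_ON(!hlist_empty(net->xfrm.state_bydst)) only ever inspected bucket 0 and a state leaked into any other bucket went unnoticed; the new loop checks every bucket before the tables are freed. A userspace sketch of the difference:

#include <stdio.h>

struct node { struct node *next; };
struct bucket { struct node *first; };

static int bucket_empty(const struct bucket *b)
{
	return b->first == NULL;
}

int main(void)
{
	struct node leaked = { NULL };
	struct bucket table[8] = { { NULL } };

	table[5].first = &leaked;	/* an entry left behind in bucket 5 */

	/* old check: only bucket 0, the leak is missed */
	printf("table empty (bucket 0 only): %d\n", bucket_empty(&table[0]));

	/* new check: every bucket */
	for (int i = 0; i < 8; i++)
		if (!bucket_empty(&table[i]))
			printf("leaked entry found in bucket %d\n", i);
	return 0;
}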
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 010c9e6638c0..403b5ecac2c5 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -947,8 +947,11 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (attrs[XFRMA_SA_PCPU]) {
x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
- if (x->pcpu_num >= num_possible_cpus())
+ if (x->pcpu_num >= num_possible_cpus()) {
+ err = -ERANGE;
+ NL_SET_ERR_MSG(extack, "pCPU number too big");
goto error;
+ }
}
err = __xfrm_init_state(x, extack);
@@ -3035,6 +3038,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
}
xfrm_state_free(x);
+ xfrm_dev_policy_delete(xp);
+ xfrm_dev_policy_free(xp);
+ security_xfrm_policy_free(xp->security);
kfree(xp);
return 0;
diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c
index 49c7a46cee07..424a6fa15723 100644
--- a/samples/vfs/test-statx.c
+++ b/samples/vfs/test-statx.c
@@ -19,6 +19,12 @@
#include <time.h>
#include <sys/syscall.h>
#include <sys/types.h>
+
+// Work around glibc header silliness
+#undef AT_RENAME_NOREPLACE
+#undef AT_RENAME_EXCHANGE
+#undef AT_RENAME_WHITEOUT
+
#include <linux/stat.h>
#include <linux/fcntl.h>
#define statx foo
diff --git a/samples/watch_queue/watch_test.c b/samples/watch_queue/watch_test.c
index 8c6cb57d5cfc..24cf7d7a1972 100644
--- a/samples/watch_queue/watch_test.c
+++ b/samples/watch_queue/watch_test.c
@@ -16,6 +16,12 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <limits.h>
+
+// Work around glibc header silliness
+#undef AT_RENAME_NOREPLACE
+#undef AT_RENAME_EXCHANGE
+#undef AT_RENAME_WHITEOUT
+
#include <linux/watch_queue.h>
#include <linux/unistd.h>
#include <linux/keyctl.h>
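Both samples gain the same preprocessor guard: a libc header pulled in earlier in the file apparently defines the AT_RENAME_* constants that the subsequently included uapi headers (<linux/stat.h>/<linux/fcntl.h>, <linux/watch_queue.h>) define as well, so the first copies are undefined just before the uapi include to avoid redefinition warnings. A self-contained sketch of the pattern, with both conflicting definitions faked locally:

#include <stdio.h>

#define AT_RENAME_NOREPLACE 1		/* stand-in for the libc copy */

/* Drop the earlier copy before the header that redefines it is included,
 * so -Wall/-Werror builds stay quiet about macro redefinition. */
#undef AT_RENAME_NOREPLACE
#define AT_RENAME_NOREPLACE (1 << 0)	/* stand-in for the uapi copy */

int main(void)
{
	printf("AT_RENAME_NOREPLACE = %d\n", AT_RENAME_NOREPLACE);
	return 0;
}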
diff --git a/scripts/gendwarfksyms/gendwarfksyms.c b/scripts/gendwarfksyms/gendwarfksyms.c
index 08ae61eb327e..f5203d1640ee 100644
--- a/scripts/gendwarfksyms/gendwarfksyms.c
+++ b/scripts/gendwarfksyms/gendwarfksyms.c
@@ -138,7 +138,8 @@ int main(int argc, char **argv)
error("no input files?");
}
- symbol_read_exports(stdin);
+ if (!symbol_read_exports(stdin))
+ return 0;
if (symtypes_file) {
symfile = fopen(symtypes_file, "w");
diff --git a/scripts/gendwarfksyms/gendwarfksyms.h b/scripts/gendwarfksyms/gendwarfksyms.h
index d9c06d2cb1df..32cec8f7695a 100644
--- a/scripts/gendwarfksyms/gendwarfksyms.h
+++ b/scripts/gendwarfksyms/gendwarfksyms.h
@@ -123,7 +123,7 @@ struct symbol {
typedef void (*symbol_callback_t)(struct symbol *, void *arg);
bool is_symbol_ptr(const char *name);
-void symbol_read_exports(FILE *file);
+int symbol_read_exports(FILE *file);
void symbol_read_symtab(int fd);
struct symbol *symbol_get(const char *name);
void symbol_set_ptr(struct symbol *sym, Dwarf_Die *ptr);
diff --git a/scripts/gendwarfksyms/symbols.c b/scripts/gendwarfksyms/symbols.c
index 35ed594f0749..ecddcb5ffcdf 100644
--- a/scripts/gendwarfksyms/symbols.c
+++ b/scripts/gendwarfksyms/symbols.c
@@ -128,7 +128,7 @@ static bool is_exported(const char *name)
return for_each(name, NULL, NULL) > 0;
}
-void symbol_read_exports(FILE *file)
+int symbol_read_exports(FILE *file)
{
struct symbol *sym;
char *line = NULL;
@@ -159,6 +159,8 @@ void symbol_read_exports(FILE *file)
free(line);
debug("%d exported symbols", nsym);
+
+ return nsym;
}
static void get_symbol(struct symbol *sym, void *arg)
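The gendwarfksyms change makes symbol_read_exports() return the number of exported symbols it read, so main() can exit successfully right away when there is nothing to process, before the symtypes output file is even opened. A trivial userspace sketch of that early-exit shape:

#include <stdio.h>

static int read_exports(FILE *file)
{
	char line[256];
	int nsym = 0;

	while (fgets(line, sizeof(line), file))
		if (line[0] != '\n')
			nsym++;
	return nsym;
}

int main(void)
{
	int nsym = read_exports(stdin);

	if (!nsym)
		return 0;	/* no exported symbols: skip all further work */

	fprintf(stderr, "processing %d exported symbols\n", nsym);
	/* ... expensive per-symbol processing would follow here ... */
	return 0;
}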
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 0bade2c5aa1d..d9c12b993fa7 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1335,11 +1335,10 @@ static void hook_sb_delete(struct super_block *const sb)
* At this point, we own the ihold() reference that was
* originally set up by get_inode_object() and the
* __iget() reference that we just set in this loop
- * walk. Therefore the following call to iput() will
- * not sleep nor drop the inode because there is now at
- * least two references to it.
+ * walk. Therefore there are at least two references
+ * on the inode.
*/
- iput(inode);
+ iput_not_last(inode);
} else {
spin_unlock(&object->lock);
rcu_read_unlock();
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index dfc22da42f30..e713291db873 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -210,12 +210,12 @@ static int selinux_lsm_notifier_avc_callback(u32 event)
*/
static void cred_init_security(void)
{
- struct task_security_struct *tsec;
+ struct cred_security_struct *crsec;
/* NOTE: the lsm framework zeros out the buffer on allocation */
- tsec = selinux_cred(unrcu_pointer(current->real_cred));
- tsec->osid = tsec->sid = tsec->avdcache.sid = SECINITSID_KERNEL;
+ crsec = selinux_cred(unrcu_pointer(current->real_cred));
+ crsec->osid = crsec->sid = SECINITSID_KERNEL;
}
/*
@@ -223,10 +223,10 @@ static void cred_init_security(void)
*/
static inline u32 cred_sid(const struct cred *cred)
{
- const struct task_security_struct *tsec;
+ const struct cred_security_struct *crsec;
- tsec = selinux_cred(cred);
- return tsec->sid;
+ crsec = selinux_cred(cred);
+ return crsec->sid;
}
static void __ad_net_init(struct common_audit_data *ad,
@@ -437,15 +437,15 @@ static int may_context_mount_sb_relabel(u32 sid,
struct superblock_security_struct *sbsec,
const struct cred *cred)
{
- const struct task_security_struct *tsec = selinux_cred(cred);
+ const struct cred_security_struct *crsec = selinux_cred(cred);
int rc;
- rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(crsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELFROM, NULL);
if (rc)
return rc;
- rc = avc_has_perm(tsec->sid, sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(crsec->sid, sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELTO, NULL);
return rc;
}
@@ -454,9 +454,9 @@ static int may_context_mount_inode_relabel(u32 sid,
struct superblock_security_struct *sbsec,
const struct cred *cred)
{
- const struct task_security_struct *tsec = selinux_cred(cred);
+ const struct cred_security_struct *crsec = selinux_cred(cred);
int rc;
- rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(crsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELFROM, NULL);
if (rc)
return rc;
@@ -1788,7 +1788,7 @@ out:
* Determine the label for an inode that might be unioned.
*/
static int
-selinux_determine_inode_label(const struct task_security_struct *tsec,
+selinux_determine_inode_label(const struct cred_security_struct *crsec,
struct inode *dir,
const struct qstr *name, u16 tclass,
u32 *_new_isid)
@@ -1800,11 +1800,11 @@ selinux_determine_inode_label(const struct task_security_struct *tsec,
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) {
*_new_isid = sbsec->mntpoint_sid;
} else if ((sbsec->flags & SBLABEL_MNT) &&
- tsec->create_sid) {
- *_new_isid = tsec->create_sid;
+ crsec->create_sid) {
+ *_new_isid = crsec->create_sid;
} else {
const struct inode_security_struct *dsec = inode_security(dir);
- return security_transition_sid(tsec->sid,
+ return security_transition_sid(crsec->sid,
dsec->sid, tclass,
name, _new_isid);
}
@@ -1817,7 +1817,7 @@ static int may_create(struct inode *dir,
struct dentry *dentry,
u16 tclass)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid;
@@ -1827,7 +1827,7 @@ static int may_create(struct inode *dir,
dsec = inode_security(dir);
sbsec = selinux_superblock(dir->i_sb);
- sid = tsec->sid;
+ sid = crsec->sid;
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
@@ -1838,7 +1838,7 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- rc = selinux_determine_inode_label(tsec, dir, &dentry->d_name, tclass,
+ rc = selinux_determine_inode_label(crsec, dir, &dentry->d_name, tclass,
&newsid);
if (rc)
return rc;
@@ -2251,8 +2251,8 @@ static u32 ptrace_parent_sid(void)
}
static int check_nnp_nosuid(const struct linux_binprm *bprm,
- const struct task_security_struct *old_tsec,
- const struct task_security_struct *new_tsec)
+ const struct cred_security_struct *old_crsec,
+ const struct cred_security_struct *new_crsec)
{
int nnp = (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS);
int nosuid = !mnt_may_suid(bprm->file->f_path.mnt);
@@ -2262,7 +2262,7 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
if (!nnp && !nosuid)
return 0; /* neither NNP nor nosuid */
- if (new_tsec->sid == old_tsec->sid)
+ if (new_crsec->sid == old_crsec->sid)
return 0; /* No change in credentials */
/*
@@ -2277,7 +2277,7 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
av |= PROCESS2__NNP_TRANSITION;
if (nosuid)
av |= PROCESS2__NOSUID_TRANSITION;
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(old_crsec->sid, new_crsec->sid,
SECCLASS_PROCESS2, av, NULL);
if (!rc)
return 0;
@@ -2288,8 +2288,8 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
* i.e. SIDs that are guaranteed to only be allowed a subset
* of the permissions of the current SID.
*/
- rc = security_bounded_transition(old_tsec->sid,
- new_tsec->sid);
+ rc = security_bounded_transition(old_crsec->sid,
+ new_crsec->sid);
if (!rc)
return 0;
@@ -2305,8 +2305,8 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
{
- const struct task_security_struct *old_tsec;
- struct task_security_struct *new_tsec;
+ const struct cred_security_struct *old_crsec;
+ struct cred_security_struct *new_crsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
struct inode *inode = file_inode(bprm->file);
@@ -2315,18 +2315,18 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
/* SELinux context only depends on initial program or script and not
* the script interpreter */
- old_tsec = selinux_cred(current_cred());
- new_tsec = selinux_cred(bprm->cred);
+ old_crsec = selinux_cred(current_cred());
+ new_crsec = selinux_cred(bprm->cred);
isec = inode_security(inode);
/* Default to the current task SID. */
- new_tsec->sid = old_tsec->sid;
- new_tsec->osid = old_tsec->sid;
+ new_crsec->sid = old_crsec->sid;
+ new_crsec->osid = old_crsec->sid;
/* Reset fs, key, and sock SIDs on execve. */
- new_tsec->create_sid = 0;
- new_tsec->keycreate_sid = 0;
- new_tsec->sockcreate_sid = 0;
+ new_crsec->create_sid = 0;
+ new_crsec->keycreate_sid = 0;
+ new_crsec->sockcreate_sid = 0;
/*
* Before policy is loaded, label any task outside kernel space
@@ -2335,26 +2335,26 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
* (if the policy chooses to set SECINITSID_INIT != SECINITSID_KERNEL).
*/
if (!selinux_initialized()) {
- new_tsec->sid = SECINITSID_INIT;
+ new_crsec->sid = SECINITSID_INIT;
/* also clear the exec_sid just in case */
- new_tsec->exec_sid = 0;
+ new_crsec->exec_sid = 0;
return 0;
}
- if (old_tsec->exec_sid) {
- new_tsec->sid = old_tsec->exec_sid;
+ if (old_crsec->exec_sid) {
+ new_crsec->sid = old_crsec->exec_sid;
/* Reset exec SID on execve. */
- new_tsec->exec_sid = 0;
+ new_crsec->exec_sid = 0;
/* Fail on NNP or nosuid if not an allowed transition. */
- rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+ rc = check_nnp_nosuid(bprm, old_crsec, new_crsec);
if (rc)
return rc;
} else {
/* Check for a default transition on this program. */
- rc = security_transition_sid(old_tsec->sid,
+ rc = security_transition_sid(old_crsec->sid,
isec->sid, SECCLASS_PROCESS, NULL,
- &new_tsec->sid);
+ &new_crsec->sid);
if (rc)
return rc;
@@ -2362,34 +2362,34 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
* Fallback to old SID on NNP or nosuid if not an allowed
* transition.
*/
- rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+ rc = check_nnp_nosuid(bprm, old_crsec, new_crsec);
if (rc)
- new_tsec->sid = old_tsec->sid;
+ new_crsec->sid = old_crsec->sid;
}
ad.type = LSM_AUDIT_DATA_FILE;
ad.u.file = bprm->file;
- if (new_tsec->sid == old_tsec->sid) {
- rc = avc_has_perm(old_tsec->sid, isec->sid,
+ if (new_crsec->sid == old_crsec->sid) {
+ rc = avc_has_perm(old_crsec->sid, isec->sid,
SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
if (rc)
return rc;
} else {
/* Check permissions for the transition. */
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(old_crsec->sid, new_crsec->sid,
SECCLASS_PROCESS, PROCESS__TRANSITION, &ad);
if (rc)
return rc;
- rc = avc_has_perm(new_tsec->sid, isec->sid,
+ rc = avc_has_perm(new_crsec->sid, isec->sid,
SECCLASS_FILE, FILE__ENTRYPOINT, &ad);
if (rc)
return rc;
/* Check for shared state */
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(old_crsec->sid, new_crsec->sid,
SECCLASS_PROCESS, PROCESS__SHARE,
NULL);
if (rc)
@@ -2401,7 +2401,7 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
if (bprm->unsafe & LSM_UNSAFE_PTRACE) {
u32 ptsid = ptrace_parent_sid();
if (ptsid != 0) {
- rc = avc_has_perm(ptsid, new_tsec->sid,
+ rc = avc_has_perm(ptsid, new_crsec->sid,
SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (rc)
@@ -2415,7 +2415,7 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
/* Enable secure mode for SIDs transitions unless
the noatsecure permission is granted between
the two SIDs, i.e. ahp returns 0. */
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(old_crsec->sid, new_crsec->sid,
SECCLASS_PROCESS, PROCESS__NOATSECURE,
NULL);
bprm->secureexec |= !!rc;
@@ -2483,12 +2483,12 @@ static inline void flush_unauthorized_files(const struct cred *cred,
*/
static void selinux_bprm_committing_creds(const struct linux_binprm *bprm)
{
- struct task_security_struct *new_tsec;
+ struct cred_security_struct *new_crsec;
struct rlimit *rlim, *initrlim;
int rc, i;
- new_tsec = selinux_cred(bprm->cred);
- if (new_tsec->sid == new_tsec->osid)
+ new_crsec = selinux_cred(bprm->cred);
+ if (new_crsec->sid == new_crsec->osid)
return;
/* Close files for which the new task SID is not authorized. */
@@ -2507,7 +2507,7 @@ static void selinux_bprm_committing_creds(const struct linux_binprm *bprm)
* higher than the default soft limit for cases where the default is
* lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK.
*/
- rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
+ rc = avc_has_perm(new_crsec->osid, new_crsec->sid, SECCLASS_PROCESS,
PROCESS__RLIMITINH, NULL);
if (rc) {
/* protect against do_prlimit() */
@@ -2529,12 +2529,12 @@ static void selinux_bprm_committing_creds(const struct linux_binprm *bprm)
*/
static void selinux_bprm_committed_creds(const struct linux_binprm *bprm)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
u32 osid, sid;
int rc;
- osid = tsec->osid;
- sid = tsec->sid;
+ osid = crsec->osid;
+ sid = crsec->sid;
if (sid == osid)
return;
@@ -2911,7 +2911,7 @@ static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
{
u32 newsid;
int rc;
- struct task_security_struct *tsec;
+ struct cred_security_struct *crsec;
rc = selinux_determine_inode_label(selinux_cred(old),
d_inode(dentry->d_parent), name,
@@ -2920,8 +2920,8 @@ static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
if (rc)
return rc;
- tsec = selinux_cred(new);
- tsec->create_sid = newsid;
+ crsec = selinux_cred(new);
+ crsec->create_sid = newsid;
return 0;
}
@@ -2929,7 +2929,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
struct xattr *xattrs, int *xattr_count)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
struct superblock_security_struct *sbsec;
struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
u32 newsid, clen;
@@ -2939,9 +2939,9 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
sbsec = selinux_superblock(dir->i_sb);
- newsid = tsec->create_sid;
+ newsid = crsec->create_sid;
newsclass = inode_mode_to_security_class(inode->i_mode);
- rc = selinux_determine_inode_label(tsec, dir, qstr, newsclass, &newsid);
+ rc = selinux_determine_inode_label(crsec, dir, qstr, newsclass, &newsid);
if (rc)
return rc;
@@ -3113,7 +3113,7 @@ static noinline int audit_inode_permission(struct inode *inode,
static inline void task_avdcache_reset(struct task_security_struct *tsec)
{
memset(&tsec->avdcache.dir, 0, sizeof(tsec->avdcache.dir));
- tsec->avdcache.sid = tsec->sid;
+ tsec->avdcache.sid = current_sid();
tsec->avdcache.seqno = avc_policy_seqno();
tsec->avdcache.dir_spot = TSEC_AVDC_DIR_SIZE - 1;
}
@@ -3137,7 +3137,7 @@ static inline int task_avdcache_search(struct task_security_struct *tsec,
if (isec->sclass != SECCLASS_DIR)
return -ENOENT;
- if (unlikely(tsec->sid != tsec->avdcache.sid ||
+ if (unlikely(current_sid() != tsec->avdcache.sid ||
tsec->avdcache.seqno != avc_policy_seqno())) {
task_avdcache_reset(tsec);
return -ENOENT;
@@ -3201,6 +3201,7 @@ static int selinux_inode_permission(struct inode *inode, int requested)
{
int mask;
u32 perms;
+ u32 sid = current_sid();
struct task_security_struct *tsec;
struct inode_security_struct *isec;
struct avdc_entry *avdc;
@@ -3213,8 +3214,8 @@ static int selinux_inode_permission(struct inode *inode, int requested)
if (!mask)
return 0;
- tsec = selinux_cred(current_cred());
- if (task_avdcache_permnoaudit(tsec))
+ tsec = selinux_task(current);
+ if (task_avdcache_permnoaudit(tsec, sid))
return 0;
isec = inode_security_rcu(inode, requested & MAY_NOT_BLOCK);
@@ -3234,7 +3235,7 @@ static int selinux_inode_permission(struct inode *inode, int requested)
struct av_decision avd;
/* Cache miss. */
- rc = avc_has_perm_noaudit(tsec->sid, isec->sid, isec->sclass,
+ rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass,
perms, 0, &avd);
audited = avc_audit_required(perms, &avd, rc,
(requested & MAY_ACCESS) ? FILE__AUDIT_ACCESS : 0,
@@ -3285,9 +3286,9 @@ static int selinux_inode_getattr(const struct path *path)
{
struct task_security_struct *tsec;
- tsec = selinux_cred(current_cred());
+ tsec = selinux_task(current);
- if (task_avdcache_permnoaudit(tsec))
+ if (task_avdcache_permnoaudit(tsec, current_sid()))
return 0;
return path_has_perm(current_cred(), path, FILE__GETATTR);
@@ -3659,7 +3660,7 @@ static void selinux_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop)
static int selinux_inode_copy_up(struct dentry *src, struct cred **new)
{
struct lsm_prop prop;
- struct task_security_struct *tsec;
+ struct cred_security_struct *crsec;
struct cred *new_creds = *new;
if (new_creds == NULL) {
@@ -3668,10 +3669,10 @@ static int selinux_inode_copy_up(struct dentry *src, struct cred **new)
return -ENOMEM;
}
- tsec = selinux_cred(new_creds);
+ crsec = selinux_cred(new_creds);
/* Get label from overlay inode and set it in create_sid */
selinux_inode_getlsmprop(d_inode(src), &prop);
- tsec->create_sid = prop.selinux.secid;
+ crsec->create_sid = prop.selinux.secid;
*new = new_creds;
return 0;
}
@@ -3697,7 +3698,7 @@ static int selinux_inode_copy_up_xattr(struct dentry *dentry, const char *name)
static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
u32 parent_sid, newsid, clen;
int rc;
char *context;
@@ -3725,8 +3726,8 @@ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
if (rc)
return rc;
- if (tsec->create_sid) {
- newsid = tsec->create_sid;
+ if (crsec->create_sid) {
+ newsid = crsec->create_sid;
} else {
u16 secclass = inode_mode_to_security_class(kn->mode);
const char *kn_name;
@@ -3737,7 +3738,7 @@ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
q.name = kn_name;
q.hash_len = hashlen_string(kn_dir, kn_name);
- rc = security_transition_sid(tsec->sid,
+ rc = security_transition_sid(crsec->sid,
parent_sid, secclass, &q,
&newsid);
if (rc)
@@ -4151,7 +4152,10 @@ static int selinux_task_alloc(struct task_struct *task,
u64 clone_flags)
{
u32 sid = current_sid();
+ struct task_security_struct *old_tsec = selinux_task(current);
+ struct task_security_struct *new_tsec = selinux_task(task);
+ *new_tsec = *old_tsec;
return avc_has_perm(sid, sid, SECCLASS_PROCESS, PROCESS__FORK, NULL);
}
@@ -4161,10 +4165,10 @@ static int selinux_task_alloc(struct task_struct *task,
static int selinux_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- const struct task_security_struct *old_tsec = selinux_cred(old);
- struct task_security_struct *tsec = selinux_cred(new);
+ const struct cred_security_struct *old_crsec = selinux_cred(old);
+ struct cred_security_struct *crsec = selinux_cred(new);
- *tsec = *old_tsec;
+ *crsec = *old_crsec;
return 0;
}
@@ -4173,10 +4177,10 @@ static int selinux_cred_prepare(struct cred *new, const struct cred *old,
*/
static void selinux_cred_transfer(struct cred *new, const struct cred *old)
{
- const struct task_security_struct *old_tsec = selinux_cred(old);
- struct task_security_struct *tsec = selinux_cred(new);
+ const struct cred_security_struct *old_crsec = selinux_cred(old);
+ struct cred_security_struct *crsec = selinux_cred(new);
- *tsec = *old_tsec;
+ *crsec = *old_crsec;
}
static void selinux_cred_getsecid(const struct cred *c, u32 *secid)
@@ -4195,7 +4199,7 @@ static void selinux_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop)
*/
static int selinux_kernel_act_as(struct cred *new, u32 secid)
{
- struct task_security_struct *tsec = selinux_cred(new);
+ struct cred_security_struct *crsec = selinux_cred(new);
u32 sid = current_sid();
int ret;
@@ -4204,10 +4208,10 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid)
KERNEL_SERVICE__USE_AS_OVERRIDE,
NULL);
if (ret == 0) {
- tsec->sid = secid;
- tsec->create_sid = 0;
- tsec->keycreate_sid = 0;
- tsec->sockcreate_sid = 0;
+ crsec->sid = secid;
+ crsec->create_sid = 0;
+ crsec->keycreate_sid = 0;
+ crsec->sockcreate_sid = 0;
}
return ret;
}
@@ -4219,7 +4223,7 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid)
static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
{
struct inode_security_struct *isec = inode_security(inode);
- struct task_security_struct *tsec = selinux_cred(new);
+ struct cred_security_struct *crsec = selinux_cred(new);
u32 sid = current_sid();
int ret;
@@ -4229,7 +4233,7 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
NULL);
if (ret == 0)
- tsec->create_sid = isec->sid;
+ crsec->create_sid = isec->sid;
return ret;
}
@@ -4744,15 +4748,15 @@ static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
/* socket security operations */
-static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+static int socket_sockcreate_sid(const struct cred_security_struct *crsec,
u16 secclass, u32 *socksid)
{
- if (tsec->sockcreate_sid > SECSID_NULL) {
- *socksid = tsec->sockcreate_sid;
+ if (crsec->sockcreate_sid > SECSID_NULL) {
+ *socksid = crsec->sockcreate_sid;
return 0;
}
- return security_transition_sid(tsec->sid, tsec->sid,
+ return security_transition_sid(crsec->sid, crsec->sid,
secclass, NULL, socksid);
}
@@ -4797,7 +4801,7 @@ static int sock_has_perm(struct sock *sk, u32 perms)
static int selinux_socket_create(int family, int type,
int protocol, int kern)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
u32 newsid;
u16 secclass;
int rc;
@@ -4806,17 +4810,17 @@ static int selinux_socket_create(int family, int type,
return 0;
secclass = socket_type_to_security_class(family, type, protocol);
- rc = socket_sockcreate_sid(tsec, secclass, &newsid);
+ rc = socket_sockcreate_sid(crsec, secclass, &newsid);
if (rc)
return rc;
- return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
+ return avc_has_perm(crsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
}
static int selinux_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock));
struct sk_security_struct *sksec;
u16 sclass = socket_type_to_security_class(family, type, protocol);
@@ -4824,7 +4828,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
int err = 0;
if (!kern) {
- err = socket_sockcreate_sid(tsec, sclass, &sid);
+ err = socket_sockcreate_sid(crsec, sclass, &sid);
if (err)
return err;
}
@@ -6526,37 +6530,37 @@ static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode)
static int selinux_lsm_getattr(unsigned int attr, struct task_struct *p,
char **value)
{
- const struct task_security_struct *tsec;
+ const struct cred_security_struct *crsec;
int error;
u32 sid;
u32 len;
rcu_read_lock();
- tsec = selinux_cred(__task_cred(p));
+ crsec = selinux_cred(__task_cred(p));
if (p != current) {
- error = avc_has_perm(current_sid(), tsec->sid,
+ error = avc_has_perm(current_sid(), crsec->sid,
SECCLASS_PROCESS, PROCESS__GETATTR, NULL);
if (error)
goto err_unlock;
}
switch (attr) {
case LSM_ATTR_CURRENT:
- sid = tsec->sid;
+ sid = crsec->sid;
break;
case LSM_ATTR_PREV:
- sid = tsec->osid;
+ sid = crsec->osid;
break;
case LSM_ATTR_EXEC:
- sid = tsec->exec_sid;
+ sid = crsec->exec_sid;
break;
case LSM_ATTR_FSCREATE:
- sid = tsec->create_sid;
+ sid = crsec->create_sid;
break;
case LSM_ATTR_KEYCREATE:
- sid = tsec->keycreate_sid;
+ sid = crsec->keycreate_sid;
break;
case LSM_ATTR_SOCKCREATE:
- sid = tsec->sockcreate_sid;
+ sid = crsec->sockcreate_sid;
break;
default:
error = -EOPNOTSUPP;
@@ -6581,7 +6585,7 @@ err_unlock:
static int selinux_lsm_setattr(u64 attr, void *value, size_t size)
{
- struct task_security_struct *tsec;
+ struct cred_security_struct *crsec;
struct cred *new;
u32 mysid = current_sid(), sid = 0, ptsid;
int error;
@@ -6667,11 +6671,11 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size)
operation. See selinux_bprm_creds_for_exec for the execve
checks and may_create for the file creation checks. The
operation will then fail if the context is not permitted. */
- tsec = selinux_cred(new);
+ crsec = selinux_cred(new);
if (attr == LSM_ATTR_EXEC) {
- tsec->exec_sid = sid;
+ crsec->exec_sid = sid;
} else if (attr == LSM_ATTR_FSCREATE) {
- tsec->create_sid = sid;
+ crsec->create_sid = sid;
} else if (attr == LSM_ATTR_KEYCREATE) {
if (sid) {
error = avc_has_perm(mysid, sid,
@@ -6679,22 +6683,22 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size)
if (error)
goto abort_change;
}
- tsec->keycreate_sid = sid;
+ crsec->keycreate_sid = sid;
} else if (attr == LSM_ATTR_SOCKCREATE) {
- tsec->sockcreate_sid = sid;
+ crsec->sockcreate_sid = sid;
} else if (attr == LSM_ATTR_CURRENT) {
error = -EINVAL;
if (sid == 0)
goto abort_change;
if (!current_is_single_threaded()) {
- error = security_bounded_transition(tsec->sid, sid);
+ error = security_bounded_transition(crsec->sid, sid);
if (error)
goto abort_change;
}
/* Check permissions for the transition. */
- error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS,
+ error = avc_has_perm(crsec->sid, sid, SECCLASS_PROCESS,
PROCESS__DYNTRANSITION, NULL);
if (error)
goto abort_change;
@@ -6709,7 +6713,7 @@ static int selinux_lsm_setattr(u64 attr, void *value, size_t size)
goto abort_change;
}
- tsec->sid = sid;
+ crsec->sid = sid;
} else {
error = -EINVAL;
goto abort_change;
@@ -6876,14 +6880,14 @@ static int selinux_inode_getsecctx(struct inode *inode, struct lsm_context *cp)
static int selinux_key_alloc(struct key *k, const struct cred *cred,
unsigned long flags)
{
- const struct task_security_struct *tsec;
+ const struct cred_security_struct *crsec;
struct key_security_struct *ksec = selinux_key(k);
- tsec = selinux_cred(cred);
- if (tsec->keycreate_sid)
- ksec->sid = tsec->keycreate_sid;
+ crsec = selinux_cred(cred);
+ if (crsec->keycreate_sid)
+ ksec->sid = crsec->keycreate_sid;
else
- ksec->sid = tsec->sid;
+ ksec->sid = crsec->sid;
return 0;
}
@@ -7137,7 +7141,8 @@ static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *att
#endif
struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = {
- .lbs_cred = sizeof(struct task_security_struct),
+ .lbs_cred = sizeof(struct cred_security_struct),
+ .lbs_task = sizeof(struct task_security_struct),
.lbs_file = sizeof(struct file_security_struct),
.lbs_inode = sizeof(struct inode_security_struct),
.lbs_ipc = sizeof(struct ipc_security_struct),
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 2d5139c6d45b..8fc3de5234ac 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -37,13 +37,16 @@ struct avdc_entry {
bool permissive; /* AVC permissive flag */
};
-struct task_security_struct {
+struct cred_security_struct {
u32 osid; /* SID prior to last execve */
u32 sid; /* current SID */
u32 exec_sid; /* exec SID */
u32 create_sid; /* fscreate SID */
u32 keycreate_sid; /* keycreate SID */
u32 sockcreate_sid; /* fscreate SID */
+} __randomize_layout;
+
+struct task_security_struct {
#define TSEC_AVDC_DIR_SIZE (1 << 2)
struct {
u32 sid; /* current SID for cached entries */
@@ -54,10 +57,11 @@ struct task_security_struct {
} avdcache;
} __randomize_layout;
-static inline bool task_avdcache_permnoaudit(struct task_security_struct *tsec)
+static inline bool task_avdcache_permnoaudit(struct task_security_struct *tsec,
+ u32 sid)
{
return (tsec->avdcache.permissive_neveraudit &&
- tsec->sid == tsec->avdcache.sid &&
+ sid == tsec->avdcache.sid &&
tsec->avdcache.seqno == avc_policy_seqno());
}
@@ -172,11 +176,17 @@ struct perf_event_security_struct {
};
extern struct lsm_blob_sizes selinux_blob_sizes;
-static inline struct task_security_struct *selinux_cred(const struct cred *cred)
+static inline struct cred_security_struct *selinux_cred(const struct cred *cred)
{
return cred->security + selinux_blob_sizes.lbs_cred;
}
+static inline struct task_security_struct *
+selinux_task(const struct task_struct *task)
+{
+ return task->security + selinux_blob_sizes.lbs_task;
+}
+
static inline struct file_security_struct *selinux_file(const struct file *file)
{
return file->f_security + selinux_blob_sizes.lbs_file;
@@ -207,9 +217,9 @@ selinux_ipc(const struct kern_ipc_perm *ipc)
*/
static inline u32 current_sid(void)
{
- const struct task_security_struct *tsec = selinux_cred(current_cred());
+ const struct cred_security_struct *crsec = selinux_cred(current_cred());
- return tsec->sid;
+ return crsec->sid;
}
static inline struct superblock_security_struct *
diff --git a/sound/hda/codecs/cirrus/cs420x.c b/sound/hda/codecs/cirrus/cs420x.c
index 823220d5cada..13f5f1711fa4 100644
--- a/sound/hda/codecs/cirrus/cs420x.c
+++ b/sound/hda/codecs/cirrus/cs420x.c
@@ -585,6 +585,7 @@ static const struct hda_quirk cs4208_mac_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7800, "MacPro 6,1", CS4208_MACMINI),
SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
{} /* terminator */
};
diff --git a/sound/hda/codecs/hdmi/nvhdmi-mcp.c b/sound/hda/codecs/hdmi/nvhdmi-mcp.c
index 8fd8d76fa72f..1c5fdfe872f2 100644
--- a/sound/hda/codecs/hdmi/nvhdmi-mcp.c
+++ b/sound/hda/codecs/hdmi/nvhdmi-mcp.c
@@ -350,8 +350,8 @@ static int nvhdmi_mcp_probe(struct hda_codec *codec,
static const struct hda_codec_ops nvhdmi_mcp_codec_ops = {
.probe = nvhdmi_mcp_probe,
.remove = snd_hda_hdmi_simple_remove,
- .build_controls = nvhdmi_mcp_build_pcms,
- .build_pcms = nvhdmi_mcp_build_controls,
+ .build_pcms = nvhdmi_mcp_build_pcms,
+ .build_controls = nvhdmi_mcp_build_controls,
.init = nvhdmi_mcp_init,
.unsol_event = snd_hda_hdmi_simple_unsol_event,
};
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index 4aec5067c59d..b45fcc9a3785 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -6525,6 +6525,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8a4f, "HP Victus 15-fa0xxx (MB 8A4F)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8a75, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
@@ -6572,6 +6573,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bd4, "HP Victus 16-s0xxx (MB 8BD4)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ SND_PCI_QUIRK(0x103c, 0x8bd6, "HP Pavilion Aero Laptop 13z-be200", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
@@ -6694,6 +6696,15 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8ed5, "HP Merino13X", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8ed6, "HP Merino13", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8ed7, "HP Merino14", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8ed8, "HP Merino16", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8ed9, "HP Merino14W", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8eda, "HP Merino16W", ALC245_FIXUP_TAS2781_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x8f40, "HP Lampas14", ALC287_FIXUP_TXNW2781_I2C),
+ SND_PCI_QUIRK(0x103c, 0x8f41, "HP Lampas16", ALC287_FIXUP_TXNW2781_I2C),
+ SND_PCI_QUIRK(0x103c, 0x8f42, "HP LampasW14", ALC287_FIXUP_TXNW2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1032, "ASUS VivoBook X513EA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1034, "ASUS GU605C", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index de56e83d8e10..bb02945793f0 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -280,11 +280,11 @@ __snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
// (5)
err = pci_read_config_word(pci, PCI_DEVICE_ID, &chip->device);
- if (err < 0)
- return err;
+ if (err)
+ return pcibios_err_to_errno(err);
err = pci_read_config_word(pci, PCI_VENDOR_ID, &chip->vendor);
- if (err < 0)
- return err;
+ if (err)
+ return pcibios_err_to_errno(err);
chip->rev = pci->revision;
#ifdef CHIP_AU8830
if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 6a3cca3d26c7..ead447a5da7f 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -581,17 +581,17 @@ static int cs4271_component_probe(struct snd_soc_component *component)
ret = regcache_sync(cs4271->regmap);
if (ret < 0)
- return ret;
+ goto err_disable_regulator;
ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
if (ret < 0)
- return ret;
+ goto err_disable_regulator;
ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
CS4271_MODE2_PDN, 0);
if (ret < 0)
- return ret;
+ goto err_disable_regulator;
/* Power-up sequence requires 85 uS */
udelay(85);
@@ -601,6 +601,10 @@ static int cs4271_component_probe(struct snd_soc_component *component)
CS4271_MODE2_MUTECAEQUB);
return 0;
+
+err_disable_regulator:
+ regulator_bulk_disable(ARRAY_SIZE(cs4271->supplies), cs4271->supplies);
+ return ret;
}
static void cs4271_component_remove(struct snd_soc_component *component)
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index ae89260ca215..3420011da444 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -2124,11 +2124,50 @@ static int da7213_probe(struct snd_soc_component *component)
return 0;
}
+static int da7213_runtime_suspend(struct device *dev)
+{
+ struct da7213_priv *da7213 = dev_get_drvdata(dev);
+
+ regcache_cache_only(da7213->regmap, true);
+ regcache_mark_dirty(da7213->regmap);
+ regulator_bulk_disable(DA7213_NUM_SUPPLIES, da7213->supplies);
+
+ return 0;
+}
+
+static int da7213_runtime_resume(struct device *dev)
+{
+ struct da7213_priv *da7213 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(DA7213_NUM_SUPPLIES, da7213->supplies);
+ if (ret < 0)
+ return ret;
+ regcache_cache_only(da7213->regmap, false);
+ return regcache_sync(da7213->regmap);
+}
+
+static int da7213_suspend(struct snd_soc_component *component)
+{
+ struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
+
+ return da7213_runtime_suspend(da7213->dev);
+}
+
+static int da7213_resume(struct snd_soc_component *component)
+{
+ struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
+
+ return da7213_runtime_resume(da7213->dev);
+}
+
static const struct snd_soc_component_driver soc_component_dev_da7213 = {
.probe = da7213_probe,
.set_bias_level = da7213_set_bias_level,
.controls = da7213_snd_controls,
.num_controls = ARRAY_SIZE(da7213_snd_controls),
+ .suspend = da7213_suspend,
+ .resume = da7213_resume,
.dapm_widgets = da7213_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(da7213_dapm_widgets),
.dapm_routes = da7213_audio_map,
@@ -2175,6 +2214,8 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
if (!da7213->fin_min_rate)
return -EINVAL;
+ da7213->dev = &i2c->dev;
+
i2c_set_clientdata(i2c, da7213);
/* Get required supplies */
@@ -2224,31 +2265,9 @@ static void da7213_i2c_remove(struct i2c_client *i2c)
pm_runtime_disable(&i2c->dev);
}
-static int da7213_runtime_suspend(struct device *dev)
-{
- struct da7213_priv *da7213 = dev_get_drvdata(dev);
-
- regcache_cache_only(da7213->regmap, true);
- regcache_mark_dirty(da7213->regmap);
- regulator_bulk_disable(DA7213_NUM_SUPPLIES, da7213->supplies);
-
- return 0;
-}
-
-static int da7213_runtime_resume(struct device *dev)
-{
- struct da7213_priv *da7213 = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_bulk_enable(DA7213_NUM_SUPPLIES, da7213->supplies);
- if (ret < 0)
- return ret;
- regcache_cache_only(da7213->regmap, false);
- return regcache_sync(da7213->regmap);
-}
-
-static DEFINE_RUNTIME_DEV_PM_OPS(da7213_pm, da7213_runtime_suspend,
- da7213_runtime_resume, NULL);
+static const struct dev_pm_ops da7213_pm = {
+ RUNTIME_PM_OPS(da7213_runtime_suspend, da7213_runtime_resume, NULL)
+};
static const struct i2c_device_id da7213_i2c_id[] = {
{ "da7213" },
diff --git a/sound/soc/codecs/da7213.h b/sound/soc/codecs/da7213.h
index b9ab791d6b88..29cbf0eb6124 100644
--- a/sound/soc/codecs/da7213.h
+++ b/sound/soc/codecs/da7213.h
@@ -595,6 +595,7 @@ enum da7213_supplies {
/* Codec private data */
struct da7213_priv {
struct regmap *regmap;
+ struct device *dev;
struct mutex ctrl_lock;
struct regulator_bulk_data supplies[DA7213_NUM_SUPPLIES];
struct clk *mclk;
diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
index 2e1b77973a3e..92c177b82a02 100644
--- a/sound/soc/codecs/lpass-va-macro.c
+++ b/sound/soc/codecs/lpass-va-macro.c
@@ -1638,7 +1638,7 @@ static int va_macro_probe(struct platform_device *pdev)
if (ret)
goto err_clkout;
- va->fsgen = clk_hw_get_clk(&va->hw, "fsgen");
+ va->fsgen = devm_clk_hw_get_clk(dev, &va->hw, "fsgen");
if (IS_ERR(va->fsgen)) {
ret = PTR_ERR(va->fsgen);
goto err_clkout;
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index ba880b5de7e8..8f37aa00e62e 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -1957,7 +1957,8 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv)
{
struct i2c_client *client = (struct i2c_client *)tas_priv->client;
unsigned int dev_addrs[TASDEVICE_MAX_CHANNELS];
- int i, ndev = 0;
+ int ndev = 0;
+ int i, rc;
if (tas_priv->isacpi) {
ndev = device_property_read_u32_array(&client->dev,
@@ -1968,8 +1969,12 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv)
} else {
ndev = (ndev < ARRAY_SIZE(dev_addrs))
? ndev : ARRAY_SIZE(dev_addrs);
- ndev = device_property_read_u32_array(&client->dev,
+ rc = device_property_read_u32_array(&client->dev,
"ti,audio-slots", dev_addrs, ndev);
+ if (rc != 0) {
+ ndev = 1;
+ dev_addrs[0] = client->addr;
+ }
}
tas_priv->irq =
diff --git a/sound/soc/codecs/tas2783-sdw.c b/sound/soc/codecs/tas2783-sdw.c
index 1fb4227b711e..e273b80d033e 100644
--- a/sound/soc/codecs/tas2783-sdw.c
+++ b/sound/soc/codecs/tas2783-sdw.c
@@ -762,10 +762,17 @@ static void tas2783_fw_ready(const struct firmware *fmw, void *context)
goto out;
}
- mutex_lock(&tas_dev->pde_lock);
img_sz = fmw->size;
buf = fmw->data;
offset += FW_DL_OFFSET;
+ if (offset >= (img_sz - FW_FL_HDR)) {
+ dev_err(tas_dev->dev,
+ "firmware is too small");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&tas_dev->pde_lock);
while (offset < (img_sz - FW_FL_HDR)) {
memset(&hdr, 0, sizeof(hdr));
offset += read_header(&buf[offset], &hdr);
@@ -776,6 +783,14 @@ static void tas2783_fw_ready(const struct firmware *fmw, void *context)
/* size also includes the header */
file_blk_size = hdr.length - FW_FL_HDR;
+ /* make sure that enough data is there */
+ if (offset + file_blk_size > img_sz) {
+ ret = -EINVAL;
+ dev_err(tas_dev->dev,
+ "corrupt firmware file");
+ break;
+ }
+
switch (hdr.file_id) {
case 0:
ret = sdw_nwrite_no_pm(tas_dev->sdw_peripheral,
@@ -808,7 +823,8 @@ static void tas2783_fw_ready(const struct firmware *fmw, void *context)
break;
}
mutex_unlock(&tas_dev->pde_lock);
- tas2783_update_calibdata(tas_dev);
+ if (!ret)
+ tas2783_update_calibdata(tas_dev);
out:
if (!ret)
diff --git a/sound/soc/renesas/rcar/ssiu.c b/sound/soc/renesas/rcar/ssiu.c
index faf351126d57..244fb833292a 100644
--- a/sound/soc/renesas/rcar/ssiu.c
+++ b/sound/soc/renesas/rcar/ssiu.c
@@ -509,7 +509,7 @@ void rsnd_parse_connect_ssiu(struct rsnd_dai *rdai,
int rsnd_ssiu_probe(struct rsnd_priv *priv)
{
struct device *dev = rsnd_priv_to_dev(priv);
- struct device_node *node;
+ struct device_node *node __free(device_node) = rsnd_ssiu_of_node(priv);
struct rsnd_ssiu *ssiu;
struct rsnd_mod_ops *ops;
const int *list = NULL;
@@ -522,7 +522,6 @@ int rsnd_ssiu_probe(struct rsnd_priv *priv)
* see
* rsnd_ssiu_bufsif_to_id()
*/
- node = rsnd_ssiu_of_node(priv);
if (node)
nr = rsnd_node_count(priv, node, SSIU_NAME);
else
diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c
index 13f68f7b6dd6..0ccb6775f4de 100644
--- a/sound/soc/sdca/sdca_functions.c
+++ b/sound/soc/sdca/sdca_functions.c
@@ -894,7 +894,8 @@ static int find_sdca_entity_control(struct device *dev, struct sdca_entity *enti
return ret;
}
- control->values = devm_kzalloc(dev, hweight64(control->cn_list), GFP_KERNEL);
+ control->values = devm_kcalloc(dev, hweight64(control->cn_list),
+ sizeof(int), GFP_KERNEL);
if (!control->values)
return -ENOMEM;
diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
index f7c8c16308de..3848c7df1916 100644
--- a/sound/soc/sdw_utils/soc_sdw_utils.c
+++ b/sound/soc/sdw_utils/soc_sdw_utils.c
@@ -1277,7 +1277,7 @@ static int is_sdca_endpoint_present(struct device *dev,
struct sdw_slave *slave;
struct device *sdw_dev;
const char *sdw_codec_name;
- int i;
+ int ret, i;
dlc = kzalloc(sizeof(*dlc), GFP_KERNEL);
if (!dlc)
@@ -1307,13 +1307,16 @@ static int is_sdca_endpoint_present(struct device *dev,
}
slave = dev_to_sdw_dev(sdw_dev);
- if (!slave)
- return -EINVAL;
+ if (!slave) {
+ ret = -EINVAL;
+ goto put_device;
+ }
/* Make sure BIOS provides SDCA properties */
if (!slave->sdca_data.interface_revision) {
dev_warn(&slave->dev, "SDCA properties not found in the BIOS\n");
- return 1;
+ ret = 1;
+ goto put_device;
}
for (i = 0; i < slave->sdca_data.num_functions; i++) {
@@ -1322,7 +1325,8 @@ static int is_sdca_endpoint_present(struct device *dev,
if (dai_type == dai_info->dai_type) {
dev_dbg(&slave->dev, "DAI type %d sdca function %s found\n",
dai_type, slave->sdca_data.function[i].name);
- return 1;
+ ret = 1;
+ goto put_device;
}
}
@@ -1330,7 +1334,11 @@ static int is_sdca_endpoint_present(struct device *dev,
"SDCA device function for DAI type %d not supported, skip endpoint\n",
dai_info->dai_type);
- return 0;
+ ret = 0;
+
+put_device:
+ put_device(sdw_dev);
+ return ret;
}
int asoc_sdw_parse_sdw_endpoints(struct snd_soc_card *card,
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 880f5afcce60..cc15624ecaff 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -1362,6 +1362,11 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
ep->sample_rem = ep->cur_rate % ep->pps;
ep->packsize[0] = ep->cur_rate / ep->pps;
ep->packsize[1] = (ep->cur_rate + (ep->pps - 1)) / ep->pps;
+ if (ep->packsize[1] > ep->maxpacksize) {
+ usb_audio_dbg(chip, "Too small maxpacksize %u for rate %u / pps %u\n",
+ ep->maxpacksize, ep->cur_rate, ep->pps);
+ return -EINVAL;
+ }
/* calculate the frequency in 16.16 format */
ep->freqm = ep->freqn;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 6f00e0d52382..3af71d42b9b9 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -921,7 +921,7 @@ static int parse_term_uac2_clock_source(struct mixer_build *state,
{
struct uac_clock_source_descriptor *d = p1;
- term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
+ term->type = UAC2_CLOCK_SOURCE << 16; /* virtual type */
term->id = id;
term->name = d->iClockSource;
return 0;
@@ -3086,6 +3086,8 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
int i;
assoc = usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
+ if (!assoc)
+ return -EINVAL;
/* Detect BADD capture/playback channels from AS EP descriptors */
for (i = 0; i < assoc->bInterfaceCount; i++) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 71638e6dfb20..61bd61ffb1b2 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2022,12 +2022,15 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+ case USB_ID(0x16d0, 0x0ab1): /* PureAudio APA DAC */
+ case USB_ID(0x16d0, 0xeca1): /* PureAudio Lotus DAC5, DAC5 SE, DAC5 Pro */
case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
+ case USB_ID(0x2622, 0x0061): /* LEAK Stereo 230 */
case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
@@ -2267,6 +2270,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_FIXED_RATE),
DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x1038, 0x1294, /* SteelSeries Arctis Pro Wireless */
+ QUIRK_FLAG_MIXER_PLAYBACK_MIN_MUTE),
DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x12d1, 0x3a07, /* Huawei Technologies Co., Ltd. */
@@ -2297,6 +2302,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
QUIRK_FLAG_TX_LENGTH | QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x16d0, 0x0ab1, /* PureAudio APA DAC */
+ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x16d0, 0xeca1, /* PureAudio Lotus DAC5, DAC5 SE and DAC5 Pro */
+ QUIRK_FLAG_DSD_RAW),
DEVICE_FLG(0x17aa, 0x1046, /* Lenovo ThinkStation P620 Rear Line-in, Line-out and Microphone */
QUIRK_FLAG_DISABLE_AUTOSUSPEND),
DEVICE_FLG(0x17aa, 0x104d, /* Lenovo ThinkStation P620 Internal Speaker + Front Headset */
@@ -2420,6 +2429,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x25ce, /* Mytek devices */
QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2622, /* IAG Limited devices */
+ QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x278b, /* Rotel? */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x292b, /* Gustard/Ess based devices */
diff --git a/tools/arch/riscv/include/asm/csr.h b/tools/arch/riscv/include/asm/csr.h
index 56d7367ee344..21d8cee04638 100644
--- a/tools/arch/riscv/include/asm/csr.h
+++ b/tools/arch/riscv/include/asm/csr.h
@@ -167,7 +167,8 @@
#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
(_AC(1, UL) << IRQ_S_TIMER) | \
- (_AC(1, UL) << IRQ_S_EXT))
+ (_AC(1, UL) << IRQ_S_EXT) | \
+ (_AC(1, UL) << IRQ_PMU_OVF))
/* AIA CSR bits */
#define TOPI_IID_SHIFT 16
@@ -280,7 +281,7 @@
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_SSCOUNTOVF 0xda0
+#define CSR_SCOUNTOVF 0xda0
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index 9792e329343e..1baa86dfe029 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -93,6 +93,7 @@
#define EXIT_REASON_TPAUSE 68
#define EXIT_REASON_BUS_LOCK 74
#define EXIT_REASON_NOTIFY 75
+#define EXIT_REASON_SEAMCALL 76
#define EXIT_REASON_TDCALL 77
#define EXIT_REASON_MSR_READ_IMM 84
#define EXIT_REASON_MSR_WRITE_IMM 85
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 009633294b09..35aeeaf5f711 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -182,7 +182,7 @@ bpftool prog tracelog
bpftool prog tracelog { stdout | stderr } *PROG*
Dump the BPF stream of the program. BPF programs can write to these streams
- at runtime with the **bpf_stream_vprintk**\ () kfunc. The kernel may write
+ at runtime with the **bpf_stream_vprintk_impl**\ () kfunc. The kernel may write
error messages to the standard error stream. This facility should be used
only for debugging purposes.
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 49b0add392b1..95646290cb89 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -107,7 +107,7 @@ all: $(FILES)
__BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
- BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
+ BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -ldl -lz -llzma -lzstd
__BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
@@ -115,7 +115,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
###############################
$(OUTPUT)test-all.bin:
- $(BUILD_ALL) || $(BUILD_ALL) -lopcodes -liberty
+ $(BUILD_ALL)
$(OUTPUT)test-hello.bin:
$(BUILD)
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 048c8de1a130..e0b579a1df4f 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_NETDEV_H
#define _UAPI_LINUX_NETDEV_H
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 80c028540656..d4e4e388e625 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -315,20 +315,20 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})
-extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
- __u32 len__sz, void *aux__prog) __weak __ksym;
-
-#define bpf_stream_printk(stream_id, fmt, args...) \
-({ \
- static const char ___fmt[] = fmt; \
- unsigned long long ___param[___bpf_narg(args)]; \
- \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- ___bpf_fill(___param, args); \
- _Pragma("GCC diagnostic pop") \
- \
- bpf_stream_vprintk(stream_id, ___fmt, ___param, sizeof(___param), NULL);\
+extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
+ __u32 len__sz, void *aux__prog) __weak __ksym;
+
+#define bpf_stream_printk(stream_id, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_stream_vprintk_impl(stream_id, ___fmt, ___param, sizeof(___param), NULL); \
})
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index 31ed20c0f3f8..a40591e513b7 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -12,7 +12,7 @@ endif
libdir ?= $(prefix)/$(libdir_relative)
includedir ?= $(prefix)/include
-SUBDIRS = lib generated samples ynltool
+SUBDIRS = lib generated samples ynltool tests
all: $(SUBDIRS) libynl.a
@@ -49,5 +49,9 @@ install: libynl.a lib/*.h
@echo -e "\tINSTALL pyynl"
@pip install --prefix=$(DESTDIR)$(prefix) .
@make -C generated install
+ @make -C tests install
-.PHONY: all clean distclean install $(SUBDIRS)
+run_tests:
+ @$(MAKE) -C tests run_tests
+
+.PHONY: all clean distclean install run_tests $(SUBDIRS)
diff --git a/tools/net/ynl/pyynl/cli.py b/tools/net/ynl/pyynl/cli.py
index 8c192e900bd3..ff81ff083764 100755
--- a/tools/net/ynl/pyynl/cli.py
+++ b/tools/net/ynl/pyynl/cli.py
@@ -7,6 +7,7 @@ import os
import pathlib
import pprint
import sys
+import textwrap
sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
from lib import YnlFamily, Netlink, NlError
@@ -39,6 +40,60 @@ class YnlEncoder(json.JSONEncoder):
return json.JSONEncoder.default(self, obj)
+def print_attr_list(ynl, attr_names, attr_set, indent=2):
+ """Print a list of attributes with their types and documentation."""
+ prefix = ' ' * indent
+ for attr_name in attr_names:
+ if attr_name in attr_set.attrs:
+ attr = attr_set.attrs[attr_name]
+ attr_info = f'{prefix}- {attr_name}: {attr.type}'
+ if 'enum' in attr.yaml:
+ enum_name = attr.yaml['enum']
+ attr_info += f" (enum: {enum_name})"
+ # Print enum values if available
+ if enum_name in ynl.consts:
+ const = ynl.consts[enum_name]
+ enum_values = list(const.entries.keys())
+ attr_info += f"\n{prefix} {const.type.capitalize()}: {', '.join(enum_values)}"
+
+ # Show nested attributes reference and recursively display them
+ nested_set_name = None
+ if attr.type == 'nest' and 'nested-attributes' in attr.yaml:
+ nested_set_name = attr.yaml['nested-attributes']
+ attr_info += f" -> {nested_set_name}"
+
+ if attr.yaml.get('doc'):
+ doc_text = textwrap.indent(attr.yaml['doc'], prefix + ' ')
+ attr_info += f"\n{doc_text}"
+ print(attr_info)
+
+ # Recursively show nested attributes
+ if nested_set_name in ynl.attr_sets:
+ nested_set = ynl.attr_sets[nested_set_name]
+ # Filter out 'unspec' and other unused attrs
+ nested_names = [n for n in nested_set.attrs.keys()
+ if nested_set.attrs[n].type != 'unused']
+ if nested_names:
+ print_attr_list(ynl, nested_names, nested_set, indent + 4)
+
+
+def print_mode_attrs(ynl, mode, mode_spec, attr_set, print_request=True):
+ """Print a given mode (do/dump/event/notify)."""
+ mode_title = mode.capitalize()
+
+ if print_request and 'request' in mode_spec and 'attributes' in mode_spec['request']:
+ print(f'\n{mode_title} request attributes:')
+ print_attr_list(ynl, mode_spec['request']['attributes'], attr_set)
+
+ if 'reply' in mode_spec and 'attributes' in mode_spec['reply']:
+ print(f'\n{mode_title} reply attributes:')
+ print_attr_list(ynl, mode_spec['reply']['attributes'], attr_set)
+
+ if 'attributes' in mode_spec:
+ print(f'\n{mode_title} attributes:')
+ print_attr_list(ynl, mode_spec['attributes'], attr_set)
+
+
def main():
description = """
YNL CLI utility - a general purpose netlink utility that uses YAML
@@ -70,6 +125,8 @@ def main():
group.add_argument('--dump', dest='dump', metavar='DUMP-OPERATION', type=str)
group.add_argument('--list-ops', action='store_true')
group.add_argument('--list-msgs', action='store_true')
+ group.add_argument('--list-attrs', dest='list_attrs', metavar='OPERATION', type=str,
+ help='List attributes for an operation')
parser.add_argument('--duration', dest='duration', type=int,
help='when subscribed, watch for DURATION seconds')
@@ -135,6 +192,28 @@ def main():
for op_name, op in ynl.msgs.items():
print(op_name, " [", ", ".join(op.modes), "]")
+ if args.list_attrs:
+ op = ynl.msgs.get(args.list_attrs)
+ if not op:
+ print(f'Operation {args.list_attrs} not found')
+ exit(1)
+
+ print(f'Operation: {op.name}')
+ print(op.yaml['doc'])
+
+ for mode in ['do', 'dump', 'event']:
+ if mode in op.yaml:
+ print_mode_attrs(ynl, mode, op.yaml[mode], op.attr_set, True)
+
+ if 'notify' in op.yaml:
+ mode_spec = op.yaml['notify']
+ ref_spec = ynl.msgs.get(mode_spec).yaml.get('do')
+ if ref_spec:
+ print_mode_attrs(ynl, 'notify', ref_spec, op.attr_set, False)
+
+ if 'mcgrp' in op.yaml:
+ print(f"\nMulticast group: {op.yaml['mcgrp']}")
+
try:
if args.do:
reply = ynl.do(args.do, attrs, args.flags)
diff --git a/tools/net/ynl/pyynl/lib/ynl.py b/tools/net/ynl/pyynl/lib/ynl.py
index 225baad3c8f8..36d36eb7e3b8 100644
--- a/tools/net/ynl/pyynl/lib/ynl.py
+++ b/tools/net/ynl/pyynl/lib/ynl.py
@@ -985,6 +985,15 @@ class YnlFamily(SpecFamily):
raw = bytes.fromhex(string)
else:
raw = int(string, 16)
+ elif attr_spec.display_hint == 'mac':
+ # Parse MAC address in format "00:11:22:33:44:55" or "001122334455"
+ if ':' in string:
+ mac_bytes = [int(x, 16) for x in string.split(':')]
+ else:
+ if len(string) % 2 != 0:
+ raise Exception(f"Invalid MAC address format: {string}")
+ mac_bytes = [int(string[i:i+2], 16) for i in range(0, len(string), 2)]
+ raw = bytes(mac_bytes)
else:
raise Exception(f"Display hint '{attr_spec.display_hint}' not implemented"
f" when parsing '{attr_spec['name']}'")
diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index aadeb3abcad8..b517d0c605ad 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -1205,7 +1205,7 @@ class SubMessage(SpecSubMessage):
class Family(SpecFamily):
- def __init__(self, file_name, exclude_ops):
+ def __init__(self, file_name, exclude_ops, fn_prefix):
# Added by resolve:
self.c_name = None
delattr(self, "c_name")
@@ -1237,6 +1237,8 @@ class Family(SpecFamily):
else:
self.uapi_header_name = self.ident_name
+ self.fn_prefix = fn_prefix if fn_prefix else f'{self.ident_name}-nl'
+
def resolve(self):
self.resolve_up(super())
@@ -2911,12 +2913,12 @@ def print_kernel_op_table_fwd(family, cw, terminate):
continue
if 'do' in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-doit")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-doit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
if 'dump' in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-dumpit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
cw.nl()
@@ -2942,7 +2944,7 @@ def print_kernel_op_table(family, cw):
for x in op['dont-validate']])), )
for op_mode in ['do', 'dump']:
if op_mode in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-{op_mode}it")
members.append((op_mode + 'it', name))
if family.kernel_policy == 'per-op':
struct = Struct(family, op['attribute-set'],
@@ -2980,7 +2982,7 @@ def print_kernel_op_table(family, cw):
members.append(('validate',
' | '.join([c_upper('genl-dont-validate-' + x)
for x in dont_validate])), )
- name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-{op_mode}it")
if 'pre' in op[op_mode]:
members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
members.append((op_mode + 'it', name))
@@ -3402,6 +3404,7 @@ def main():
help='Do not overwrite the output file if the new output is identical to the old')
parser.add_argument('--exclude-op', action='append', default=[])
parser.add_argument('-o', dest='out_file', type=str, default=None)
+ parser.add_argument('--function-prefix', dest='fn_prefix', type=str)
args = parser.parse_args()
if args.header is None:
@@ -3410,7 +3413,7 @@ def main():
exclude_ops = [re.compile(expr) for expr in args.exclude_op]
try:
- parsed = Family(args.spec, exclude_ops)
+ parsed = Family(args.spec, exclude_ops, args.fn_prefix)
if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
print('Spec license:', parsed.license)
print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
@@ -3430,11 +3433,16 @@ def main():
cw.p("/* Do not edit directly, auto-generated from: */")
cw.p(f"/*\t{spec_kernel} */")
cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
- if args.exclude_op or args.user_header:
+ if args.exclude_op or args.user_header or args.fn_prefix:
line = ''
- line += ' --user-header '.join([''] + args.user_header)
- line += ' --exclude-op '.join([''] + args.exclude_op)
+ if args.user_header:
+ line += ' --user-header '.join([''] + args.user_header)
+ if args.exclude_op:
+ line += ' --exclude-op '.join([''] + args.exclude_op)
+ if args.fn_prefix:
+ line += f' --function-prefix {args.fn_prefix}'
cw.p(f'/* YNL-ARG{line} */')
+ cw.p('/* To regenerate run: tools/net/ynl/ynl-regen.sh */')
cw.nl()
if args.mode == 'uapi':
diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore
index 7f5fca7682d7..05087ee323ba 100644
--- a/tools/net/ynl/samples/.gitignore
+++ b/tools/net/ynl/samples/.gitignore
@@ -7,3 +7,4 @@ rt-addr
rt-link
rt-route
tc
+tc-filter-add
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
index c9494a564da4..d76cbd41cbb1 100644
--- a/tools/net/ynl/samples/Makefile
+++ b/tools/net/ynl/samples/Makefile
@@ -19,6 +19,7 @@ include $(wildcard *.d)
all: $(BINS)
CFLAGS_page-pool=$(CFLAGS_netdev)
+CFLAGS_tc-filter-add:=$(CFLAGS_tc)
$(BINS): ../lib/ynl.a ../generated/protos.a $(SRCS)
@echo -e '\tCC sample $@'
diff --git a/tools/net/ynl/samples/tc-filter-add.c b/tools/net/ynl/samples/tc-filter-add.c
new file mode 100644
index 000000000000..1f9cd3f62df6
--- /dev/null
+++ b/tools/net/ynl/samples/tc-filter-add.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <linux/pkt_sched.h>
+#include <linux/tc_act/tc_vlan.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/if_ether.h>
+#include <net/if.h>
+
+#include <ynl.h>
+
+#include "tc-user.h"
+
+#define TC_HANDLE (0xFFFF << 16)
+
+const char *vlan_act_name(struct tc_vlan *p)
+{
+ switch (p->v_action) {
+ case TCA_VLAN_ACT_POP:
+ return "pop";
+ case TCA_VLAN_ACT_PUSH:
+ return "push";
+ case TCA_VLAN_ACT_MODIFY:
+ return "modify";
+ default:
+ break;
+ }
+
+ return "not supported";
+}
+
+const char *gact_act_name(struct tc_gact *p)
+{
+ switch (p->action) {
+ case TC_ACT_SHOT:
+ return "drop";
+ case TC_ACT_OK:
+ return "ok";
+ case TC_ACT_PIPE:
+ return "pipe";
+ default:
+ break;
+ }
+
+ return "not supported";
+}
+
+static void print_vlan(struct tc_act_vlan_attrs *vlan)
+{
+ printf("%s ", vlan_act_name(vlan->parms));
+ if (vlan->_present.push_vlan_id)
+ printf("id %u ", vlan->push_vlan_id);
+ if (vlan->_present.push_vlan_protocol)
+ printf("protocol %#x ", ntohs(vlan->push_vlan_protocol));
+ if (vlan->_present.push_vlan_priority)
+ printf("priority %u ", vlan->push_vlan_priority);
+}
+
+static void print_gact(struct tc_act_gact_attrs *gact)
+{
+ struct tc_gact *p = gact->parms;
+
+ printf("%s ", gact_act_name(p));
+}
+
+static void flower_print(struct tc_flower_attrs *flower, const char *kind)
+{
+ struct tc_act_attrs *a;
+ unsigned int i;
+
+ printf("%s:\n", kind);
+
+ if (flower->_present.key_vlan_id)
+ printf(" vlan_id: %u\n", flower->key_vlan_id);
+ if (flower->_present.key_vlan_prio)
+ printf(" vlan_prio: %u\n", flower->key_vlan_prio);
+ if (flower->_present.key_num_of_vlans)
+ printf(" num_of_vlans: %u\n", flower->key_num_of_vlans);
+
+ for (i = 0; i < flower->_count.act; i++) {
+ a = &flower->act[i];
+ printf("action order: %i %s ", i + 1, a->kind);
+ if (a->options._present.vlan)
+ print_vlan(&a->options.vlan);
+ else if (a->options._present.gact)
+ print_gact(&a->options.gact);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+static void tc_filter_print(struct tc_gettfilter_rsp *f)
+{
+ struct tc_options_msg *opt = &f->options;
+
+ if (opt->_present.flower)
+ flower_print(&opt->flower, f->kind);
+ else if (f->_len.kind)
+ printf("%s pref %u proto: %#x\n", f->kind,
+ (f->_hdr.tcm_info >> 16),
+ ntohs(TC_H_MIN(f->_hdr.tcm_info)));
+}
+
+static int tc_filter_add(struct ynl_sock *ys, int ifi)
+{
+ struct tc_newtfilter_req *req;
+ struct tc_act_attrs *acts;
+ struct tc_vlan p = {
+ .action = TC_ACT_PIPE,
+ .v_action = TCA_VLAN_ACT_PUSH
+ };
+ __u16 flags = NLM_F_REQUEST | NLM_F_EXCL | NLM_F_CREATE;
+ int ret;
+
+ req = tc_newtfilter_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_newtfilter_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ acts = tc_act_attrs_alloc(3);
+ if (!acts) {
+ fprintf(stderr, "tc_act_attrs_alloc\n");
+ tc_newtfilter_req_free(req);
+ return -1;
+ }
+ memset(acts, 0, sizeof(*acts) * 3);
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
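+	/* tcm_info packs the filter preference (upper 16 bits, here 1) and
+	 * the match protocol (lower 16 bits, 802.1Q) for the new filter.
+	 */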
+ req->_hdr.tcm_info = TC_H_MAKE(1 << 16, htons(ETH_P_8021Q));
+ req->chain = 0;
+
+ tc_newtfilter_req_set_nlflags(req, flags);
+ tc_newtfilter_req_set_kind(req, "flower");
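+	/* Match 802.1Q frames carrying VLAN id 100, priority 5 and three VLAN tags. */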
+ tc_newtfilter_req_set_options_flower_key_vlan_id(req, 100);
+ tc_newtfilter_req_set_options_flower_key_vlan_prio(req, 5);
+ tc_newtfilter_req_set_options_flower_key_num_of_vlans(req, 3);
+
+ __tc_newtfilter_req_set_options_flower_act(req, acts, 3);
+
+	/* Skip action at index 0: in TC the action array index starts at 1,
+	 * with each index defining the action's order. In contrast, YNL
+	 * indexed arrays start at index 0.
+	 */
+ tc_act_attrs_set_kind(&acts[1], "vlan");
+ tc_act_attrs_set_options_vlan_parms(&acts[1], &p, sizeof(p));
+ tc_act_attrs_set_options_vlan_push_vlan_id(&acts[1], 200);
+ tc_act_attrs_set_kind(&acts[2], "vlan");
+ tc_act_attrs_set_options_vlan_parms(&acts[2], &p, sizeof(p));
+ tc_act_attrs_set_options_vlan_push_vlan_id(&acts[2], 300);
+
+ tc_newtfilter_req_set_options_flower_flags(req, 0);
+ tc_newtfilter_req_set_options_flower_key_eth_type(req, htons(0x8100));
+
+ ret = tc_newtfilter(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_newtfilter: %s\n", ys->err.msg);
+
+ tc_newtfilter_req_free(req);
+
+ return ret;
+}
+
+static int tc_filter_show(struct ynl_sock *ys, int ifi)
+{
+ struct tc_gettfilter_req_dump *req;
+ struct tc_gettfilter_list *rsp;
+
+ req = tc_gettfilter_req_dump_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_gettfilter_req_dump_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ req->_present.chain = 1;
+ req->chain = 0;
+
+ rsp = tc_gettfilter_dump(ys, req);
+ tc_gettfilter_req_dump_free(req);
+ if (!rsp) {
+ fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ return -1;
+ }
+
+ if (ynl_dump_empty(rsp))
+ fprintf(stderr, "Error: no filters reported\n");
+ else
+ ynl_dump_foreach(rsp, flt) tc_filter_print(flt);
+
+ tc_gettfilter_list_free(rsp);
+
+ return 0;
+}
+
+static int tc_filter_del(struct ynl_sock *ys, int ifi)
+{
+ struct tc_deltfilter_req *req;
+ __u16 flags = NLM_F_REQUEST;
+ int ret;
+
+ req = tc_deltfilter_req_alloc();
+ if (!req) {
+		fprintf(stderr, "tc_deltfilter_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ req->_hdr.tcm_info = TC_H_MAKE(1 << 16, htons(ETH_P_8021Q));
+ tc_deltfilter_req_set_nlflags(req, flags);
+
+ ret = tc_deltfilter(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_deltfilter failed: %s\n", ys->err.msg);
+
+ tc_deltfilter_req_free(req);
+
+ return ret;
+}
+
+static int tc_clsact_add(struct ynl_sock *ys, int ifi)
+{
+ struct tc_newqdisc_req *req;
+ __u16 flags = NLM_F_REQUEST | NLM_F_EXCL | NLM_F_CREATE;
+ int ret;
+
+ req = tc_newqdisc_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_newqdisc_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_CLSACT;
+ req->_hdr.tcm_handle = TC_HANDLE;
+ tc_newqdisc_req_set_nlflags(req, flags);
+ tc_newqdisc_req_set_kind(req, "clsact");
+
+ ret = tc_newqdisc(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_newqdisc failed: %s\n", ys->err.msg);
+
+ tc_newqdisc_req_free(req);
+
+ return ret;
+}
+
+static int tc_clsact_del(struct ynl_sock *ys, int ifi)
+{
+ struct tc_delqdisc_req *req;
+ __u16 flags = NLM_F_REQUEST;
+ int ret;
+
+ req = tc_delqdisc_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_delqdisc_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_CLSACT;
+ req->_hdr.tcm_handle = TC_HANDLE;
+ tc_delqdisc_req_set_nlflags(req, flags);
+
+ ret = tc_delqdisc(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_delqdisc failed: %s\n", ys->err.msg);
+
+ tc_delqdisc_req_free(req);
+
+ return ret;
+}
+
+static int tc_filter_config(struct ynl_sock *ys, int ifi)
+{
+ int ret = 0;
+
+ if (tc_filter_add(ys, ifi))
+ return -1;
+
+ ret = tc_filter_show(ys, ifi);
+
+ if (tc_filter_del(ys, ifi))
+ return -1;
+
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ifi, ret = 0;
+
+ if (argc < 2) {
+ fprintf(stderr, "Usage: %s <interface_name>\n", argv[0]);
+ return 1;
+ }
+ ifi = if_nametoindex(argv[1]);
+ if (!ifi) {
+ perror("if_nametoindex");
+ return 1;
+ }
+
+ ys = ynl_sock_create(&ynl_tc_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return 1;
+ }
+
+ if (tc_clsact_add(ys, ifi)) {
+ ret = 2;
+ goto err_destroy;
+ }
+
+ if (tc_filter_config(ys, ifi))
+ ret = 3;
+
+ if (tc_clsact_del(ys, ifi))
+ ret = 4;
+
+err_destroy:
+ ynl_sock_destroy(ys);
+ return ret;
+}
diff --git a/tools/net/ynl/tests/Makefile b/tools/net/ynl/tests/Makefile
new file mode 100644
index 000000000000..c1df2e001255
--- /dev/null
+++ b/tools/net/ynl/tests/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for YNL tests
+
+TESTS := \
+ test_ynl_cli.sh \
+ test_ynl_ethtool.sh \
+# end of TESTS
+
+all: $(TESTS)
+
+run_tests:
+ @for test in $(TESTS); do \
+ ./$$test; \
+ done
+
+install: $(TESTS)
+ @mkdir -p $(DESTDIR)/usr/bin
+ @mkdir -p $(DESTDIR)/usr/share/kselftest
+ @cp ../../../testing/selftests/kselftest/ktap_helpers.sh $(DESTDIR)/usr/share/kselftest/
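+	@# Rewrite the tool and helper paths so the installed tests invoke the installed ynl tools.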
+ @for test in $(TESTS); do \
+ name=$$(basename $$test .sh); \
+ sed -e 's|^ynl=.*|ynl="ynl"|' \
+ -e 's|^ynl_ethtool=.*|ynl_ethtool="ynl-ethtool"|' \
+ -e 's|KSELFTEST_KTAP_HELPERS=.*|KSELFTEST_KTAP_HELPERS="/usr/share/kselftest/ktap_helpers.sh"|' \
+ $$test > $(DESTDIR)/usr/bin/$$name; \
+ chmod +x $(DESTDIR)/usr/bin/$$name; \
+ done
+
+clean distclean:
+ @# Nothing to clean
+
+.PHONY: all install clean run_tests
diff --git a/tools/net/ynl/tests/config b/tools/net/ynl/tests/config
new file mode 100644
index 000000000000..339f1309c03f
--- /dev/null
+++ b/tools/net/ynl/tests/config
@@ -0,0 +1,6 @@
+CONFIG_DUMMY=m
+CONFIG_INET_DIAG=y
+CONFIG_IPV6=y
+CONFIG_NET_NS=y
+CONFIG_NETDEVSIM=m
+CONFIG_VETH=m
diff --git a/tools/net/ynl/tests/test_ynl_cli.sh b/tools/net/ynl/tests/test_ynl_cli.sh
new file mode 100755
index 000000000000..7c0722a08117
--- /dev/null
+++ b/tools/net/ynl/tests/test_ynl_cli.sh
@@ -0,0 +1,327 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Test YNL CLI functionality
+
+# Load KTAP test helpers
+KSELFTEST_KTAP_HELPERS="$(dirname "$(realpath "$0")")/../../../testing/selftests/kselftest/ktap_helpers.sh"
+# shellcheck source=../../../testing/selftests/kselftest/ktap_helpers.sh
+source "$KSELFTEST_KTAP_HELPERS"
+
+# Default ynl path for direct execution; can be overridden by make install
+ynl="../pyynl/cli.py"
+
+readonly NSIM_ID="1338"
+readonly NSIM_DEV_NAME="nsim${NSIM_ID}"
+readonly VETH_A="veth_a"
+readonly VETH_B="veth_b"
+
+testns="ynl-$(mktemp -u XXXXXX)"
+TESTS_NO=0
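+# Each test function below is followed by a TESTS_NO bump so the KTAP plan
+# always matches the number of tests that will run.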
+
+# Test listing available families
+cli_list_families()
+{
+ if $ynl --list-families &>/dev/null; then
+ ktap_test_pass "YNL CLI list families"
+ else
+ ktap_test_fail "YNL CLI list families"
+ fi
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test netdev family operations (dev-get, queue-get)
+cli_netdev_ops()
+{
+ local dev_output
+ local ifindex
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ dev_output=$(ip netns exec "$testns" $ynl --family netdev \
+ --do dev-get --json "{\"ifindex\": $ifindex}" 2>/dev/null)
+
+ if ! echo "$dev_output" | grep -q "ifindex"; then
+ ktap_test_fail "YNL CLI netdev operations (netdev dev-get output missing ifindex)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl --family netdev \
+ --dump queue-get --json "{\"ifindex\": $ifindex}" &>/dev/null; then
+ ktap_test_fail "YNL CLI netdev operations (failed to get netdev queue info)"
+ return
+ fi
+
+ ktap_test_pass "YNL CLI netdev operations"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test ethtool family operations (rings-get, linkinfo-get)
+cli_ethtool_ops()
+{
+ local rings_output
+ local linkinfo_output
+
+ rings_output=$(ip netns exec "$testns" $ynl --family ethtool \
+ --do rings-get --json "{\"header\": {\"dev-name\": \"$NSIM_DEV_NAME\"}}" 2>/dev/null)
+
+ if ! echo "$rings_output" | grep -q "header"; then
+ ktap_test_fail "YNL CLI ethtool operations (ethtool rings-get output missing header)"
+ return
+ fi
+
+ linkinfo_output=$(ip netns exec "$testns" $ynl --family ethtool \
+ --do linkinfo-get --json "{\"header\": {\"dev-name\": \"$VETH_A\"}}" 2>/dev/null)
+
+ if ! echo "$linkinfo_output" | grep -q "header"; then
+ ktap_test_fail "YNL CLI ethtool operations (ethtool linkinfo-get output missing header)"
+ return
+ fi
+
+ ktap_test_pass "YNL CLI ethtool operations"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-route family operations
+cli_rt_route_ops()
+{
+ local ifindex
+
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-route"; then
+ ktap_test_skip "YNL CLI rt-route operations (rt-route family not available)"
+ return
+ fi
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ # Add route: 192.0.2.0/24 dev $dev scope link
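+	# rtm-family 2 = AF_INET, rtm-scope 253 = link, rtm-type 1 = unicast,
+	# rtm-protocol 3 = boot, rtm-table 254 = main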
+ if ! ip netns exec "$testns" $ynl --family rt-route --do newroute --create \
+ --json "{\"dst\": \"192.0.2.0\", \"oif\": $ifindex, \"rtm-dst-len\": 24, \"rtm-family\": 2, \"rtm-scope\": 253, \"rtm-type\": 1, \"rtm-protocol\": 3, \"rtm-table\": 254}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-route operations (failed to add route)"
+ return
+ fi
+
+ local route_output
+ route_output=$(ip netns exec "$testns" $ynl --family rt-route --dump getroute 2>/dev/null)
+ if echo "$route_output" | grep -q "192.0.2.0"; then
+ ktap_test_pass "YNL CLI rt-route operations"
+ else
+ ktap_test_fail "YNL CLI rt-route operations (failed to verify route)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-route --do delroute \
+ --json "{\"dst\": \"192.0.2.0\", \"oif\": $ifindex, \"rtm-dst-len\": 24, \"rtm-family\": 2, \"rtm-scope\": 253, \"rtm-type\": 1, \"rtm-protocol\": 3, \"rtm-table\": 254}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-addr family operations
+cli_rt_addr_ops()
+{
+ local ifindex
+
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-addr"; then
+ ktap_test_skip "YNL CLI rt-addr operations (rt-addr family not available)"
+ return
+ fi
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ if ! ip netns exec "$testns" $ynl --family rt-addr --do newaddr \
+ --json "{\"ifa-index\": $ifindex, \"local\": \"192.0.2.100\", \"ifa-prefixlen\": 24, \"ifa-family\": 2}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-addr operations (failed to add address)"
+ return
+ fi
+
+ local addr_output
+ addr_output=$(ip netns exec "$testns" $ynl --family rt-addr --dump getaddr 2>/dev/null)
+ if echo "$addr_output" | grep -q "192.0.2.100"; then
+ ktap_test_pass "YNL CLI rt-addr operations"
+ else
+ ktap_test_fail "YNL CLI rt-addr operations (failed to verify address)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-addr --do deladdr \
+ --json "{\"ifa-index\": $ifindex, \"local\": \"192.0.2.100\", \"ifa-prefixlen\": 24, \"ifa-family\": 2}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-link family operations
+cli_rt_link_ops()
+{
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-link"; then
+ ktap_test_skip "YNL CLI rt-link operations (rt-link family not available)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl --family rt-link --do newlink --create \
+ --json "{\"ifname\": \"dummy0\", \"linkinfo\": {\"kind\": \"dummy\"}}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-link operations (failed to add link)"
+ return
+ fi
+
+ local link_output
+ link_output=$(ip netns exec "$testns" $ynl --family rt-link --dump getlink 2>/dev/null)
+ if echo "$link_output" | grep -q "$NSIM_DEV_NAME" && echo "$link_output" | grep -q "dummy0"; then
+ ktap_test_pass "YNL CLI rt-link operations"
+ else
+ ktap_test_fail "YNL CLI rt-link operations (failed to verify link)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-link --do dellink \
+ --json "{\"ifname\": \"dummy0\"}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-neigh family operations
+cli_rt_neigh_ops()
+{
+ local ifindex
+
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-neigh"; then
+ ktap_test_skip "YNL CLI rt-neigh operations (rt-neigh family not available)"
+ return
+ fi
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ # Add neighbor: 192.0.2.1 dev nsim1338 lladdr 11:22:33:44:55:66 PERMANENT
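+	# ndm-family 2 = AF_INET, ndm-state 128 = NUD_PERMANENT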
+ if ! ip netns exec "$testns" $ynl --family rt-neigh --do newneigh --create \
+ --json "{\"ndm-ifindex\": $ifindex, \"dst\": \"192.0.2.1\", \"lladdr\": \"11:22:33:44:55:66\", \"ndm-family\": 2, \"ndm-state\": 128}" &>/dev/null; then
+		ktap_test_fail "YNL CLI rt-neigh operations (failed to add neighbor)"
+		return
+	fi
+
+ local neigh_output
+ neigh_output=$(ip netns exec "$testns" $ynl --family rt-neigh --dump getneigh 2>/dev/null)
+ if echo "$neigh_output" | grep -q "192.0.2.1"; then
+ ktap_test_pass "YNL CLI rt-neigh operations"
+ else
+ ktap_test_fail "YNL CLI rt-neigh operations (failed to verify neighbor)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-neigh --do delneigh \
+ --json "{\"ndm-ifindex\": $ifindex, \"dst\": \"192.0.2.1\", \"lladdr\": \"11:22:33:44:55:66\", \"ndm-family\": 2}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-rule family operations
+cli_rt_rule_ops()
+{
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-rule"; then
+ ktap_test_skip "YNL CLI rt-rule operations (rt-rule family not available)"
+ return
+ fi
+
+ # Add rule: from 192.0.2.0/24 lookup 100 none
+ if ! ip netns exec "$testns" $ynl --family rt-rule --do newrule \
+ --json "{\"family\": 2, \"src-len\": 24, \"src\": \"192.0.2.0\", \"table\": 100}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-rule operations (failed to add rule)"
+ return
+ fi
+
+ local rule_output
+ rule_output=$(ip netns exec "$testns" $ynl --family rt-rule --dump getrule 2>/dev/null)
+ if echo "$rule_output" | grep -q "192.0.2.0"; then
+ ktap_test_pass "YNL CLI rt-rule operations"
+ else
+ ktap_test_fail "YNL CLI rt-rule operations (failed to verify rule)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-rule --do delrule \
+ --json "{\"family\": 2, \"src-len\": 24, \"src\": \"192.0.2.0\", \"table\": 100}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test nlctrl family operations
+cli_nlctrl_ops()
+{
+ local family_output
+
+ if ! family_output=$($ynl --family nlctrl \
+ --do getfamily --json "{\"family-name\": \"netdev\"}" 2>/dev/null); then
+ ktap_test_fail "YNL CLI nlctrl getfamily (failed to get nlctrl family info)"
+ return
+ fi
+
+ if ! echo "$family_output" | grep -q "family-name"; then
+ ktap_test_fail "YNL CLI nlctrl getfamily (nlctrl getfamily output missing family-name)"
+ return
+ fi
+
+ if ! echo "$family_output" | grep -q "family-id"; then
+ ktap_test_fail "YNL CLI nlctrl getfamily (nlctrl getfamily output missing family-id)"
+ return
+ fi
+
+ ktap_test_pass "YNL CLI nlctrl getfamily"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+setup()
+{
+ modprobe netdevsim &> /dev/null
+ if ! [ -f /sys/bus/netdevsim/new_device ]; then
+ ktap_skip_all "netdevsim module not available"
+ exit "$KSFT_SKIP"
+ fi
+
+ if ! ip netns add "$testns" 2>/dev/null; then
+ ktap_skip_all "failed to create test namespace"
+ exit "$KSFT_SKIP"
+ fi
+
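+	# new_device expects "<id> <port count>"; create a single-port netdevsim device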
+ echo "$NSIM_ID 1" | ip netns exec "$testns" tee /sys/bus/netdevsim/new_device >/dev/null 2>&1 || {
+ ktap_skip_all "failed to create netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ local dev
+ dev=$(ip netns exec "$testns" ls /sys/bus/netdevsim/devices/netdevsim$NSIM_ID/net 2>/dev/null | head -1)
+ if [[ -z "$dev" ]]; then
+ ktap_skip_all "failed to find netdevsim device"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -netns "$testns" link set dev "$dev" name "$NSIM_DEV_NAME" 2>/dev/null || {
+ ktap_skip_all "failed to rename netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ ip -netns "$testns" link set dev "$NSIM_DEV_NAME" up 2>/dev/null
+
+ if ! ip -n "$testns" link add "$VETH_A" type veth peer name "$VETH_B" 2>/dev/null; then
+ ktap_skip_all "failed to create veth pair"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -n "$testns" link set "$VETH_A" up 2>/dev/null
+ ip -n "$testns" link set "$VETH_B" up 2>/dev/null
+}
+
+cleanup()
+{
+ ip netns exec "$testns" bash -c "echo $NSIM_ID > /sys/bus/netdevsim/del_device" 2>/dev/null || true
+ ip netns del "$testns" 2>/dev/null || true
+}
+
+# Check if ynl command is available
+if ! command -v $ynl &>/dev/null && [[ ! -x $ynl ]]; then
+ ktap_skip_all "ynl command not found: $ynl"
+ exit "$KSFT_SKIP"
+fi
+
+trap cleanup EXIT
+
+ktap_print_header
+setup
+ktap_set_plan "${TESTS_NO}"
+
+cli_list_families
+cli_netdev_ops
+cli_ethtool_ops
+cli_rt_route_ops
+cli_rt_addr_ops
+cli_rt_link_ops
+cli_rt_neigh_ops
+cli_rt_rule_ops
+cli_nlctrl_ops
+
+ktap_finished
diff --git a/tools/net/ynl/tests/test_ynl_ethtool.sh b/tools/net/ynl/tests/test_ynl_ethtool.sh
new file mode 100755
index 000000000000..b826269017f4
--- /dev/null
+++ b/tools/net/ynl/tests/test_ynl_ethtool.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Test YNL ethtool functionality
+
+# Load KTAP test helpers
+KSELFTEST_KTAP_HELPERS="$(dirname "$(realpath "$0")")/../../../testing/selftests/kselftest/ktap_helpers.sh"
+# shellcheck source=../../../testing/selftests/kselftest/ktap_helpers.sh
+source "$KSELFTEST_KTAP_HELPERS"
+
+# Default ynl-ethtool path for direct execution; can be overridden by make install
+ynl_ethtool="../pyynl/ethtool.py"
+
+readonly NSIM_ID="1337"
+readonly NSIM_DEV_NAME="nsim${NSIM_ID}"
+readonly VETH_A="veth_a"
+readonly VETH_B="veth_b"
+
+testns="ynl-ethtool-$(mktemp -u XXXXXX)"
+TESTS_NO=0
+
+# Uses veth device as netdevsim doesn't support basic ethtool device info
+ethtool_device_info()
+{
+ local info_output
+
+ info_output=$(ip netns exec "$testns" $ynl_ethtool "$VETH_A" 2>/dev/null)
+
+ if ! echo "$info_output" | grep -q "Settings for"; then
+ ktap_test_fail "YNL ethtool device info (device info output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool device info"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_statistics()
+{
+ local stats_output
+
+ stats_output=$(ip netns exec "$testns" $ynl_ethtool --statistics "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$stats_output" | grep -q -E "(NIC statistics|packets|bytes)"; then
+ ktap_test_fail "YNL ethtool statistics (statistics output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool statistics"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_ring_params()
+{
+ local ring_output
+
+ ring_output=$(ip netns exec "$testns" $ynl_ethtool --show-ring "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$ring_output" | grep -q -E "(Ring parameters|RX|TX)"; then
+ ktap_test_fail "YNL ethtool ring parameters (ring parameters output missing expected content)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-ring "$NSIM_DEV_NAME" rx 64 2>/dev/null; then
+ ktap_test_fail "YNL ethtool ring parameters (set-ring command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool ring parameters (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_coalesce_params()
+{
+ if ! ip netns exec "$testns" $ynl_ethtool --show-coalesce "$NSIM_DEV_NAME" &>/dev/null; then
+ ktap_test_fail "YNL ethtool coalesce parameters (failed to get coalesce parameters)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-coalesce "$NSIM_DEV_NAME" rx-usecs 50 2>/dev/null; then
+ ktap_test_fail "YNL ethtool coalesce parameters (set-coalesce command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool coalesce parameters (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_pause_params()
+{
+ if ! ip netns exec "$testns" $ynl_ethtool --show-pause "$NSIM_DEV_NAME" &>/dev/null; then
+ ktap_test_fail "YNL ethtool pause parameters (failed to get pause parameters)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-pause "$NSIM_DEV_NAME" tx 1 rx 1 2>/dev/null; then
+ ktap_test_fail "YNL ethtool pause parameters (set-pause command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool pause parameters (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_features_info()
+{
+ local features_output
+
+ features_output=$(ip netns exec "$testns" $ynl_ethtool --show-features "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$features_output" | grep -q -E "(Features|offload)"; then
+ ktap_test_fail "YNL ethtool features info (features output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool features info (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_channels_info()
+{
+ local channels_output
+
+ channels_output=$(ip netns exec "$testns" $ynl_ethtool --show-channels "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$channels_output" | grep -q -E "(Channel|Combined|RX|TX)"; then
+ ktap_test_fail "YNL ethtool channels info (channels output missing expected content)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-channels "$NSIM_DEV_NAME" combined-count 1 2>/dev/null; then
+ ktap_test_fail "YNL ethtool channels info (set-channels command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool channels info (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_time_stamping()
+{
+ local ts_output
+
+ ts_output=$(ip netns exec "$testns" $ynl_ethtool --show-time-stamping "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$ts_output" | grep -q -E "(Time stamping|timestamping|SOF_TIMESTAMPING)"; then
+ ktap_test_fail "YNL ethtool time stamping (time stamping output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool time stamping"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+setup()
+{
+ modprobe netdevsim &> /dev/null
+ if ! [ -f /sys/bus/netdevsim/new_device ]; then
+ ktap_skip_all "netdevsim module not available"
+ exit "$KSFT_SKIP"
+ fi
+
+ if ! ip netns add "$testns" 2>/dev/null; then
+ ktap_skip_all "failed to create test namespace"
+ exit "$KSFT_SKIP"
+ fi
+
+ echo "$NSIM_ID 1" | ip netns exec "$testns" tee /sys/bus/netdevsim/new_device >/dev/null 2>&1 || {
+ ktap_skip_all "failed to create netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ local dev
+ dev=$(ip netns exec "$testns" ls /sys/bus/netdevsim/devices/netdevsim$NSIM_ID/net 2>/dev/null | head -1)
+ if [[ -z "$dev" ]]; then
+ ktap_skip_all "failed to find netdevsim device"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -netns "$testns" link set dev "$dev" name "$NSIM_DEV_NAME" 2>/dev/null || {
+ ktap_skip_all "failed to rename netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ ip -netns "$testns" link set dev "$NSIM_DEV_NAME" up 2>/dev/null
+
+ if ! ip -n "$testns" link add "$VETH_A" type veth peer name "$VETH_B" 2>/dev/null; then
+ ktap_skip_all "failed to create veth pair"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -n "$testns" link set "$VETH_A" up 2>/dev/null
+ ip -n "$testns" link set "$VETH_B" up 2>/dev/null
+}
+
+cleanup()
+{
+ ip netns exec "$testns" bash -c "echo $NSIM_ID > /sys/bus/netdevsim/del_device" 2>/dev/null || true
+ ip netns del "$testns" 2>/dev/null || true
+}
+
+# Check if ynl-ethtool command is available
+if ! command -v $ynl_ethtool &>/dev/null && [[ ! -x $ynl_ethtool ]]; then
+ ktap_skip_all "ynl-ethtool command not found: $ynl_ethtool"
+ exit "$KSFT_SKIP"
+fi
+
+trap cleanup EXIT
+
+ktap_print_header
+setup
+ktap_set_plan "${TESTS_NO}"
+
+ethtool_device_info
+ethtool_statistics
+ethtool_ring_params
+ethtool_coalesce_params
+ethtool_pause_params
+ethtool_features_info
+ethtool_channels_info
+ethtool_time_stamping
+
+ktap_finished
diff --git a/tools/net/ynl/ynltool/.gitignore b/tools/net/ynl/ynltool/.gitignore
index f38848dbb0d3..690d399c921a 100644
--- a/tools/net/ynl/ynltool/.gitignore
+++ b/tools/net/ynl/ynltool/.gitignore
@@ -1 +1,2 @@
ynltool
+*.d
diff --git a/tools/net/ynl/ynltool/Makefile b/tools/net/ynl/ynltool/Makefile
index 5edaa0f93237..f5b1de32daa5 100644
--- a/tools/net/ynl/ynltool/Makefile
+++ b/tools/net/ynl/ynltool/Makefile
@@ -31,7 +31,7 @@ Q = @
$(YNLTOOL): ../libynl.a $(OBJS)
$(Q)echo -e "\tLINK $@"
- $(Q)$(CC) $(CFLAGS) -o $@ $(OBJS) ../libynl.a -lmnl -lm
+ $(Q)$(CC) $(CFLAGS) -o $@ $(OBJS) ../libynl.a -lm
%.o: %.c ../libynl.a
$(Q)echo -e "\tCC $@"
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 5700516aa84a..2dd5f5a60568 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -354,9 +354,6 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
-FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
-FEATURE_CHECK_LDFLAGS-disassembler-init-styled = -lbfd -lopcodes -ldl
-
CORE_CFLAGS += -fno-omit-frame-pointer
CORE_CFLAGS += -Wall
CORE_CFLAGS += -Wextra
@@ -930,6 +927,8 @@ ifdef BUILD_NONDISTRO
ifeq ($(feature-libbfd), 1)
EXTLIBS += -lbfd -lopcodes
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
+ FEATURE_CHECK_LDFLAGS-disassembler-init-styled = -lbfd -lopcodes -ldl
else
# we are on a system that requires -liberty and (maybe) -lz
# to link against -lbfd; test each case individually here
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 078634461df2..e8962c985d34 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1867,6 +1867,7 @@ static int __cmd_report(bool display_info)
eops.sample = process_sample_event;
eops.comm = perf_event__process_comm;
eops.mmap = perf_event__process_mmap;
+ eops.mmap2 = perf_event__process_mmap2;
eops.namespaces = perf_event__process_namespaces;
eops.tracing_data = perf_event__process_tracing_data;
session = perf_session__new(&data, &eops);
@@ -2023,6 +2024,7 @@ static int __cmd_contention(int argc, const char **argv)
eops.sample = process_sample_event;
eops.comm = perf_event__process_comm;
eops.mmap = perf_event__process_mmap;
+ eops.mmap2 = perf_event__process_mmap2;
eops.tracing_data = perf_event__process_tracing_data;
perf_env__init(&host_env);
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index 7248a74ca2a3..6dd90519f45c 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -13,15 +13,18 @@ cleanup() {
rm -f ${perfdata}
rm -f ${result}
rm -f ${errout}
- trap - EXIT TERM INT
+ trap - EXIT TERM INT ERR
}
trap_cleanup() {
+ if (( $? == 139 )); then #SIGSEGV
+ err=1
+ fi
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit ${err}
}
-trap trap_cleanup EXIT TERM INT
+trap trap_cleanup EXIT TERM INT ERR
check() {
if [ "$(id -u)" != 0 ]; then
@@ -145,7 +148,7 @@ test_aggr_cgroup()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -g -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
+ perf lock con -a -b --lock-cgroup -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -271,7 +274,7 @@ test_cgroup_filter()
return
fi
- perf lock con -a -b -g -E 1 -F wait_total -q -- perf bench sched messaging -p > /dev/null 2> ${result}
+ perf lock con -a -b --lock-cgroup -E 1 -F wait_total -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a cgroup result:" "$(cat "${result}")"
err=1
@@ -279,7 +282,7 @@ test_cgroup_filter()
fi
cgroup=$(cat "${result}" | awk '{ print $3 }')
- perf lock con -a -b -g -E 1 -G "${cgroup}" -q -- perf bench sched messaging -p > /dev/null 2> ${result}
+ perf lock con -a -b --lock-cgroup -E 1 -G "${cgroup}" -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a result with cgroup filter:" "$(cat "${cgroup}")"
err=1
@@ -338,4 +341,5 @@ test_aggr_task_stack_filter
test_cgroup_filter
test_csv_output
+cleanup
exit ${err}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4f2a6e10ed5c..4e12be579140 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1022,12 +1022,9 @@ static int write_bpf_prog_info(struct feat_fd *ff,
down_read(&env->bpf_progs.lock);
- if (env->bpf_progs.infos_cnt == 0)
- goto out;
-
ret = do_write(ff, &env->bpf_progs.infos_cnt,
sizeof(env->bpf_progs.infos_cnt));
- if (ret < 0)
+ if (ret < 0 || env->bpf_progs.infos_cnt == 0)
goto out;
root = &env->bpf_progs.infos;
@@ -1067,13 +1064,10 @@ static int write_bpf_btf(struct feat_fd *ff,
down_read(&env->bpf_progs.lock);
- if (env->bpf_progs.btfs_cnt == 0)
- goto out;
-
ret = do_write(ff, &env->bpf_progs.btfs_cnt,
sizeof(env->bpf_progs.btfs_cnt));
- if (ret < 0)
+ if (ret < 0 || env->bpf_progs.btfs_cnt == 0)
goto out;
root = &env->bpf_progs.btfs;
diff --git a/tools/perf/util/libbfd.c b/tools/perf/util/libbfd.c
index 01147fbf73b3..6434c2dccd4a 100644
--- a/tools/perf/util/libbfd.c
+++ b/tools/perf/util/libbfd.c
@@ -38,6 +38,39 @@ struct a2l_data {
asymbol **syms;
};
+static bool perf_bfd_lock(void *bfd_mutex)
+{
+ mutex_lock(bfd_mutex);
+ return true;
+}
+
+static bool perf_bfd_unlock(void *bfd_mutex)
+{
+ mutex_unlock(bfd_mutex);
+ return true;
+}
+
+static void perf_bfd_init(void)
+{
+ static struct mutex bfd_mutex;
+
+ mutex_init_recursive(&bfd_mutex);
+
+ if (bfd_init() != BFD_INIT_MAGIC) {
+ pr_err("Error initializing libbfd\n");
+ return;
+ }
+ if (!bfd_thread_init(perf_bfd_lock, perf_bfd_unlock, &bfd_mutex))
+ pr_err("Error initializing libbfd threading\n");
+}
+
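+/* libbfd must be initialized exactly once; pthread_once() serializes racing callers. */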
+static void ensure_bfd_init(void)
+{
+ static pthread_once_t bfd_init_once = PTHREAD_ONCE_INIT;
+
+ pthread_once(&bfd_init_once, perf_bfd_init);
+}
+
static int bfd_error(const char *string)
{
const char *errmsg;
@@ -132,6 +165,7 @@ static struct a2l_data *addr2line_init(const char *path)
bfd *abfd;
struct a2l_data *a2l = NULL;
+ ensure_bfd_init();
abfd = bfd_openr(path, NULL);
if (abfd == NULL)
return NULL;
@@ -288,6 +322,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
bfd *abfd;
u64 start, len;
+ ensure_bfd_init();
abfd = bfd_openr(debugfile, NULL);
if (!abfd)
return -1;
@@ -393,6 +428,7 @@ int libbfd__read_build_id(const char *filename, struct build_id *bid, bool block
if (fd < 0)
return -1;
+ ensure_bfd_init();
abfd = bfd_fdopenr(filename, /*target=*/NULL, fd);
if (!abfd)
return -1;
@@ -421,6 +457,7 @@ int libbfd_filename__read_debuglink(const char *filename, char *debuglink,
asection *section;
bfd *abfd;
+ ensure_bfd_init();
abfd = bfd_openr(filename, NULL);
if (!abfd)
return -1;
@@ -480,6 +517,7 @@ int symbol__disassemble_bpf_libbfd(struct symbol *sym __maybe_unused,
memset(tpath, 0, sizeof(tpath));
perf_exe(tpath, sizeof(tpath));
+ ensure_bfd_init();
bfdf = bfd_openr(tpath, NULL);
if (bfdf == NULL)
abort();
diff --git a/tools/perf/util/mutex.c b/tools/perf/util/mutex.c
index bca7f0717f35..7aa1f3f55a7d 100644
--- a/tools/perf/util/mutex.c
+++ b/tools/perf/util/mutex.c
@@ -17,7 +17,7 @@ static void check_err(const char *fn, int err)
#define CHECK_ERR(err) check_err(__func__, err)
-static void __mutex_init(struct mutex *mtx, bool pshared)
+static void __mutex_init(struct mutex *mtx, bool pshared, bool recursive)
{
pthread_mutexattr_t attr;
@@ -27,21 +27,27 @@ static void __mutex_init(struct mutex *mtx, bool pshared)
/* In normal builds enable error checking, such as recursive usage. */
CHECK_ERR(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
#endif
+ if (recursive)
+ CHECK_ERR(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
if (pshared)
CHECK_ERR(pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
-
CHECK_ERR(pthread_mutex_init(&mtx->lock, &attr));
CHECK_ERR(pthread_mutexattr_destroy(&attr));
}
void mutex_init(struct mutex *mtx)
{
- __mutex_init(mtx, /*pshared=*/false);
+ __mutex_init(mtx, /*pshared=*/false, /*recursive=*/false);
}
void mutex_init_pshared(struct mutex *mtx)
{
- __mutex_init(mtx, /*pshared=*/true);
+ __mutex_init(mtx, /*pshared=*/true, /*recursive=*/false);
+}
+
+void mutex_init_recursive(struct mutex *mtx)
+{
+ __mutex_init(mtx, /*pshared=*/false, /*recursive=*/true);
}
void mutex_destroy(struct mutex *mtx)
diff --git a/tools/perf/util/mutex.h b/tools/perf/util/mutex.h
index 38458f00846f..70232d8d094f 100644
--- a/tools/perf/util/mutex.h
+++ b/tools/perf/util/mutex.h
@@ -104,6 +104,8 @@ void mutex_init(struct mutex *mtx);
* process-private attribute.
*/
void mutex_init_pshared(struct mutex *mtx);
+/* Initializes a mutex that may be recursively held on the same thread. */
+void mutex_init_recursive(struct mutex *mtx);
void mutex_destroy(struct mutex *mtx);
void mutex_lock(struct mutex *mtx) EXCLUSIVE_LOCK_FUNCTION(*mtx);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index fcd2f9bf78c9..558839e3c185 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -50,6 +50,7 @@ CONFIG_IPV6_SIT=y
CONFIG_IPV6_TUNNEL=y
CONFIG_KEYS=y
CONFIG_LIRC=y
+CONFIG_LIVEPATCH=y
CONFIG_LWTUNNEL=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SRCVERSION_ALL=y
@@ -111,6 +112,8 @@ CONFIG_IP6_NF_FILTER=y
CONFIG_NF_NAT=y
CONFIG_PACKET=y
CONFIG_RC_CORE=y
+CONFIG_SAMPLES=y
+CONFIG_SAMPLE_LIVEPATCH=m
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SYN_COOKIES=y
diff --git a/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
new file mode 100644
index 000000000000..72aa5376c30e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "testing_helpers.h"
+#include "livepatch_trampoline.skel.h"
+
+static int load_livepatch(void)
+{
+ char path[4096];
+
+ /* CI will set KBUILD_OUTPUT */
+ snprintf(path, sizeof(path), "%s/samples/livepatch/livepatch-sample.ko",
+ getenv("KBUILD_OUTPUT") ? : "../../../..");
+
+ return load_module(path, env_verbosity > VERBOSE_NONE);
+}
+
+static void unload_livepatch(void)
+{
+ /* Disable the livepatch before unloading the module */
+ system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled");
+
+ unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE);
+}
+
+static void read_proc_cmdline(void)
+{
+ char buf[4096];
+ int fd, ret;
+
+ fd = open("/proc/cmdline", O_RDONLY);
+ if (!ASSERT_OK_FD(fd, "open /proc/cmdline"))
+ return;
+
+ ret = read(fd, buf, sizeof(buf));
+ if (!ASSERT_GT(ret, 0, "read /proc/cmdline"))
+ goto out;
+
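+	/* The livepatch sample patches cmdline_proc_show() to emit this string. */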
+ ASSERT_OK(strncmp(buf, "this has been live patched", 26), "strncmp");
+
+out:
+ close(fd);
+}
+
+static void __test_livepatch_trampoline(bool fexit_first)
+{
+ struct livepatch_trampoline *skel = NULL;
+ int err;
+
+ skel = livepatch_trampoline__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ skel->bss->my_pid = getpid();
+
+ if (!fexit_first) {
+		/* The fentry program is attached first by default */
+ err = livepatch_trampoline__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+ } else {
+		/* Manually attach the fexit program first. */
+ skel->links.fexit_cmdline = bpf_program__attach(skel->progs.fexit_cmdline);
+ if (!ASSERT_OK_PTR(skel->links.fexit_cmdline, "attach_fexit"))
+ goto out;
+
+ skel->links.fentry_cmdline = bpf_program__attach(skel->progs.fentry_cmdline);
+ if (!ASSERT_OK_PTR(skel->links.fentry_cmdline, "attach_fentry"))
+ goto out;
+ }
+
+ read_proc_cmdline();
+
+ ASSERT_EQ(skel->bss->fentry_hit, 1, "fentry_hit");
+ ASSERT_EQ(skel->bss->fexit_hit, 1, "fexit_hit");
+out:
+ livepatch_trampoline__destroy(skel);
+}
+
+void test_livepatch_trampoline(void)
+{
+ int retry_cnt = 0;
+
+retry:
+ if (load_livepatch()) {
+ if (retry_cnt) {
+ ASSERT_OK(1, "load_livepatch");
+ goto out;
+ }
+ /*
+ * Something else (previous run of the same test?) loaded
+ * the KLP module. Unload the KLP module and retry.
+ */
+ unload_livepatch();
+ retry_cnt++;
+ goto retry;
+ }
+
+ if (test__start_subtest("fentry_first"))
+ __test_livepatch_trampoline(false);
+
+ if (test__start_subtest("fexit_first"))
+ __test_livepatch_trampoline(true);
+out:
+ unload_livepatch();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index f8eb7f9d4fd2..8fade8bdc451 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -6,11 +6,13 @@
#include <netinet/in.h>
#include <test_progs.h>
#include <unistd.h>
+#include <errno.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_sock.skel.h"
#include "mptcpify.skel.h"
#include "mptcp_subflow.skel.h"
+#include "mptcp_sockmap.skel.h"
#define NS_TEST "mptcp_ns"
#define ADDR_1 "10.0.1.1"
@@ -436,6 +438,142 @@ close_cgroup:
close(cgroup_fd);
}
+/* Test sockmap on MPTCP server handling non-mp-capable clients. */
+static void test_sockmap_with_mptcp_fallback(struct mptcp_sockmap *skel)
+{
+ int listen_fd = -1, client_fd1 = -1, client_fd2 = -1;
+ int server_fd1 = -1, server_fd2 = -1, sent, recvd;
+ char snd[9] = "123456789";
+ char rcv[10];
+
+ /* start server with MPTCP enabled */
+ listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_OK_FD(listen_fd, "sockmap-fb:start_mptcp_server"))
+ return;
+
+ skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
+ skel->bss->sk_index = 0;
+ /* create client without MPTCP enabled */
+ client_fd1 = connect_to_fd_opts(listen_fd, NULL);
+ if (!ASSERT_OK_FD(client_fd1, "sockmap-fb:connect_to_fd"))
+ goto end;
+
+ server_fd1 = accept(listen_fd, NULL, 0);
+ skel->bss->sk_index = 1;
+ client_fd2 = connect_to_fd_opts(listen_fd, NULL);
+ if (!ASSERT_OK_FD(client_fd2, "sockmap-fb:connect_to_fd"))
+ goto end;
+
+ server_fd2 = accept(listen_fd, NULL, 0);
+ /* test normal redirect behavior: data sent by client_fd1 can be
+ * received by client_fd2
+ */
+ skel->bss->redirect_idx = 1;
+ sent = send(client_fd1, snd, sizeof(snd), 0);
+ if (!ASSERT_EQ(sent, sizeof(snd), "sockmap-fb:send(client_fd1)"))
+ goto end;
+
+ /* try to recv more bytes to avoid truncation check */
+ recvd = recv(client_fd2, rcv, sizeof(rcv), 0);
+ if (!ASSERT_EQ(recvd, sizeof(snd), "sockmap-fb:recv(client_fd2)"))
+ goto end;
+
+end:
+ if (client_fd1 >= 0)
+ close(client_fd1);
+ if (client_fd2 >= 0)
+ close(client_fd2);
+ if (server_fd1 >= 0)
+ close(server_fd1);
+ if (server_fd2 >= 0)
+ close(server_fd2);
+ close(listen_fd);
+}
+
+/* Test sockmap rejection of MPTCP sockets - both server and client sides. */
+static void test_sockmap_reject_mptcp(struct mptcp_sockmap *skel)
+{
+ int listen_fd = -1, server_fd = -1, client_fd1 = -1;
+ int err, zero = 0;
+
+ /* start server with MPTCP enabled */
+ listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_OK_FD(listen_fd, "start_mptcp_server"))
+ return;
+
+ skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
+ skel->bss->sk_index = 0;
+ /* create client with MPTCP enabled */
+ client_fd1 = connect_to_fd(listen_fd, 0);
+ if (!ASSERT_OK_FD(client_fd1, "connect_to_fd client_fd1"))
+ goto end;
+
+ /* bpf_sock_map_update() called from sockops should reject MPTCP sk */
+ if (!ASSERT_EQ(skel->bss->helper_ret, -EOPNOTSUPP, "should reject"))
+ goto end;
+
+ server_fd = accept(listen_fd, NULL, 0);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
+ &zero, &server_fd, BPF_NOEXIST);
+ if (!ASSERT_EQ(err, -EOPNOTSUPP, "server should be disallowed"))
+ goto end;
+
+ /* MPTCP client should also be disallowed */
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
+ &zero, &client_fd1, BPF_NOEXIST);
+ if (!ASSERT_EQ(err, -EOPNOTSUPP, "client should be disallowed"))
+ goto end;
+end:
+ if (client_fd1 >= 0)
+ close(client_fd1);
+ if (server_fd >= 0)
+ close(server_fd);
+ close(listen_fd);
+}
+
+static void test_mptcp_sockmap(void)
+{
+ struct mptcp_sockmap *skel;
+ struct netns_obj *netns;
+ int cgroup_fd, err;
+
+ cgroup_fd = test__join_cgroup("/mptcp_sockmap");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_sockmap"))
+ return;
+
+ skel = mptcp_sockmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_sockmap"))
+ goto close_cgroup;
+
+ skel->links.mptcp_sockmap_inject =
+ bpf_program__attach_cgroup(skel->progs.mptcp_sockmap_inject, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.mptcp_sockmap_inject, "attach sockmap"))
+ goto skel_destroy;
+
+ err = bpf_prog_attach(bpf_program__fd(skel->progs.mptcp_sockmap_redirect),
+ bpf_map__fd(skel->maps.sock_map),
+ BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
+ goto skel_destroy;
+
+ netns = netns_new(NS_TEST, true);
+ if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_sockmap"))
+ goto skel_destroy;
+
+ if (endpoint_init("subflow") < 0)
+ goto close_netns;
+
+ test_sockmap_with_mptcp_fallback(skel);
+ test_sockmap_reject_mptcp(skel);
+
+close_netns:
+ netns_free(netns);
+skel_destroy:
+ mptcp_sockmap__destroy(skel);
+close_cgroup:
+ close(cgroup_fd);
+}
+
void test_mptcp(void)
{
if (test__start_subtest("base"))
@@ -444,4 +582,6 @@ void test_mptcp(void)
test_mptcpify();
if (test__start_subtest("subflow"))
test_subflow();
+ if (test__start_subtest("sockmap"))
+ test_mptcp_sockmap();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
new file mode 100644
index 000000000000..c9efdd2a5b18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_ips.skel.h"
+
+#ifdef __x86_64__
+static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
+{
+ __u64 ips[PERF_MAX_STACK_DEPTH];
+ struct ksyms *ksyms = NULL;
+ int i, err = 0;
+ va_list args;
+
+ /* sorted by addr */
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
+ return -1;
+
+	/* unlikely, but... */
+	if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max")) {
+		err = -1;
+		goto out;
+	}
+
+ err = bpf_map_lookup_elem(fd, &key, ips);
+ if (err)
+ goto out;
+
+	/*
+	 * Compare each symbol address passed via the variadic arguments
+	 * with the symbol resolved from the corresponding stacktrace ip.
+	 */
+ va_start(args, cnt);
+
+ for (i = 0; i < cnt; i++) {
+ unsigned long val;
+ struct ksym *ksym;
+
+ val = va_arg(args, unsigned long);
+ ksym = ksym_search_local(ksyms, ips[i]);
+ if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
+ break;
+ ASSERT_EQ(ksym->addr, val, "stack_cmp");
+ }
+
+ va_end(args);
+
+out:
+ free_kallsyms_local(ksyms);
+ return err;
+}
+
+static void test_stacktrace_ips_kprobe_multi(bool retprobe)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
+ .retprobe = retprobe
+ );
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct stacktrace_ips *skel;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.kprobe_multi_test,
+ "bpf_testmod_stacktrace_test", &opts);
+ if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void test_stacktrace_ips_raw_tp(void)
+{
+ __u32 info_len = sizeof(struct bpf_prog_info);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_prog_info info = {};
+ struct stacktrace_ips *skel;
+ __u64 bpf_prog_ksym = 0;
+ int err;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.rawtp_test = bpf_program__attach_raw_tracepoint(
+ skel->progs.rawtp_test,
+ "bpf_testmod_test_read");
+ if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint"))
+ goto cleanup;
+
+ /* get bpf program address */
+ info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym);
+ info.nr_jited_ksyms = 1;
+ err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test),
+ &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2,
+ bpf_prog_ksym,
+ ksym_get_addr("bpf_trace_run2"));
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void __test_stacktrace_ips(void)
+{
+ if (test__start_subtest("kprobe_multi"))
+ test_stacktrace_ips_kprobe_multi(false);
+ if (test__start_subtest("kretprobe_multi"))
+ test_stacktrace_ips_kprobe_multi(true);
+ if (test__start_subtest("raw_tp"))
+ test_stacktrace_ips_raw_tp();
+}
+#else
+static void __test_stacktrace_ips(void)
+{
+ test__skip();
+}
+#endif
+
+void test_stacktrace_ips(void)
+{
+ __test_stacktrace_ips();
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
index 164640db3a29..b1e509b231cd 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
@@ -99,13 +99,13 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_retransmit_timer.expires;
+ timer_expires = sp->tcp_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_retransmit_timer.expires;
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = sp->tcp_retransmit_timer.expires;
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
index 591c703f5032..dbc7166aee91 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
@@ -99,13 +99,13 @@ static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_retransmit_timer.expires;
+ timer_expires = sp->tcp_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_retransmit_timer.expires;
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = sp->tcp_retransmit_timer.expires;
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
diff --git a/tools/testing/selftests/bpf/progs/iters_looping.c b/tools/testing/selftests/bpf/progs/iters_looping.c
index 05fa5ce7fc59..d00fd570255a 100644
--- a/tools/testing/selftests/bpf/progs/iters_looping.c
+++ b/tools/testing/selftests/bpf/progs/iters_looping.c
@@ -161,3 +161,56 @@ int simplest_loop(void *ctx)
return 0;
}
+
+__used
+static void iterator_with_diff_stack_depth(int x)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "if r1 == 42 goto 0f;"
+ "*(u64 *)(r10 - 128) = 0;"
+ "0:"
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ /* consume next item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "goto 1b;"
+ "2:"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("socket")
+__success
+__naked int widening_stack_size_bug(void *ctx)
+{
+ /*
+ * Depending on iterator_with_diff_stack_depth() parameter value,
+ * subprogram stack depth is either 8 or 128 bytes. Arrange values so
+ * that it is 128 on a first call and 8 on a second. This triggered a
+ * bug in verifier's widen_imprecise_scalars() logic.
+ */
+ asm volatile (
+ "r6 = 0;"
+ "r1 = 0;"
+ "1:"
+ "call iterator_with_diff_stack_depth;"
+ "r1 = 42;"
+ "r6 += 1;"
+ "if r6 < 2 goto 1b;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
diff --git a/tools/testing/selftests/bpf/progs/livepatch_trampoline.c b/tools/testing/selftests/bpf/progs/livepatch_trampoline.c
new file mode 100644
index 000000000000..15579d5bcd91
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/livepatch_trampoline.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int fentry_hit;
+int fexit_hit;
+int my_pid;
+
+SEC("fentry/cmdline_proc_show")
+int BPF_PROG(fentry_cmdline)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ fentry_hit = 1;
+ return 0;
+}
+
+SEC("fexit/cmdline_proc_show")
+int BPF_PROG(fexit_cmdline)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ fexit_hit = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_sockmap.c b/tools/testing/selftests/bpf/progs/mptcp_sockmap.c
new file mode 100644
index 000000000000..d4eef0cbadb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_sockmap.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+int sk_index;
+int redirect_idx;
+int trace_port;
+int helper_ret;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, 100);
+} sock_map SEC(".maps");
+
+SEC("sockops")
+int mptcp_sockmap_inject(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *sk;
+
+ /* only accept specified connection */
+ if (skops->local_port != trace_port ||
+ skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
+ return 1;
+
+ sk = skops->sk;
+ if (!sk)
+ return 1;
+
+ /* update sk handler */
+ helper_ret = bpf_sock_map_update(skops, &sock_map, &sk_index, BPF_NOEXIST);
+
+ return 1;
+}
+
+SEC("sk_skb/stream_verdict")
+int mptcp_sockmap_redirect(struct __sk_buff *skb)
+{
+ /* redirect skb to the sk under sock_map[redirect_idx] */
+ return bpf_sk_redirect_map(skb, &sock_map, redirect_idx, 0);
+}
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_ips.c b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
new file mode 100644
index 000000000000..a96c8150d7f5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
+typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, 16384);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+extern bool CONFIG_UNWINDER_ORC __kconfig __weak;
+
+/*
+ * This function is here to have CONFIG_UNWINDER_ORC
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_UNWINDER_ORC ? 0 : 1;
+}
+
+__u32 stack_key;
+
+SEC("kprobe.multi")
+int kprobe_multi_test(struct pt_regs *ctx)
+{
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
+SEC("raw_tp/bpf_testmod_test_read")
+int rawtp_test(void *ctx)
+{
+ /* Skip ebpf program entry in the stack. */
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c
index b4a0d0cc8ec8..3662515f0107 100644
--- a/tools/testing/selftests/bpf/progs/stream_fail.c
+++ b/tools/testing/selftests/bpf/progs/stream_fail.c
@@ -10,7 +10,7 @@ SEC("syscall")
__failure __msg("Possibly NULL pointer passed")
int stream_vprintk_null_arg(void *ctx)
{
- bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL);
+ bpf_stream_vprintk_impl(BPF_STDOUT, "", NULL, 0, NULL);
return 0;
}
@@ -18,7 +18,7 @@ SEC("syscall")
__failure __msg("R3 type=scalar expected=")
int stream_vprintk_scalar_arg(void *ctx)
{
- bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL);
+ bpf_stream_vprintk_impl(BPF_STDOUT, "", (void *)46, 0, NULL);
return 0;
}
@@ -26,7 +26,7 @@ SEC("syscall")
__failure __msg("arg#1 doesn't point to a const string")
int stream_vprintk_string_arg(void *ctx)
{
- bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL);
+ bpf_stream_vprintk_impl(BPF_STDOUT, ctx, NULL, 0, NULL);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_work.c b/tools/testing/selftests/bpf/progs/task_work.c
index 23217f06a3ec..663a80990f8f 100644
--- a/tools/testing/selftests/bpf/progs/task_work.c
+++ b/tools/testing/selftests/bpf/progs/task_work.c
@@ -66,7 +66,7 @@ int oncpu_hash_map(struct pt_regs *args)
if (!work)
return 0;
- bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
return 0;
}
@@ -80,7 +80,7 @@ int oncpu_array_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
- bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL);
+ bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
return 0;
}
@@ -102,6 +102,6 @@ int oncpu_lru_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&lrumap, &key);
if (!work || work->data[0])
return 0;
- bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL);
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_work_fail.c b/tools/testing/selftests/bpf/progs/task_work_fail.c
index 77fe8f28facd..1270953fd092 100644
--- a/tools/testing/selftests/bpf/progs/task_work_fail.c
+++ b/tools/testing/selftests/bpf/progs/task_work_fail.c
@@ -53,7 +53,7 @@ int mismatch_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
- bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
return 0;
}
@@ -65,7 +65,7 @@ int no_map_task_work(struct pt_regs *args)
struct bpf_task_work tw;
task = bpf_get_current_task_btf();
- bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL);
+ bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
return 0;
}
@@ -76,7 +76,7 @@ int task_work_null(struct pt_regs *args)
struct task_struct *task;
task = bpf_get_current_task_btf();
- bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL);
+ bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
return 0;
}
@@ -91,6 +91,6 @@ int map_null(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
- bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL);
+ bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_work_stress.c b/tools/testing/selftests/bpf/progs/task_work_stress.c
index 90fca06fff56..55e555f7f41b 100644
--- a/tools/testing/selftests/bpf/progs/task_work_stress.c
+++ b/tools/testing/selftests/bpf/progs/task_work_stress.c
@@ -51,8 +51,8 @@ int schedule_task_work(void *ctx)
if (!work)
return 0;
}
- err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
- process_work, NULL);
+ err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
+ process_work, NULL);
if (err)
__sync_fetch_and_add(&schedule_error, 1);
else
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index 8eeebaa951f0..1669a7eeda26 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -417,6 +417,30 @@ noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}
+noinline void bpf_testmod_stacktrace_test(void)
+{
+ /* used for stacktrace test as attach function */
+ asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_3(void)
+{
+ bpf_testmod_stacktrace_test();
+ asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_2(void)
+{
+ bpf_testmod_stacktrace_test_3();
+ asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_1(void)
+{
+ bpf_testmod_stacktrace_test_2();
+ asm volatile ("");
+}
+
int bpf_testmod_fentry_ok;
noinline ssize_t
@@ -497,6 +521,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
21, 22, 23, 24, 25, 26) != 231)
goto out;
+ bpf_testmod_stacktrace_test_1();
+
bpf_testmod_fentry_ok = 1;
out:
return -EIO; /* always fail */
diff --git a/tools/testing/selftests/drivers/net/.gitignore b/tools/testing/selftests/drivers/net/.gitignore
index 585ecb4d5dc4..3633c7a3ed65 100644
--- a/tools/testing/selftests/drivers/net/.gitignore
+++ b/tools/testing/selftests/drivers/net/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+gro
napi_id_helper
psp_responder
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index 33f4816216ec..f5c71d993750 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -6,10 +6,12 @@ TEST_INCLUDES := $(wildcard lib/py/*.py) \
../../net/lib.sh \
TEST_GEN_FILES := \
+ gro \
napi_id_helper \
# end of TEST_GEN_FILES
TEST_PROGS := \
+ gro.py \
hds.py \
napi_id.py \
napi_threaded.py \
diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/drivers/net/gro.c
index cfc39f70635d..995b492f5bcb 100644
--- a/tools/testing/selftests/net/gro.c
+++ b/tools/testing/selftests/drivers/net/gro.c
@@ -57,7 +57,8 @@
#include <string.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "../../kselftest.h"
+#include "../../net/lib/ksft.h"
#define DPORT 8000
#define SPORT 1500
@@ -1127,6 +1128,8 @@ static void gro_receiver(void)
set_timeout(rxfd);
bind_packetsocket(rxfd);
+ ksft_ready();
+
memset(correct_payload, 0, sizeof(correct_payload));
if (strcmp(testname, "data") == 0) {
diff --git a/tools/testing/selftests/drivers/net/gro.py b/tools/testing/selftests/drivers/net/gro.py
new file mode 100755
index 000000000000..ba83713bf7b5
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/gro.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+GRO (Generic Receive Offload) conformance tests.
+
+Validates that GRO coalescing works correctly by running the gro
+binary in different configurations and checking for correct packet
+coalescing behavior.
+
+Test cases:
+ - data: Data packets with same size/headers and correct seq numbers coalesce
+ - ack: Pure ACK packets do not coalesce
+ - flags: Packets with PSH, SYN, URG, RST flags do not coalesce
+ - tcp: Packets with incorrect checksum, non-consecutive seqno don't coalesce
+ - ip: Packets with different ECN, TTL, TOS, or IP options don't coalesce
+ - large: Packets larger than GRO_MAX_SIZE don't coalesce
+"""
+
+import os
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import NetDrvEpEnv, KsftXfailEx
+from lib.py import cmd, defer, bkg, ip
+from lib.py import ksft_variants
+
+
+def _resolve_dmac(cfg, ipver):
+ """
+ Find the destination MAC address the remote host should use to send packets
+ towards the local host. It may be a router / gateway address.
+ """
+
+ attr = "dmac" + ipver
+ # Cache the response across test cases
+ if hasattr(cfg, attr):
+ return getattr(cfg, attr)
+
+ route = ip(f"-{ipver} route get {cfg.addr_v[ipver]}",
+ json=True, host=cfg.remote)[0]
+ gw = route.get("gateway")
+ # Local L2 segment, address directly
+ if not gw:
+ setattr(cfg, attr, cfg.dev['address'])
+ return getattr(cfg, attr)
+
+ # ping to make sure neighbor is resolved,
+ # bind to an interface, for v6 the GW is likely link local
+ cmd(f"ping -c1 -W0 -I{cfg.remote_ifname} {gw}", host=cfg.remote)
+
+ neigh = ip(f"neigh get {gw} dev {cfg.remote_ifname}",
+ json=True, host=cfg.remote)[0]
+ setattr(cfg, attr, neigh['lladdr'])
+ return getattr(cfg, attr)
+
+
+def _write_defer_restore(cfg, path, val, defer_undo=False):
+ with open(path, "r", encoding="utf-8") as fp:
+ orig_val = fp.read().strip()
+ if str(val) == orig_val:
+ return
+ with open(path, "w", encoding="utf-8") as fp:
+ fp.write(val)
+ if defer_undo:
+ defer(_write_defer_restore, cfg, path, orig_val)
+
+
+def _set_mtu_restore(dev, mtu, host):
+ if dev['mtu'] < mtu:
+ ip(f"link set dev {dev['ifname']} mtu {mtu}", host=host)
+ defer(ip, f"link set dev {dev['ifname']} mtu {dev['mtu']}", host=host)
+
+
+def _setup(cfg, test_name):
+ """ Setup hardware loopback mode for GRO testing. """
+
+ if not hasattr(cfg, "bin_remote"):
+ cfg.bin_local = cfg.test_dir / "gro"
+ cfg.bin_remote = cfg.remote.deploy(cfg.bin_local)
+
+ # "large" test needs at least 4k MTU
+ if test_name == "large":
+ _set_mtu_restore(cfg.dev, 4096, None)
+ _set_mtu_restore(cfg.remote_dev, 4096, cfg.remote)
+
+ flush_path = f"/sys/class/net/{cfg.ifname}/gro_flush_timeout"
+ irq_path = f"/sys/class/net/{cfg.ifname}/napi_defer_hard_irqs"
+
+ _write_defer_restore(cfg, flush_path, "200000", defer_undo=True)
+ _write_defer_restore(cfg, irq_path, "10", defer_undo=True)
+
+ try:
+ # Disable TSO for local tests
+ cfg.require_nsim() # will raise KsftXfailEx if not running on nsim
+
+ cmd(f"ethtool -K {cfg.ifname} gro on tso off")
+ cmd(f"ethtool -K {cfg.remote_ifname} gro on tso off", host=cfg.remote)
+ except KsftXfailEx:
+ pass
+
+def _gro_variants():
+ """Generator that yields all combinations of protocol and test types."""
+
+ for protocol in ["ipv4", "ipv6", "ipip"]:
+ for test_name in ["data", "ack", "flags", "tcp", "ip", "large"]:
+ yield protocol, test_name
+
+
+@ksft_variants(_gro_variants())
+def test(cfg, protocol, test_name):
+ """Run a single GRO test with retries."""
+
+ ipver = "6" if protocol[-1] == "6" else "4"
+ cfg.require_ipver(ipver)
+
+ _setup(cfg, test_name)
+
+ base_cmd_args = [
+ f"--{protocol}",
+ f"--dmac {_resolve_dmac(cfg, ipver)}",
+ f"--smac {cfg.remote_dev['address']}",
+ f"--daddr {cfg.addr_v[ipver]}",
+ f"--saddr {cfg.remote_addr_v[ipver]}",
+ f"--test {test_name}",
+ "--verbose"
+ ]
+ base_args = " ".join(base_cmd_args)
+
+ # Each test is run 6 times to deflake, because given the receive timing,
+ # not all packets that should coalesce will be considered in the same flow
+ # on every try.
+ max_retries = 6
+ for attempt in range(max_retries):
+ rx_cmd = f"{cfg.bin_local} {base_args} --rx --iface {cfg.ifname}"
+ tx_cmd = f"{cfg.bin_remote} {base_args} --iface {cfg.remote_ifname}"
+
+ fail_now = attempt >= max_retries - 1
+
+ with bkg(rx_cmd, ksft_ready=True, exit_wait=True,
+ fail=fail_now) as rx_proc:
+ cmd(tx_cmd, host=cfg.remote)
+
+ if rx_proc.ret == 0:
+ return
+
+ ksft_pr(rx_proc.stdout.strip().replace('\n', '\n# '))
+ ksft_pr(rx_proc.stderr.strip().replace('\n', '\n# '))
+
+ if test_name == "large" and os.environ.get("KSFT_MACHINE_SLOW"):
+ ksft_pr(f"Ignoring {protocol}/{test_name} failure due to slow environment")
+ return
+
+ ksft_pr(f"Attempt {attempt + 1}/{max_retries} failed, retrying...")
+
+
+def main() -> None:
+ """ Ksft boiler plate main """
+
+ with NetDrvEpEnv(__file__) as cfg:
+ ksft_run(cases=[test], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
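
Illustrative note (not part of the patch): the generator-driven variants above expand into one subtest per protocol/test pair. Since no explicit variant name is passed, KsftNamedVariant (added to net/lib/py/ksft.py later in this diff) joins the parameters with "_", so the reported names can be previewed with this throwaway snippet:

    # Illustrative only: how _gro_variants() expands under @ksft_variants
    # when variants are unnamed (parameters joined with "_").
    for protocol in ["ipv4", "ipv6", "ipip"]:
        for test_name in ["data", "ack", "flags", "tcp", "ip", "large"]:
            print(f"test.{protocol}_{test_name}")  # e.g. test.ipv4_data ... test.ipip_large
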
diff --git a/tools/testing/selftests/drivers/net/hw/.gitignore b/tools/testing/selftests/drivers/net/hw/.gitignore
index 6942bf575497..46540468a775 100644
--- a/tools/testing/selftests/drivers/net/hw/.gitignore
+++ b/tools/testing/selftests/drivers/net/hw/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
iou-zcrx
ncdevmem
+toeplitz
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index 8133d1a0051c..7c819fdfa107 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -1,6 +1,21 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-TEST_GEN_FILES = iou-zcrx
+# Check if io_uring supports zero-copy receive
+HAS_IOURING_ZCRX := $(shell \
+ echo -e '#include <liburing.h>\n' \
+ 'void *func = (void *)io_uring_register_ifq;\n' \
+ 'int main() {return 0;}' | \
+ $(CC) -luring -x c - -o /dev/null 2>&1 && echo y)
+
+ifeq ($(HAS_IOURING_ZCRX),y)
+COND_GEN_FILES += iou-zcrx
+else
+$(warning excluding iouring tests, liburing not installed or too old)
+endif
+
+TEST_GEN_FILES := \
+ $(COND_GEN_FILES) \
+# end of TEST_GEN_FILES
TEST_PROGS = \
csum.py \
@@ -21,6 +36,7 @@ TEST_PROGS = \
rss_ctx.py \
rss_flow_label.py \
rss_input_xfrm.py \
+ toeplitz.py \
tso.py \
xsk_reconfig.py \
#
@@ -38,7 +54,10 @@ TEST_INCLUDES := \
#
# YNL files, must be before "include ..lib.mk"
-YNL_GEN_FILES := ncdevmem
+YNL_GEN_FILES := \
+ ncdevmem \
+ toeplitz \
+# end of YNL_GEN_FILES
TEST_GEN_FILES += $(YNL_GEN_FILES)
TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c))
@@ -54,4 +73,6 @@ include ../../../net/ynl.mk
include ../../../net/bpf.mk
+ifeq ($(HAS_IOURING_ZCRX),y)
$(OUTPUT)/iou-zcrx: LDLIBS += -luring
+endif
diff --git a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
index fb010a48a5a1..0c61debf86fb 100644
--- a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
@@ -25,7 +25,7 @@ try:
fd_read_timeout, ip, rand_port, wait_port_listen, wait_file
from net.lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
from net.lib.py import ksft_disruptive, ksft_exit, ksft_pr, ksft_run, \
- ksft_setup
+ ksft_setup, ksft_variants, KsftNamedVariant
from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
ksft_ne, ksft_not_in, ksft_raises, ksft_true, ksft_gt, ksft_not_none
from drivers.net.lib.py import GenerateTraffic, Remote
@@ -40,7 +40,7 @@ try:
"wait_port_listen", "wait_file",
"KsftSkipEx", "KsftFailEx", "KsftXfailEx",
"ksft_disruptive", "ksft_exit", "ksft_pr", "ksft_run",
- "ksft_setup",
+ "ksft_setup", "ksft_variants", "KsftNamedVariant",
"ksft_eq", "ksft_ge", "ksft_in", "ksft_is", "ksft_lt",
"ksft_ne", "ksft_not_in", "ksft_raises", "ksft_true", "ksft_gt",
"ksft_not_none", "ksft_not_none",
diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/drivers/net/hw/toeplitz.c
index 9ba03164d73a..a4d04438c313 100644
--- a/tools/testing/selftests/net/toeplitz.c
+++ b/tools/testing/selftests/drivers/net/hw/toeplitz.c
@@ -52,7 +52,11 @@
#include <sys/types.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include <ynl.h>
+#include "ethtool-user.h"
+
+#include "../../../kselftest.h"
+#include "../../../net/lib/ksft.h"
#define TOEPLITZ_KEY_MIN_LEN 40
#define TOEPLITZ_KEY_MAX_LEN 60
@@ -64,6 +68,7 @@
#define FOUR_TUPLE_MAX_LEN ((sizeof(struct in6_addr) * 2) + (sizeof(uint16_t) * 2))
#define RSS_MAX_CPUS (1 << 16) /* real constraint is PACKET_FANOUT_MAX */
+#define RSS_MAX_INDIR (1 << 16)
#define RPS_MAX_CPUS 16UL /* must be a power of 2 */
@@ -101,6 +106,8 @@ struct ring_state {
static unsigned int rx_irq_cpus[RSS_MAX_CPUS]; /* map from rxq to cpu */
static int rps_silo_to_cpu[RPS_MAX_CPUS];
static unsigned char toeplitz_key[TOEPLITZ_KEY_MAX_LEN];
+static unsigned int rss_indir_tbl[RSS_MAX_INDIR];
+static unsigned int rss_indir_tbl_size;
static struct ring_state rings[RSS_MAX_CPUS];
static inline uint32_t toeplitz(const unsigned char *four_tuple,
@@ -129,7 +136,12 @@ static inline uint32_t toeplitz(const unsigned char *four_tuple,
/* Compare computed cpu with arrival cpu from packet_fanout_cpu */
static void verify_rss(uint32_t rx_hash, int cpu)
{
- int queue = rx_hash % cfg_num_queues;
+ int queue;
+
+ if (rss_indir_tbl_size)
+ queue = rss_indir_tbl[rx_hash % rss_indir_tbl_size];
+ else
+ queue = rx_hash % cfg_num_queues;
log_verbose(" rxq %d (cpu %d)", queue, rx_irq_cpus[queue]);
if (rx_irq_cpus[queue] != cpu) {
@@ -482,6 +494,56 @@ static void parse_rps_bitmap(const char *arg)
rps_silo_to_cpu[cfg_num_rps_cpus++] = i;
}
+static void read_rss_dev_info_ynl(void)
+{
+ struct ethtool_rss_get_req *req;
+ struct ethtool_rss_get_rsp *rsp;
+ struct ynl_sock *ys;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, NULL);
+ if (!ys)
+ error(1, errno, "ynl_sock_create failed");
+
+ req = ethtool_rss_get_req_alloc();
+ if (!req)
+ error(1, errno, "ethtool_rss_get_req_alloc failed");
+
+ ethtool_rss_get_req_set_header_dev_name(req, cfg_ifname);
+
+ rsp = ethtool_rss_get(ys, req);
+ if (!rsp)
+ error(1, ys->err.code, "YNL: %s", ys->err.msg);
+
+ if (!rsp->_len.hkey)
+ error(1, 0, "RSS key not available for %s", cfg_ifname);
+
+ if (rsp->_len.hkey < TOEPLITZ_KEY_MIN_LEN ||
+ rsp->_len.hkey > TOEPLITZ_KEY_MAX_LEN)
+ error(1, 0, "RSS key length %u out of bounds [%u, %u]",
+ rsp->_len.hkey, TOEPLITZ_KEY_MIN_LEN,
+ TOEPLITZ_KEY_MAX_LEN);
+
+ memcpy(toeplitz_key, rsp->hkey, rsp->_len.hkey);
+
+ if (rsp->_count.indir > RSS_MAX_INDIR)
+ error(1, 0, "RSS indirection table too large (%u > %u)",
+ rsp->_count.indir, RSS_MAX_INDIR);
+
+ /* If indir table not available we'll fallback to simple modulo math */
+ if (rsp->_count.indir) {
+ memcpy(rss_indir_tbl, rsp->indir,
+ rsp->_count.indir * sizeof(rss_indir_tbl[0]));
+ rss_indir_tbl_size = rsp->_count.indir;
+
+ log_verbose("RSS indirection table size: %u\n",
+ rss_indir_tbl_size);
+ }
+
+ ethtool_rss_get_rsp_free(rsp);
+ ethtool_rss_get_req_free(req);
+ ynl_sock_destroy(ys);
+}
+
static void parse_opts(int argc, char **argv)
{
static struct option long_options[] = {
@@ -550,7 +612,7 @@ static void parse_opts(int argc, char **argv)
}
if (!have_toeplitz)
- error(1, 0, "Must supply rss key ('-k')");
+ read_rss_dev_info_ynl();
num_cpus = get_nprocs();
if (num_cpus > RSS_MAX_CPUS)
@@ -576,6 +638,10 @@ int main(int argc, char **argv)
fd_sink = setup_sink();
setup_rings();
+
+ /* Signal to test framework that we're ready to receive */
+ ksft_ready();
+
process_rings();
cleanup_rings();
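
Illustrative note (not part of the patch): the mapping that the updated verify_rss() checks can be modelled in a few lines of Python. The helper names below are hypothetical, and the sketch assumes the key is at least 40 bytes, as enforced by TOEPLITZ_KEY_MIN_LEN, so every 32-bit key window exists:

    # Minimal model of the RSS path verified above: Toeplitz hash over the
    # four-tuple, then an indirection-table lookup when the device reports
    # one, otherwise plain modulo over the queue count.
    def toeplitz_hash(key: bytes, four_tuple: bytes) -> int:
        result = 0
        key_bits = int.from_bytes(key, "big")
        key_len_bits = len(key) * 8
        for i, byte in enumerate(four_tuple):
            for bit in range(8):
                if byte & (0x80 >> bit):
                    # XOR in the 32-bit window of the key starting at this
                    # bit position; assumes the key covers the input + 32 bits.
                    shift = key_len_bits - 32 - (i * 8 + bit)
                    result ^= (key_bits >> shift) & 0xFFFFFFFF
        return result

    def rx_queue(rx_hash: int, indir_tbl: list, num_queues: int) -> int:
        # Mirrors verify_rss(): prefer the indirection table, else fall back
        # to simple modulo math over the queue count.
        if indir_tbl:
            return indir_tbl[rx_hash % len(indir_tbl)]
        return rx_hash % num_queues
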
diff --git a/tools/testing/selftests/drivers/net/hw/toeplitz.py b/tools/testing/selftests/drivers/net/hw/toeplitz.py
new file mode 100755
index 000000000000..d2db5ee9e358
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/toeplitz.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Toeplitz Rx hashing test:
+ - rxhash (the hash value calculation itself);
+ - RSS mapping from rxhash to rx queue;
+ - RPS mapping from rxhash to cpu.
+"""
+
+import glob
+import os
+import socket
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import NetDrvEpEnv, EthtoolFamily, NetdevFamily
+from lib.py import cmd, bkg, rand_port, defer
+from lib.py import ksft_in
+from lib.py import ksft_variants, KsftNamedVariant, KsftSkipEx, KsftFailEx
+
+# "define" for the ID of the Toeplitz hash function
+ETH_RSS_HASH_TOP = 1
+
+
+def _check_rps_and_rfs_not_configured(cfg):
+ """Verify that RPS is not already configured."""
+
+ for rps_file in glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*/rps_cpus"):
+ with open(rps_file, "r", encoding="utf-8") as fp:
+ val = fp.read().strip()
+ if set(val) - {"0", ","}:
+ raise KsftSkipEx(f"RPS already configured on {rps_file}: {val}")
+
+ rfs_file = "/proc/sys/net/core/rps_sock_flow_entries"
+ with open(rfs_file, "r", encoding="utf-8") as fp:
+ val = fp.read().strip()
+ if val != "0":
+ raise KsftSkipEx(f"RFS already configured {rfs_file}: {val}")
+
+
+def _get_cpu_for_irq(irq):
+ with open(f"/proc/irq/{irq}/smp_affinity_list", "r",
+ encoding="utf-8") as fp:
+ data = fp.read().strip()
+ if "," in data or "-" in data:
+ raise KsftFailEx(f"IRQ{irq} is not mapped to a single core: {data}")
+ return int(data)
+
+
+def _get_irq_cpus(cfg):
+ """
+ Read the CPU each Rx queue IRQ is pinned to, indexed by Rx queue.
+ """
+ queues = cfg.netnl.queue_get({"ifindex": cfg.ifindex}, dump=True)
+ napis = cfg.netnl.napi_get({"ifindex": cfg.ifindex}, dump=True)
+
+ # Remap into ID-based dicts
+ napis = {n["id"]: n for n in napis}
+ queues = {f"{q['type']}{q['id']}": q for q in queues}
+
+ cpus = []
+ for rx in range(9999):
+ name = f"rx{rx}"
+ if name not in queues:
+ break
+ cpus.append(_get_cpu_for_irq(napis[queues[name]["napi-id"]]["irq"]))
+
+ return cpus
+
+
+def _get_unused_cpus(cfg, count=2):
+ """
+ Get CPUs that are not used by Rx queues.
+ Returns a list of exactly 'count' CPU numbers.
+ """
+
+ # Get CPUs used by Rx queues
+ rx_cpus = set(_get_irq_cpus(cfg))
+
+ # Get total number of CPUs
+ num_cpus = os.cpu_count()
+
+ # Find unused CPUs
+ unused_cpus = [cpu for cpu in range(num_cpus) if cpu not in rx_cpus]
+
+ if len(unused_cpus) < count:
+ raise KsftSkipEx(f"Need at {count} CPUs not used by Rx queues, found {len(unused_cpus)}")
+
+ return unused_cpus[:count]
+
+
+def _configure_rps(cfg, rps_cpus):
+ """Configure RPS for all Rx queues."""
+
+ mask = 0
+ for cpu in rps_cpus:
+ mask |= (1 << cpu)
+ mask = hex(mask)[2:]
+
+ # Set RPS bitmap for all rx queues
+ for rps_file in glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*/rps_cpus"):
+ with open(rps_file, "w", encoding="utf-8") as fp:
+ fp.write(mask)
+
+ return mask
+
+
+def _send_traffic(cfg, proto_flag, ipver, port):
+ """Send 20 packets of requested type."""
+
+ # Determine protocol and IP version for socat
+ if proto_flag == "-u":
+ proto = "UDP"
+ else:
+ proto = "TCP"
+
+ baddr = f"[{cfg.addr_v['6']}]" if ipver == "6" else cfg.addr_v["4"]
+
+ # Run socat in a loop to send traffic periodically
+ # Use sh -c with a loop similar to toeplitz_client.sh
+ socat_cmd = f"""
+ for i in `seq 20`; do
+ echo "msg $i" | socat -{ipver} -t 0.1 - {proto}:{baddr}:{port};
+ sleep 0.001;
+ done
+ """
+
+ cmd(socat_cmd, shell=True, host=cfg.remote)
+
+
+def _test_variants():
+ for grp in ["", "rss", "rps"]:
+ for l4 in ["tcp", "udp"]:
+ for l3 in ["4", "6"]:
+ name = f"{l4}_ipv{l3}"
+ if grp:
+ name = f"{grp}_{name}"
+ yield KsftNamedVariant(name, "-" + l4[0], l3, grp)
+
+
+@ksft_variants(_test_variants())
+def test(cfg, proto_flag, ipver, grp):
+ """Run a single toeplitz test."""
+
+ cfg.require_ipver(ipver)
+
+ # Check that rxhash is enabled
+ ksft_in("receive-hashing: on", cmd(f"ethtool -k {cfg.ifname}").stdout)
+
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ # Make sure NIC is configured to use Toeplitz hash, and no key xfrm.
+ if rss.get('hfunc') != ETH_RSS_HASH_TOP or rss.get('input-xfrm'):
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "hfunc": ETH_RSS_HASH_TOP,
+ "input-xfrm": {}})
+ defer(cfg.ethnl.rss_set, {"header": {"dev-index": cfg.ifindex},
+ "hfunc": rss.get('hfunc'),
+ "input-xfrm": rss.get('input-xfrm', {})
+ })
+
+ port = rand_port(socket.SOCK_DGRAM)
+
+ toeplitz_path = cfg.test_dir / "toeplitz"
+ rx_cmd = [
+ str(toeplitz_path),
+ "-" + ipver,
+ proto_flag,
+ "-d", str(port),
+ "-i", cfg.ifname,
+ "-T", "4000",
+ "-s",
+ "-v"
+ ]
+
+ if grp:
+ _check_rps_and_rfs_not_configured(cfg)
+ if grp == "rss":
+ irq_cpus = ",".join([str(x) for x in _get_irq_cpus(cfg)])
+ rx_cmd += ["-C", irq_cpus]
+ ksft_pr(f"RSS using CPUs: {irq_cpus}")
+ elif grp == "rps":
+ # Get CPUs not used by Rx queues and configure them for RPS
+ rps_cpus = _get_unused_cpus(cfg, count=2)
+ rps_mask = _configure_rps(cfg, rps_cpus)
+ defer(_configure_rps, cfg, [])
+ rx_cmd += ["-r", rps_mask]
+ ksft_pr(f"RPS using CPUs: {rps_cpus}, mask: {rps_mask}")
+
+ # Run rx in background, it will exit once it has seen enough packets
+ with bkg(" ".join(rx_cmd), ksft_ready=True, exit_wait=True) as rx_proc:
+ while rx_proc.proc.poll() is None:
+ _send_traffic(cfg, proto_flag, ipver, port)
+
+ # Check rx result
+ ksft_pr("Receiver output:")
+ ksft_pr(rx_proc.stdout.strip().replace('\n', '\n# '))
+ if rx_proc.stderr:
+ ksft_pr(rx_proc.stderr.strip().replace('\n', '\n# '))
+
+
+def main() -> None:
+ """Ksft boilerplate main."""
+
+ with NetDrvEpEnv(__file__) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ cfg.netnl = NetdevFamily()
+ ksft_run(cases=[test], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
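
Illustrative note (not part of the patch): a worked example of the RPS bitmap built by _configure_rps() above. The mask is a hex string without the "0x" prefix, with one bit set per selected CPU, which is the format the rps_cpus sysfs file expects:

    # Illustrative only: CPUs 2 and 5 -> bits 2 and 5 -> 0b100100 -> "24".
    cpus = [2, 5]
    mask = 0
    for cpu in cpus:
        mask |= 1 << cpu
    assert hex(mask)[2:] == "24"
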
diff --git a/tools/testing/selftests/drivers/net/lib/py/__init__.py b/tools/testing/selftests/drivers/net/lib/py/__init__.py
index b0c6300150fb..d9d035634a31 100644
--- a/tools/testing/selftests/drivers/net/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/lib/py/__init__.py
@@ -25,7 +25,7 @@ try:
fd_read_timeout, ip, rand_port, wait_port_listen, wait_file
from net.lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
from net.lib.py import ksft_disruptive, ksft_exit, ksft_pr, ksft_run, \
- ksft_setup
+ ksft_setup, ksft_variants, KsftNamedVariant
from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
ksft_ne, ksft_not_in, ksft_raises, ksft_true, ksft_gt, ksft_not_none
@@ -38,7 +38,7 @@ try:
"wait_port_listen", "wait_file",
"KsftSkipEx", "KsftFailEx", "KsftXfailEx",
"ksft_disruptive", "ksft_exit", "ksft_pr", "ksft_run",
- "ksft_setup",
+ "ksft_setup", "ksft_variants", "KsftNamedVariant",
"ksft_eq", "ksft_ge", "ksft_in", "ksft_is", "ksft_lt",
"ksft_ne", "ksft_not_in", "ksft_raises", "ksft_true", "ksft_gt",
"ksft_not_none", "ksft_not_none"]
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index 01be3d9b9720..8b644fd84ff2 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -168,6 +168,8 @@ class NetDrvEpEnv(NetDrvEnvBase):
# resolve remote interface name
self.remote_ifname = self.resolve_remote_ifc()
+ self.remote_dev = ip("-d link show dev " + self.remote_ifname,
+ host=self.remote, json=True)[0]
self._required_cmd = {}
diff --git a/tools/testing/selftests/drivers/net/netcons_basic.sh b/tools/testing/selftests/drivers/net/netcons_basic.sh
index a3446b569976..2022f3061738 100755
--- a/tools/testing/selftests/drivers/net/netcons_basic.sh
+++ b/tools/testing/selftests/drivers/net/netcons_basic.sh
@@ -28,8 +28,6 @@ OUTPUT_FILE="/tmp/${TARGET}"
# Check for basic system dependency and exit if not found
check_for_dependencies
-# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5)
-echo "6 5" > /proc/sys/kernel/printk
# Remove the namespace, interfaces and netconsole target on exit
trap cleanup EXIT
@@ -39,6 +37,9 @@ do
for IP_VERSION in "ipv6" "ipv4"
do
echo "Running with target mode: ${FORMAT} (${IP_VERSION})"
+ # Set current loglevel to KERN_INFO(6), and default to
+ # KERN_NOTICE(5)
+ echo "6 5" > /proc/sys/kernel/printk
# Create one namespace and two interfaces
set_network "${IP_VERSION}"
# Create a dynamic target for netconsole
diff --git a/tools/testing/selftests/drivers/net/netcons_overflow.sh b/tools/testing/selftests/drivers/net/netcons_overflow.sh
index 29bad56448a2..06089643b771 100755
--- a/tools/testing/selftests/drivers/net/netcons_overflow.sh
+++ b/tools/testing/selftests/drivers/net/netcons_overflow.sh
@@ -15,7 +15,7 @@ SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh
# This is coming from netconsole code. Check for it in drivers/net/netconsole.c
-MAX_USERDATA_ITEMS=16
+MAX_USERDATA_ITEMS=256
# Function to create userdata entries
function create_userdata_max_entries() {
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index 030762b203d7..1b529ccaf050 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -3,7 +3,8 @@
lib_dir=$(dirname $0)/../../../net/forwarding
-ALL_TESTS="fw_flash_test params_test regions_test reload_test \
+ALL_TESTS="fw_flash_test params_test \
+ params_default_test regions_test reload_test \
netns_reload_test resource_test dev_info_test \
empty_reporter_test dummy_reporter_test rate_test"
NUM_NETIFS=0
@@ -78,17 +79,28 @@ fw_flash_test()
param_get()
{
local name=$1
+ local attr=${2:-value}
+ local cmode=${3:-driverinit}
cmd_jq "devlink dev param show $DL_HANDLE name $name -j" \
- '.[][][].values[] | select(.cmode == "driverinit").value'
+ '.[][][].values[] | select(.cmode == "'"$cmode"'").'"$attr"
}
param_set()
{
local name=$1
local value=$2
+ local cmode=${3:-driverinit}
- devlink dev param set $DL_HANDLE name $name cmode driverinit value $value
+ devlink dev param set $DL_HANDLE name $name cmode $cmode value $value
+}
+
+param_set_default()
+{
+ local name=$1
+ local cmode=${2:-driverinit}
+
+ devlink dev param set $DL_HANDLE name $name default cmode $cmode
}
check_value()
@@ -97,12 +109,18 @@ check_value()
local phase_name=$2
local expected_param_value=$3
local expected_debugfs_value=$4
+ local cmode=${5:-driverinit}
local value
+ local attr="value"
- value=$(param_get $name)
- check_err $? "Failed to get $name param value"
+ if [[ "$phase_name" == *"default"* ]]; then
+ attr="default"
+ fi
+
+ value=$(param_get $name $attr $cmode)
+ check_err $? "Failed to get $name param $attr"
[ "$value" == "$expected_param_value" ]
- check_err $? "Unexpected $phase_name $name param value"
+ check_err $? "Unexpected $phase_name $name param $attr"
value=$(<$DEBUGFS_DIR/$name)
check_err $? "Failed to get $name debugfs value"
[ "$value" == "$expected_debugfs_value" ]
@@ -135,6 +153,92 @@ params_test()
log_test "params test"
}
+value_to_debugfs()
+{
+ local value=$1
+
+ case "$value" in
+ true)
+ echo "Y"
+ ;;
+ false)
+ echo "N"
+ ;;
+ *)
+ echo "$value"
+ ;;
+ esac
+}
+
+test_default()
+{
+ local param_name=$1
+ local new_value=$2
+ local expected_default=$3
+ local cmode=${4:-driverinit}
+ local default_debugfs
+ local new_debugfs
+ local expected_debugfs
+
+ default_debugfs=$(value_to_debugfs $expected_default)
+ new_debugfs=$(value_to_debugfs $new_value)
+
+ expected_debugfs=$default_debugfs
+ check_value $param_name initial-default $expected_default $expected_debugfs $cmode
+
+ param_set $param_name $new_value $cmode
+ check_err $? "Failed to set $param_name to $new_value"
+
+ expected_debugfs=$([ "$cmode" == "runtime" ] && echo "$new_debugfs" || echo "$default_debugfs")
+ check_value $param_name post-set $new_value $expected_debugfs $cmode
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload device"
+
+ expected_debugfs=$new_debugfs
+ check_value $param_name post-reload-new-value $new_value $expected_debugfs $cmode
+
+ param_set_default $param_name $cmode
+ check_err $? "Failed to set $param_name to default"
+
+ expected_debugfs=$([ "$cmode" == "runtime" ] && echo "$default_debugfs" || echo "$new_debugfs")
+ check_value $param_name post-set-default $expected_default $expected_debugfs $cmode
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload device"
+
+ expected_debugfs=$default_debugfs
+ check_value $param_name post-reload-default $expected_default $expected_debugfs $cmode
+}
+
+params_default_test()
+{
+ RET=0
+
+ if ! devlink dev param help 2>&1 | grep -q "value VALUE | default"; then
+ echo "SKIP: devlink cli missing default feature"
+ return
+ fi
+
+ # Remove side effects of previous tests. Use plain param_set, because
+ # param_set_default is a feature under test here.
+ param_set max_macs 32 driverinit
+ check_err $? "Failed to reset max_macs to default value"
+ param_set test1 true driverinit
+ check_err $? "Failed to reset test1 to default value"
+ param_set test2 1234 runtime
+ check_err $? "Failed to reset test2 to default value"
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload device for clean state"
+
+ test_default max_macs 16 32 driverinit
+ test_default test1 false true driverinit
+ test_default test2 100 1234 runtime
+
+ log_test "params default test"
+}
+
check_region_size()
{
local name=$1
diff --git a/tools/testing/selftests/drivers/net/xdp.py b/tools/testing/selftests/drivers/net/xdp.py
index a148004e1c36..e54df158dfe9 100755
--- a/tools/testing/selftests/drivers/net/xdp.py
+++ b/tools/testing/selftests/drivers/net/xdp.py
@@ -12,6 +12,7 @@ from dataclasses import dataclass
from enum import Enum
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_ge, ksft_ne, ksft_pr
+from lib.py import KsftNamedVariant, ksft_variants
from lib.py import KsftFailEx, NetDrvEpEnv
from lib.py import EthtoolFamily, NetdevFamily, NlError
from lib.py import bkg, cmd, rand_port, wait_port_listen
@@ -672,7 +673,18 @@ def test_xdp_native_adjst_head_shrnk_data(cfg):
_validate_res(res, offset_lst, pkt_sz_lst)
-def _test_xdp_native_ifc_stats(cfg, act):
+@ksft_variants([
+ KsftNamedVariant("pass", XDPAction.PASS),
+ KsftNamedVariant("drop", XDPAction.DROP),
+ KsftNamedVariant("tx", XDPAction.TX),
+])
+def test_xdp_native_qstats(cfg, act):
+ """
+ Send 1000 messages. Expect XDP action specified in @act.
+ Make sure the packets were counted to interface level qstats
+ (Rx, and Tx if act is TX).
+ """
+
cfg.require_cmd("socat")
bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
@@ -687,9 +699,12 @@ def _test_xdp_native_ifc_stats(cfg, act):
"/dev/null"
# Listener runs on "remote" in case of XDP_TX
rx_host = cfg.remote if act == XDPAction.TX else None
- # We want to spew 2000 packets quickly, bash seems to do a good enough job
- tx_udp = f"exec 5<>/dev/udp/{cfg.addr}/{port}; " \
- "for i in `seq 2000`; do echo a >&5; done; exec 5>&-"
+ # We want to spew 1000 packets quickly, bash seems to do a good enough job
+ # Each reopening of the socket gives us a different local port (for RSS)
+ tx_udp = "for _ in `seq 20`; do " \
+ f"exec 5<>/dev/udp/{cfg.addr}/{port}; " \
+ "for i in `seq 50`; do echo a >&5; done; " \
+ "exec 5>&-; done"
cfg.wait_hw_stats_settle()
# Qstats have more clearly defined semantics than rtnetlink.
@@ -704,11 +719,11 @@ def _test_xdp_native_ifc_stats(cfg, act):
cfg.wait_hw_stats_settle()
after = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
- ksft_ge(after['rx-packets'] - before['rx-packets'], 2000)
+ expected_pkts = 1000
+ ksft_ge(after['rx-packets'] - before['rx-packets'], expected_pkts)
if act == XDPAction.TX:
- ksft_ge(after['tx-packets'] - before['tx-packets'], 2000)
+ ksft_ge(after['tx-packets'] - before['tx-packets'], expected_pkts)
- expected_pkts = 2000
stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
ksft_eq(stats[XDPStats.RX.value], expected_pkts, "XDP RX stats mismatch")
if act == XDPAction.TX:
@@ -730,30 +745,6 @@ def _test_xdp_native_ifc_stats(cfg, act):
ksft_ge(after['tx-packets'], before['tx-packets'])
-def test_xdp_native_qstats_pass(cfg):
- """
- Send 2000 messages, expect XDP_PASS, make sure the packets were counted
- to interface level qstats (Rx).
- """
- _test_xdp_native_ifc_stats(cfg, XDPAction.PASS)
-
-
-def test_xdp_native_qstats_drop(cfg):
- """
- Send 2000 messages, expect XDP_DROP, make sure the packets were counted
- to interface level qstats (Rx).
- """
- _test_xdp_native_ifc_stats(cfg, XDPAction.DROP)
-
-
-def test_xdp_native_qstats_tx(cfg):
- """
- Send 2000 messages, expect XDP_TX, make sure the packets were counted
- to interface level qstats (Rx and Tx)
- """
- _test_xdp_native_ifc_stats(cfg, XDPAction.TX)
-
-
def main():
"""
Main function to execute the XDP tests.
@@ -778,9 +769,7 @@ def main():
test_xdp_native_adjst_tail_shrnk_data,
test_xdp_native_adjst_head_grow_data,
test_xdp_native_adjst_head_shrnk_data,
- test_xdp_native_qstats_pass,
- test_xdp_native_qstats_drop,
- test_xdp_native_qstats_tx,
+ test_xdp_native_qstats,
],
args=(cfg,))
ksft_exit()
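
Illustrative note (not part of the patch): the reworked sender above reopens the UDP "connection" 20 times so each 50-packet batch uses a fresh ephemeral source port and therefore hashes to different RSS queues. The same idea in Python, purely as a sketch (IPv4-only, hypothetical helper name):

    import socket

    def spray(dst_addr, dst_port, batches=20, per_batch=50):
        for _ in range(batches):
            # A new socket gets a new ephemeral source port, so each batch
            # likely lands on a different RSS queue.
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                s.connect((dst_addr, dst_port))
                for _ in range(per_batch):
                    s.send(b"a")
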
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index c62165fabd0c..cfa16aa1f39a 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -20,6 +20,10 @@ sample_events() {
echo 0 > tracing_on
echo 0 > events/enable
+# Clear functions caused by page cache; run sample_events twice
+sample_events
+sample_events
+
echo "Get the most frequently calling function"
echo > trace
sample_events
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 9e3be2ee7f1b..f917b4c4c943 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1758,10 +1758,15 @@ int main(int argc, char *argv[])
uffd_test_ops = mem_type->mem_ops;
uffd_test_case_ops = test->test_case_ops;
- if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
+ if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
gopts.page_size = default_huge_page_size();
- else
+ if (gopts.page_size == 0) {
+ uffd_test_skip("huge page size is 0, feature missing?");
+ continue;
+ }
+ } else {
gopts.page_size = psize();
+ }
/* Ensure we have at least 2 pages */
gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
@@ -1776,12 +1781,6 @@ int main(int argc, char *argv[])
continue;
uffd_test_start("%s on %s", test->name, mem_type->name);
- if ((mem_type->mem_flag == MEM_HUGETLB ||
- mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
- (default_huge_page_size() == 0)) {
- uffd_test_skip("huge page size is 0, feature missing?");
- continue;
- }
if (!uffd_feature_supported(test)) {
uffd_test_skip("feature missing");
continue;
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 439101b518ee..6930fe926c58 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -4,10 +4,8 @@ bind_timewait
bind_wildcard
busy_poller
cmsg_sender
-diag_uid
epoll_busy_poll
fin_ack_lat
-gro
hwtstamp_config
io_uring_zerocopy_tx
ioam6_parser
@@ -18,7 +16,6 @@ ipv6_flowlabel
ipv6_flowlabel_mgr
ipv6_fragmentation
log.txt
-msg_oob
msg_zerocopy
netlink-dumps
nettest
@@ -35,9 +32,6 @@ reuseport_bpf_numa
reuseport_dualstack
rxtimestamp
sctp_hello
-scm_inq
-scm_pidfd
-scm_rights
sk_bind_sendto_listen
sk_connect_zero_addr
sk_so_peek_off
@@ -56,7 +50,6 @@ tcp_port_share
tfo
timestamping
tls
-toeplitz
tools
tun
txring_overwrite
@@ -64,4 +57,3 @@ txtimestamp
udpgso
udpgso_bench_rx
udpgso_bench_tx
-unix_connect
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index b5127e968108..b66ba04f19d9 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -38,7 +38,6 @@ TEST_PROGS := \
fq_band_pktlimit.sh \
gre_gso.sh \
gre_ipv6_lladdr.sh \
- gro.sh \
icmp.sh \
icmp_redirect.sh \
io_uring_zerocopy_tx.sh \
@@ -121,8 +120,6 @@ TEST_PROGS := \
# end of TEST_PROGS
TEST_PROGS_EXTENDED := \
- toeplitz.sh \
- toeplitz_client.sh \
xfrm_policy_add_speed.sh \
# end of TEST_PROGS_EXTENDED
@@ -130,7 +127,6 @@ TEST_GEN_FILES := \
bind_bhash \
cmsg_sender \
fin_ack_lat \
- gro \
hwtstamp_config \
io_uring_zerocopy_tx \
ioam6_parser \
@@ -159,7 +155,6 @@ TEST_GEN_FILES := \
tcp_mmap \
tfo \
timestamping \
- toeplitz \
txring_overwrite \
txtimestamp \
udpgso \
@@ -193,8 +188,6 @@ TEST_FILES := \
in_netns.sh \
lib.sh \
settings \
- setup_loopback.sh \
- setup_veth.sh \
# end of TEST_FILES
# YNL files, must be before "include ..lib.mk"
diff --git a/tools/testing/selftests/net/af_unix/.gitignore b/tools/testing/selftests/net/af_unix/.gitignore
new file mode 100644
index 000000000000..240b26740c9e
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/.gitignore
@@ -0,0 +1,8 @@
+diag_uid
+msg_oob
+scm_inq
+scm_pidfd
+scm_rights
+so_peek_off
+unix_connect
+unix_connreset
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index de805cbbdf69..3cd677b72072 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -6,7 +6,9 @@ TEST_GEN_PROGS := \
scm_inq \
scm_pidfd \
scm_rights \
+ so_peek_off \
unix_connect \
+ unix_connreset \
# end of TEST_GEN_PROGS
include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/so_peek_off.c b/tools/testing/selftests/net/af_unix/so_peek_off.c
new file mode 100644
index 000000000000..86e7b0fb522d
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/so_peek_off.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+
+#include "../../kselftest_harness.h"
+
+FIXTURE(so_peek_off)
+{
+ int fd[2]; /* 0: sender, 1: receiver */
+};
+
+FIXTURE_VARIANT(so_peek_off)
+{
+ int type;
+};
+
+FIXTURE_VARIANT_ADD(so_peek_off, stream)
+{
+ .type = SOCK_STREAM,
+};
+
+FIXTURE_VARIANT_ADD(so_peek_off, dgram)
+{
+ .type = SOCK_DGRAM,
+};
+
+FIXTURE_VARIANT_ADD(so_peek_off, seqpacket)
+{
+ .type = SOCK_SEQPACKET,
+};
+
+FIXTURE_SETUP(so_peek_off)
+{
+ struct timeval timeout = {
+ .tv_sec = 5,
+ .tv_usec = 0,
+ };
+ int ret;
+
+ ret = socketpair(AF_UNIX, variant->type, 0, self->fd);
+ ASSERT_EQ(0, ret);
+
+ ret = setsockopt(self->fd[1], SOL_SOCKET, SO_RCVTIMEO_NEW,
+ &timeout, sizeof(timeout));
+ ASSERT_EQ(0, ret);
+
+ ret = setsockopt(self->fd[1], SOL_SOCKET, SO_PEEK_OFF,
+ &(int){0}, sizeof(int));
+ ASSERT_EQ(0, ret);
+}
+
+FIXTURE_TEARDOWN(so_peek_off)
+{
+ close_range(self->fd[0], self->fd[1], 0);
+}
+
+#define sendeq(fd, str, flags) \
+ do { \
+ int bytes, len = strlen(str); \
+ \
+ bytes = send(fd, str, len, flags); \
+ ASSERT_EQ(len, bytes); \
+ } while (0)
+
+#define recveq(fd, str, buflen, flags) \
+ do { \
+ char buf[(buflen) + 1] = {}; \
+ int bytes; \
+ \
+ bytes = recv(fd, buf, buflen, flags); \
+ ASSERT_NE(-1, bytes); \
+ ASSERT_STREQ(str, buf); \
+ } while (0)
+
+#define async \
+ for (pid_t pid = (pid = fork(), \
+ pid < 0 ? \
+ __TH_LOG("Failed to start async {}"), \
+ _metadata->exit_code = KSFT_FAIL, \
+ __bail(1, _metadata), \
+ 0xdead : \
+ pid); \
+ !pid; exit(0))
+
+TEST_F(so_peek_off, single_chunk)
+{
+ sendeq(self->fd[0], "aaaabbbb", 0);
+
+ recveq(self->fd[1], "aaaa", 4, MSG_PEEK);
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_F(so_peek_off, two_chunks)
+{
+ sendeq(self->fd[0], "aaaa", 0);
+ sendeq(self->fd[0], "bbbb", 0);
+
+ recveq(self->fd[1], "aaaa", 4, MSG_PEEK);
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_F(so_peek_off, two_chunks_blocking)
+{
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "aaaa", 0);
+ }
+
+ recveq(self->fd[1], "aaaa", 4, MSG_PEEK);
+
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "bbbb", 0);
+ }
+
+ /* goto again; -> goto redo; in unix_stream_read_generic(). */
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_F(so_peek_off, two_chunks_overlap)
+{
+ sendeq(self->fd[0], "aaaa", 0);
+ recveq(self->fd[1], "aa", 2, MSG_PEEK);
+
+ sendeq(self->fd[0], "bbbb", 0);
+
+ if (variant->type == SOCK_STREAM) {
+ /* SOCK_STREAM tries to fill the buffer. */
+ recveq(self->fd[1], "aabb", 4, MSG_PEEK);
+ recveq(self->fd[1], "bb", 100, MSG_PEEK);
+ } else {
+ /* SOCK_DGRAM and SOCK_SEQPACKET return at the skb boundary. */
+ recveq(self->fd[1], "aa", 100, MSG_PEEK);
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+ }
+}
+
+TEST_F(so_peek_off, two_chunks_overlap_blocking)
+{
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "aaaa", 0);
+ }
+
+ recveq(self->fd[1], "aa", 2, MSG_PEEK);
+
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "bbbb", 0);
+ }
+
+ /* Even SOCK_STREAM does not wait if at least one byte is read. */
+ recveq(self->fd[1], "aa", 100, MSG_PEEK);
+
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_HARNESS_MAIN
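
Illustrative note (not part of the patch): the behaviour exercised by the single_chunk case can be reproduced from plain userspace as well. A minimal sketch, assuming the Linux SO_PEEK_OFF value of 42 since the Python socket module may not export the constant:

    import socket

    SO_PEEK_OFF = 42  # Linux value from asm-generic/socket.h (assumed here)

    tx, rx = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    rx.setsockopt(socket.SOL_SOCKET, SO_PEEK_OFF, 0)  # arm the peek offset
    tx.send(b"aaaabbbb")
    assert rx.recv(4, socket.MSG_PEEK) == b"aaaa"    # peek offset advances to 4
    assert rx.recv(100, socket.MSG_PEEK) == b"bbbb"  # next peek continues past "aaaa"
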
diff --git a/tools/testing/selftests/net/af_unix/unix_connreset.c b/tools/testing/selftests/net/af_unix/unix_connreset.c
new file mode 100644
index 000000000000..08c1de8f5a98
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/unix_connreset.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Selftest for AF_UNIX socket close and ECONNRESET behaviour.
+ *
+ * This test verifies:
+ * 1. SOCK_STREAM returns EOF when the peer closes normally.
+ * 2. SOCK_STREAM returns ECONNRESET if peer closes with unread data.
+ * 3. SOCK_SEQPACKET returns EOF when the peer closes normally.
+ * 4. SOCK_SEQPACKET returns ECONNRESET if the peer closes with unread data.
+ * 5. SOCK_DGRAM does not return ECONNRESET when the peer closes.
+ *
+ * These tests document the intended Linux behaviour.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include "../../kselftest_harness.h"
+
+#define SOCK_PATH "/tmp/af_unix_connreset.sock"
+
+static void remove_socket_file(void)
+{
+ unlink(SOCK_PATH);
+}
+
+FIXTURE(unix_sock)
+{
+ int server;
+ int client;
+ int child;
+};
+
+FIXTURE_VARIANT(unix_sock)
+{
+ int socket_type;
+ const char *name;
+};
+
+FIXTURE_VARIANT_ADD(unix_sock, stream) {
+ .socket_type = SOCK_STREAM,
+ .name = "SOCK_STREAM",
+};
+
+FIXTURE_VARIANT_ADD(unix_sock, dgram) {
+ .socket_type = SOCK_DGRAM,
+ .name = "SOCK_DGRAM",
+};
+
+FIXTURE_VARIANT_ADD(unix_sock, seqpacket) {
+ .socket_type = SOCK_SEQPACKET,
+ .name = "SOCK_SEQPACKET",
+};
+
+FIXTURE_SETUP(unix_sock)
+{
+ struct sockaddr_un addr = {};
+ int err;
+
+ addr.sun_family = AF_UNIX;
+ strcpy(addr.sun_path, SOCK_PATH);
+ remove_socket_file();
+
+ self->server = socket(AF_UNIX, variant->socket_type, 0);
+ ASSERT_LT(-1, self->server);
+
+ err = bind(self->server, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(0, err);
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ err = listen(self->server, 1);
+ ASSERT_EQ(0, err);
+ }
+
+ self->client = socket(AF_UNIX, variant->socket_type | SOCK_NONBLOCK, 0);
+ ASSERT_LT(-1, self->client);
+
+ err = connect(self->client, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(0, err);
+}
+
+FIXTURE_TEARDOWN(unix_sock)
+{
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET)
+ close(self->child);
+
+ close(self->client);
+ close(self->server);
+ remove_socket_file();
+}
+
+/* Test 1: peer closes normally */
+TEST_F(unix_sock, eof)
+{
+ char buf[16] = {};
+ ssize_t n;
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ self->child = accept(self->server, NULL, NULL);
+ ASSERT_LT(-1, self->child);
+
+ close(self->child);
+ } else {
+ close(self->server);
+ }
+
+ n = recv(self->client, buf, sizeof(buf), 0);
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ ASSERT_EQ(0, n);
+ } else {
+ ASSERT_EQ(-1, n);
+ ASSERT_EQ(EAGAIN, errno);
+ }
+}
+
+/* Test 2: peer closes with unread data */
+TEST_F(unix_sock, reset_unread_behavior)
+{
+ char buf[16] = {};
+ ssize_t n;
+
+ /* Send data that will remain unread */
+ send(self->client, "hello", 5, 0);
+
+ if (variant->socket_type == SOCK_DGRAM) {
+ /* No real connection, just close the server */
+ close(self->server);
+ } else {
+ self->child = accept(self->server, NULL, NULL);
+ ASSERT_LT(-1, self->child);
+
+ /* Peer closes before client reads */
+ close(self->child);
+ }
+
+ n = recv(self->client, buf, sizeof(buf), 0);
+ ASSERT_EQ(-1, n);
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ ASSERT_EQ(ECONNRESET, errno);
+ } else {
+ ASSERT_EQ(EAGAIN, errno);
+ }
+}
+
+/* Test 3: closing unaccepted (embryo) server socket should reset client. */
+TEST_F(unix_sock, reset_closed_embryo)
+{
+ char buf[16] = {};
+ ssize_t n;
+
+ if (variant->socket_type == SOCK_DGRAM) {
+ snprintf(_metadata->results->reason,
+ sizeof(_metadata->results->reason),
+ "Test only applies to SOCK_STREAM and SOCK_SEQPACKET");
+ exit(KSFT_XFAIL);
+ }
+
+ /* Close server without accept()ing */
+ close(self->server);
+
+ n = recv(self->client, buf, sizeof(buf), 0);
+
+ ASSERT_EQ(-1, n);
+ ASSERT_EQ(ECONNRESET, errno);
+}
+
+TEST_HARNESS_MAIN
+
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index a94b73a53f72..a88f797c549a 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -11,7 +11,8 @@ TESTS="unregister down carrier nexthop suppress ipv6_notify ipv4_notify \
ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics \
ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr \
ipv6_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh fib6_gc_test \
- ipv4_mpath_list ipv6_mpath_list ipv4_mpath_balance ipv6_mpath_balance"
+ ipv4_mpath_list ipv6_mpath_list ipv4_mpath_balance ipv6_mpath_balance \
+ fib6_ra_to_static"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -1476,6 +1477,68 @@ ipv6_route_metrics_test()
route_cleanup
}
+fib6_ra_to_static()
+{
+ setup
+
+ echo
+ echo "Fib6 route promotion from RA-learned to static test"
+ set -e
+
+ # ra6 is required for the test. (ipv6toolkit)
+ if [ ! -x "$(command -v ra6)" ]; then
+ echo "SKIP: ra6 not found."
+ set +e
+ cleanup &> /dev/null
+ return
+ fi
+
+ # Create a pair of veth devices to send a RA message from one
+ # device to another.
+ $IP link add veth1 type veth peer name veth2
+ $IP link set dev veth1 up
+ $IP link set dev veth2 up
+ $IP -6 address add 2001:10::1/64 dev veth1 nodad
+ $IP -6 address add 2001:10::2/64 dev veth2 nodad
+
+ # Make veth1 ready to receive RA messages.
+ $NS_EXEC sysctl -wq net.ipv6.conf.veth1.accept_ra=2
+
+ # Send a RA message with a prefix from veth2.
+ $NS_EXEC ra6 -i veth2 -d 2001:10::1 -P 2001:12::/64\#LA\#120\#60
+
+ # Wait for the RA message.
+ sleep 1
+
+ # systemd may mess up the test. Make sure that
+ # systemd-networkd.service and systemd-networkd.socket are stopped.
+ check_rt_num_clean 2 $($IP -6 route list|grep expires|wc -l) || return
+
+ # Configure static address on the same prefix
+ $IP -6 address add 2001:12::dead/64 dev veth1 nodad
+
+ # On-link route won't expire anymore, default route still owned by RA
+ check_rt_num 1 $($IP -6 route list |grep expires|wc -l)
+
+ # Send a second RA message with a prefix from veth2.
+ $NS_EXEC ra6 -i veth2 -d 2001:10::1 -P 2001:12::/64\#LA\#120\#60
+ sleep 1
+
+ # Expire is not back, on-link route is still static
+ check_rt_num 1 $($IP -6 route list |grep expires|wc -l)
+
+ $IP -6 address del 2001:12::dead/64 dev veth1 nodad
+
+ # Expire is back, on-link route is now owned by RA again
+ check_rt_num 2 $($IP -6 route list |grep expires|wc -l)
+
+ log_test $ret 0 "ipv6 promote RA route to static"
+
+ set +e
+
+ cleanup &> /dev/null
+}
+
# add route for a prefix, flushing any existing routes first
# expected to be the first step of a test
add_route()
@@ -2798,6 +2861,7 @@ do
ipv6_mpath_list) ipv6_mpath_list_test;;
ipv4_mpath_balance) ipv4_mpath_balance_test;;
ipv6_mpath_balance) ipv6_mpath_balance_test;;
+ fib6_ra_to_static) fib6_ra_to_static;;
help) echo "Test names: $TESTS"; exit 0;;
esac
diff --git a/tools/testing/selftests/net/forwarding/lib_sh_test.sh b/tools/testing/selftests/net/forwarding/lib_sh_test.sh
index ff2accccaf4d..b4eda6c6199e 100755
--- a/tools/testing/selftests/net/forwarding/lib_sh_test.sh
+++ b/tools/testing/selftests/net/forwarding/lib_sh_test.sh
@@ -30,6 +30,11 @@ tfail()
do_test "tfail" false
}
+tfail2()
+{
+ do_test "tfail2" false
+}
+
txfail()
{
FAIL_TO_XFAIL=yes do_test "txfail" false
@@ -132,6 +137,8 @@ test_ret()
ret_subtest $ksft_fail "tfail" txfail tfail
ret_subtest $ksft_xfail "txfail" txfail txfail
+
+ ret_subtest $ksft_fail "tfail2" tfail2 tfail
}
exit_status_tests_run()
diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh
deleted file mode 100755
index 4c5144c6f652..000000000000
--- a/tools/testing/selftests/net/gro.sh
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-readonly SERVER_MAC="aa:00:00:00:00:02"
-readonly CLIENT_MAC="aa:00:00:00:00:01"
-readonly TESTS=("data" "ack" "flags" "tcp" "ip" "large")
-readonly PROTOS=("ipv4" "ipv6" "ipip")
-dev=""
-test="all"
-proto="ipv4"
-
-run_test() {
- local server_pid=0
- local exit_code=0
- local protocol=$1
- local test=$2
- local ARGS=( "--${protocol}" "--dmac" "${SERVER_MAC}" \
- "--smac" "${CLIENT_MAC}" "--test" "${test}" "--verbose" )
-
- setup_ns
- # Each test is run 6 times to deflake, because given the receive timing,
- # not all packets that should coalesce will be considered in the same flow
- # on every try.
- for tries in {1..6}; do
- # Actual test starts here
- ip netns exec $server_ns ./gro "${ARGS[@]}" "--rx" "--iface" "server" \
- 1>>log.txt &
- server_pid=$!
- sleep 0.5 # to allow for socket init
- ip netns exec $client_ns ./gro "${ARGS[@]}" "--iface" "client" \
- 1>>log.txt
- wait "${server_pid}"
- exit_code=$?
- if [[ ${test} == "large" && -n "${KSFT_MACHINE_SLOW}" && \
- ${exit_code} -ne 0 ]]; then
- echo "Ignoring errors due to slow environment" 1>&2
- exit_code=0
- fi
- if [[ "${exit_code}" -eq 0 ]]; then
- break;
- fi
- done
- cleanup_ns
- echo ${exit_code}
-}
-
-run_all_tests() {
- local failed_tests=()
- for proto in "${PROTOS[@]}"; do
- for test in "${TESTS[@]}"; do
- echo "running test ${proto} ${test}" >&2
- exit_code=$(run_test $proto $test)
- if [[ "${exit_code}" -ne 0 ]]; then
- failed_tests+=("${proto}_${test}")
- fi;
- done;
- done
- if [[ ${#failed_tests[@]} -ne 0 ]]; then
- echo "failed tests: ${failed_tests[*]}. \
- Please see log.txt for more logs"
- exit 1
- else
- echo "All Tests Succeeded!"
- fi;
-}
-
-usage() {
- echo "Usage: $0 \
- [-i <DEV>] \
- [-t data|ack|flags|tcp|ip|large] \
- [-p <ipv4|ipv6>]" 1>&2;
- exit 1;
-}
-
-while getopts "i:t:p:" opt; do
- case "${opt}" in
- i)
- dev="${OPTARG}"
- ;;
- t)
- test="${OPTARG}"
- ;;
- p)
- proto="${OPTARG}"
- ;;
- *)
- usage
- ;;
- esac
-done
-
-if [ -n "$dev" ]; then
- source setup_loopback.sh
-else
- source setup_veth.sh
-fi
-
-setup
-trap cleanup EXIT
-if [[ "${test}" == "all" ]]; then
- run_all_tests
-else
- exit_code=$(run_test "${proto}" "${test}")
- exit $exit_code
-fi;
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index feba4ef69a54..f448bafb3f20 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -43,7 +43,7 @@ __ksft_status_merge()
weights[$i]=$((weight++))
done
- if [[ ${weights[$a]} > ${weights[$b]} ]]; then
+ if [[ ${weights[$a]} -ge ${weights[$b]} ]]; then
echo "$a"
return 0
else
diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
index ce795bc0a1af..5339f56329e1 100644
--- a/tools/testing/selftests/net/lib/Makefile
+++ b/tools/testing/selftests/net/lib/Makefile
@@ -8,6 +8,7 @@ CFLAGS += -I../../
TEST_FILES := \
../../../../net/ynl \
../../../../../Documentation/netlink/specs \
+ ksft_setup_loopback.sh \
# end of TEST_FILES
TEST_GEN_FILES := \
diff --git a/tools/testing/selftests/net/lib/ksft_setup_loopback.sh b/tools/testing/selftests/net/lib/ksft_setup_loopback.sh
new file mode 100755
index 000000000000..3defbb1919c5
--- /dev/null
+++ b/tools/testing/selftests/net/lib/ksft_setup_loopback.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Setup script for running ksft tests over a real interface in loopback mode.
+# This script replaces the historical setup_loopback.sh. It puts
+# a (presumably) real hardware interface into loopback mode, creates macvlan
+# interfaces on top and places them in a network namespace for isolation.
+#
+# NETIF env variable must be exported to indicate the real target device.
+# Note that the test will override NETIF with one of the macvlans; the
+# actual ksft test will only see the macvlans.
+#
+# Example use:
+# export NETIF=eth0
+# ./net/lib/ksft_setup_loopback.sh ./drivers/net/gro.py
+
+if [ -z "$NETIF" ]; then
+ echo "Error: NETIF variable not set"
+ exit 1
+fi
+if ! [ -d "/sys/class/net/$NETIF" ]; then
+ echo "Error: Can't find $NETIF, invalid netdevice"
+ exit 1
+fi
+
+# Save original settings for cleanup
+readonly FLUSH_PATH="/sys/class/net/${NETIF}/gro_flush_timeout"
+readonly IRQ_PATH="/sys/class/net/${NETIF}/napi_defer_hard_irqs"
+FLUSH_TIMEOUT="$(< "${FLUSH_PATH}")"
+readonly FLUSH_TIMEOUT
+HARD_IRQS="$(< "${IRQ_PATH}")"
+readonly HARD_IRQS
+
+SERVER_NS=$(mktemp -u server-XXXXXXXX)
+readonly SERVER_NS
+CLIENT_NS=$(mktemp -u client-XXXXXXXX)
+readonly CLIENT_NS
+readonly SERVER_MAC="aa:00:00:00:00:02"
+readonly CLIENT_MAC="aa:00:00:00:00:01"
+
+# ksft expects addresses to communicate with remote
+export LOCAL_V6=2001:db8:1::1
+export REMOTE_V6=2001:db8:1::2
+
+cleanup() {
+ local exit_code=$?
+
+ echo "Cleaning up..."
+
+ # Remove macvlan interfaces and namespaces
+ ip -netns "${SERVER_NS}" link del dev server 2>/dev/null || true
+ ip netns del "${SERVER_NS}" 2>/dev/null || true
+ ip -netns "${CLIENT_NS}" link del dev client 2>/dev/null || true
+ ip netns del "${CLIENT_NS}" 2>/dev/null || true
+
+ # Disable loopback
+ ethtool -K "${NETIF}" loopback off 2>/dev/null || true
+ sleep 1
+
+ echo "${FLUSH_TIMEOUT}" >"${FLUSH_PATH}"
+ echo "${HARD_IRQS}" >"${IRQ_PATH}"
+
+ exit $exit_code
+}
+
+trap cleanup EXIT INT TERM
+
+# Enable loopback mode
+echo "Enabling loopback on ${NETIF}..."
+ethtool -K "${NETIF}" loopback on || {
+ echo "Failed to enable loopback mode"
+ exit 1
+}
+# The interface may need time to get carrier back, but selftests
+# will wait for carrier, so no need to wait / sleep here.
+
+# Use a timer on the host to trigger the network stack and defer device
+# interrupts so the test does not depend on NIC interrupt timing. This
+# reduces flakiness caused by unexpected interrupts.
+echo 100000 >"${FLUSH_PATH}"
+echo 50 >"${IRQ_PATH}"
+
+# Create server namespace with macvlan
+ip netns add "${SERVER_NS}"
+ip link add link "${NETIF}" dev server address "${SERVER_MAC}" type macvlan
+ip link set dev server netns "${SERVER_NS}"
+ip -netns "${SERVER_NS}" link set dev server up
+ip -netns "${SERVER_NS}" addr add $LOCAL_V6/64 dev server
+ip -netns "${SERVER_NS}" link set dev lo up
+
+# Create client namespace with macvlan
+ip netns add "${CLIENT_NS}"
+ip link add link "${NETIF}" dev client address "${CLIENT_MAC}" type macvlan
+ip link set dev client netns "${CLIENT_NS}"
+ip -netns "${CLIENT_NS}" link set dev client up
+ip -netns "${CLIENT_NS}" addr add $REMOTE_V6/64 dev client
+ip -netns "${CLIENT_NS}" link set dev lo up
+
+echo "Setup complete!"
+echo " Device: ${NETIF}"
+echo " Server NS: ${SERVER_NS}"
+echo " Client NS: ${CLIENT_NS}"
+echo ""
+
+# Setup environment variables for tests
+export NETIF=server
+export REMOTE_TYPE=netns
+export REMOTE_ARGS="${CLIENT_NS}"
+
+# Run the command
+ip netns exec "${SERVER_NS}" "$@"
diff --git a/tools/testing/selftests/net/lib/py/__init__.py b/tools/testing/selftests/net/lib/py/__init__.py
index 97b7cf2b20eb..40f9ce307dd1 100644
--- a/tools/testing/selftests/net/lib/py/__init__.py
+++ b/tools/testing/selftests/net/lib/py/__init__.py
@@ -8,7 +8,8 @@ from .consts import KSRC
from .ksft import KsftFailEx, KsftSkipEx, KsftXfailEx, ksft_pr, ksft_eq, \
ksft_ne, ksft_true, ksft_not_none, ksft_in, ksft_not_in, ksft_is, \
ksft_ge, ksft_gt, ksft_lt, ksft_raises, ksft_busy_wait, \
- ktap_result, ksft_disruptive, ksft_setup, ksft_run, ksft_exit
+ ktap_result, ksft_disruptive, ksft_setup, ksft_run, ksft_exit, \
+ ksft_variants, KsftNamedVariant
from .netns import NetNS, NetNSEnter
from .nsim import NetdevSim, NetdevSimDev
from .utils import CmdExitFailure, fd_read_timeout, cmd, bkg, defer, \
@@ -21,7 +22,7 @@ __all__ = ["KSRC",
"ksft_ne", "ksft_true", "ksft_not_none", "ksft_in", "ksft_not_in",
"ksft_is", "ksft_ge", "ksft_gt", "ksft_lt", "ksft_raises",
"ksft_busy_wait", "ktap_result", "ksft_disruptive", "ksft_setup",
- "ksft_run", "ksft_exit",
+ "ksft_run", "ksft_exit", "ksft_variants", "KsftNamedVariant",
"NetNS", "NetNSEnter",
"CmdExitFailure", "fd_read_timeout", "cmd", "bkg", "defer",
"bpftool", "ip", "ethtool", "bpftrace", "rand_port",
diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py
index 83b1574f7719..ebd82940ee50 100644
--- a/tools/testing/selftests/net/lib/py/ksft.py
+++ b/tools/testing/selftests/net/lib/py/ksft.py
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-import builtins
import functools
import inspect
import signal
import sys
import time
import traceback
+from collections import namedtuple
from .consts import KSFT_MAIN_NAME
from .utils import global_defer_queue
@@ -136,7 +136,7 @@ def ksft_busy_wait(cond, sleep=0.005, deadline=1, comment=""):
time.sleep(sleep)
-def ktap_result(ok, cnt=1, case="", comment=""):
+def ktap_result(ok, cnt=1, case_name="", comment=""):
global KSFT_RESULT_ALL
KSFT_RESULT_ALL = KSFT_RESULT_ALL and ok
@@ -146,8 +146,8 @@ def ktap_result(ok, cnt=1, case="", comment=""):
res += "ok "
res += str(cnt) + " "
res += KSFT_MAIN_NAME
- if case:
- res += "." + str(case.__name__)
+ if case_name:
+ res += "." + case_name
if comment:
res += " # " + comment
print(res, flush=True)
@@ -163,7 +163,7 @@ def ksft_flush_defer():
entry = global_defer_queue.pop()
try:
entry.exec_only()
- except:
+ except BaseException:
ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!")
tb = traceback.format_exc()
for line in tb.strip().split('\n'):
@@ -171,6 +171,10 @@ def ksft_flush_defer():
KSFT_RESULT = False
+KsftCaseFunction = namedtuple("KsftCaseFunction",
+ ['name', 'original_func', 'variants'])
+
+
def ksft_disruptive(func):
"""
Decorator that marks the test as disruptive (e.g. the test
@@ -181,11 +185,47 @@ def ksft_disruptive(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not KSFT_DISRUPTIVE:
- raise KsftSkipEx(f"marked as disruptive")
+ raise KsftSkipEx("marked as disruptive")
return func(*args, **kwargs)
return wrapper
+class KsftNamedVariant:
+ """ Named string name + argument list tuple for @ksft_variants """
+
+ def __init__(self, name, *params):
+ self.params = params
+ self.name = name or "_".join([str(x) for x in self.params])
+
+
+def ksft_variants(params):
+ """
+ Decorator defining the sets of inputs for a test.
+ The parameters will be included in the name of the resulting sub-case.
+    Each parameter can be a single object, a tuple, or a KsftNamedVariant.
+ The argument can be a list or a generator.
+
+ Example:
+
+ @ksft_variants([
+ (1, "a"),
+ (2, "b"),
+ KsftNamedVariant("three", 3, "c"),
+ ])
+ def my_case(cfg, a, b):
+ pass # ...
+
+ ksft_run(cases=[my_case], args=(cfg, ))
+
+ Will generate cases:
+ my_case.1_a
+ my_case.2_b
+ my_case.three
+ """
+
+ return lambda func: KsftCaseFunction(func.__name__, func, params)
+
+
def ksft_setup(env):
"""
Setup test framework global state from the environment.
@@ -199,7 +239,7 @@ def ksft_setup(env):
return False
try:
return bool(int(value))
- except:
+ except Exception:
raise Exception(f"failed to parse {name}")
if "DISRUPTIVE" in env:
@@ -220,9 +260,13 @@ def _ksft_intr(signum, frame):
ksft_pr(f"Ignoring SIGTERM (cnt: {term_cnt}), already exiting...")
-def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
+def _ksft_generate_test_cases(cases, globs, case_pfx, args):
+ """Generate a flat list of (func, args, name) tuples"""
+
cases = cases or []
+ test_cases = []
+ # If using the globs method find all relevant functions
if globs and case_pfx:
for key, value in globs.items():
if not callable(value):
@@ -232,6 +276,27 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
cases.append(value)
break
+ for func in cases:
+ if isinstance(func, KsftCaseFunction):
+            # Parametrized test - create a sub-case for each variant
+ for param in func.variants:
+ if not isinstance(param, KsftNamedVariant):
+ if not isinstance(param, tuple):
+ param = (param, )
+ param = KsftNamedVariant(None, *param)
+
+ test_cases.append((func.original_func,
+ (*args, *param.params),
+ func.name + "." + param.name))
+ else:
+ test_cases.append((func, args, func.__name__))
+
+ return test_cases
+
+
+def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
+ test_cases = _ksft_generate_test_cases(cases, globs, case_pfx, args)
+
global term_cnt
term_cnt = 0
prev_sigterm = signal.signal(signal.SIGTERM, _ksft_intr)
@@ -239,19 +304,19 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
totals = {"pass": 0, "fail": 0, "skip": 0, "xfail": 0}
print("TAP version 13", flush=True)
- print("1.." + str(len(cases)), flush=True)
+ print("1.." + str(len(test_cases)), flush=True)
global KSFT_RESULT
cnt = 0
stop = False
- for case in cases:
+ for func, args, name in test_cases:
KSFT_RESULT = True
cnt += 1
comment = ""
cnt_key = ""
try:
- case(*args)
+ func(*args)
except KsftSkipEx as e:
comment = "SKIP " + str(e)
cnt_key = 'skip'
@@ -273,7 +338,7 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
if not cnt_key:
cnt_key = 'pass' if KSFT_RESULT else 'fail'
- ktap_result(KSFT_RESULT, cnt, case, comment=comment)
+ ktap_result(KSFT_RESULT, cnt, name, comment=comment)
totals[cnt_key] += 1
if stop:
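
An illustrative, standalone Python sketch (not part of the patch) of what the new @ksft_variants plumbing does: _ksft_generate_test_cases() flattens each decorated case into (func, args, name) tuples, and the variant name becomes the TAP sub-case suffix printed by ktap_result(). The expand() helper and the "cfg0" argument below are hypothetical stand-ins for ksft_run() and the test's cfg object.

# Sketch only: mirrors the expansion logic of _ksft_generate_test_cases()
from collections import namedtuple

KsftCaseFunction = namedtuple("KsftCaseFunction",
                              ["name", "original_func", "variants"])

class KsftNamedVariant:
    def __init__(self, name, *params):
        self.params = params
        self.name = name or "_".join(str(x) for x in self.params)

def ksft_variants(params):
    return lambda func: KsftCaseFunction(func.__name__, func, params)

@ksft_variants([(1, "a"), (2, "b"), KsftNamedVariant("three", 3, "c")])
def my_case(cfg, a, b):
    print(f"  running with cfg={cfg} a={a} b={b}")

def expand(cases, args=()):
    # flatten decorated cases into (func, args, name), like ksft_run() does
    out = []
    for func in cases:
        if isinstance(func, KsftCaseFunction):
            for param in func.variants:
                if not isinstance(param, KsftNamedVariant):
                    if not isinstance(param, tuple):
                        param = (param,)
                    param = KsftNamedVariant(None, *param)
                out.append((func.original_func, (*args, *param.params),
                            func.name + "." + param.name))
        else:
            out.append((func, args, func.__name__))
    return out

for func, call_args, name in expand([my_case], args=("cfg0",)):
    print(name)        # my_case.1_a, my_case.2_b, my_case.three
    func(*call_args)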
diff --git a/tools/testing/selftests/net/lib/py/nsim.py b/tools/testing/selftests/net/lib/py/nsim.py
index 1a8cbe9acc48..7c640ed64c0b 100644
--- a/tools/testing/selftests/net/lib/py/nsim.py
+++ b/tools/testing/selftests/net/lib/py/nsim.py
@@ -27,7 +27,7 @@ class NetdevSim:
self.port_index = port_index
self.ns = ns
self.dfs_dir = "%s/ports/%u/" % (nsimdev.dfs_dir, port_index)
- ret = ip("-j link show dev %s" % ifname, ns=ns)
+ ret = ip("-d -j link show dev %s" % ifname, ns=ns)
self.dev = json.loads(ret.stdout)[0]
self.ifindex = self.dev["ifindex"]
diff --git a/tools/testing/selftests/net/lib/py/utils.py b/tools/testing/selftests/net/lib/py/utils.py
index cb40ecef9456..106ee1f2df86 100644
--- a/tools/testing/selftests/net/lib/py/utils.py
+++ b/tools/testing/selftests/net/lib/py/utils.py
@@ -32,7 +32,7 @@ class cmd:
Use bkg() instead to run a command in the background.
"""
def __init__(self, comm, shell=None, fail=True, ns=None, background=False,
- host=None, timeout=5, ksft_wait=None):
+ host=None, timeout=5, ksft_ready=None, ksft_wait=None):
if ns:
comm = f'ip netns exec {ns} ' + comm
@@ -52,21 +52,25 @@ class cmd:
# ksft_wait lets us wait for the background process to fully start,
# we pass an FD to the child process, and wait for it to write back.
# Similarly term_fd tells child it's time to exit.
- pass_fds = ()
+ pass_fds = []
env = os.environ.copy()
if ksft_wait is not None:
- rfd, ready_fd = os.pipe()
wait_fd, self.ksft_term_fd = os.pipe()
- pass_fds = (ready_fd, wait_fd, )
- env["KSFT_READY_FD"] = str(ready_fd)
+ pass_fds.append(wait_fd)
env["KSFT_WAIT_FD"] = str(wait_fd)
+ ksft_ready = True # ksft_wait implies ready
+ if ksft_ready is not None:
+ rfd, ready_fd = os.pipe()
+ pass_fds.append(ready_fd)
+ env["KSFT_READY_FD"] = str(ready_fd)
self.proc = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=pass_fds,
env=env)
if ksft_wait is not None:
- os.close(ready_fd)
os.close(wait_fd)
+ if ksft_ready is not None:
+ os.close(ready_fd)
msg = fd_read_timeout(rfd, ksft_wait)
os.close(rfd)
if not msg:
@@ -116,10 +120,10 @@ class bkg(cmd):
with bkg("my_binary", ksft_wait=5):
"""
def __init__(self, comm, shell=None, fail=None, ns=None, host=None,
- exit_wait=False, ksft_wait=None):
+ exit_wait=False, ksft_ready=None, ksft_wait=None):
super().__init__(comm, background=True,
shell=shell, fail=fail, ns=ns, host=host,
- ksft_wait=ksft_wait)
+ ksft_ready=ksft_ready, ksft_wait=ksft_wait)
self.terminate = not exit_wait and not ksft_wait
self._exit_wait = exit_wait
self.check_fail = fail
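
An illustrative, standalone sketch (not part of the patch) of the handshake that the new ksft_ready/ksft_wait options arrange: the parent passes a pipe write end via KSFT_READY_FD which the child writes to once it is up, and (with ksft_wait) a pipe read end via KSFT_WAIT_FD which the child blocks on until the parent signals it to exit. The inline child program below is hypothetical; the real helper additionally wraps the ready read in fd_read_timeout().

# Sketch only: the KSFT_READY_FD / KSFT_WAIT_FD pipe handshake
import os
import subprocess
import sys

child_src = r"""
import os
os.write(int(os.environ["KSFT_READY_FD"]), b"ready")  # signal readiness
os.read(int(os.environ["KSFT_WAIT_FD"]), 1)           # block until told to exit
"""

ready_r, ready_w = os.pipe()   # child writes, parent reads
wait_r, term_w = os.pipe()     # parent writes, child reads

env = os.environ.copy()
env["KSFT_READY_FD"] = str(ready_w)
env["KSFT_WAIT_FD"] = str(wait_r)

proc = subprocess.Popen([sys.executable, "-c", child_src],
                        pass_fds=(ready_w, wait_r), env=env)
os.close(ready_w)              # keep only the parent's pipe ends
os.close(wait_r)

print(os.read(ready_r, 16))    # b'ready' once the child has started
os.close(ready_r)

os.write(term_w, b"q")         # equivalent of writing to ksft_term_fd
os.close(term_w)
proc.wait()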
diff --git a/tools/testing/selftests/net/lib/xdp_native.bpf.c b/tools/testing/selftests/net/lib/xdp_native.bpf.c
index c368fc045f4b..64f05229ab24 100644
--- a/tools/testing/selftests/net/lib/xdp_native.bpf.c
+++ b/tools/testing/selftests/net/lib/xdp_native.bpf.c
@@ -332,7 +332,7 @@ static __u16 csum_fold_helper(__u32 csum)
}
static int xdp_adjst_tail_shrnk_data(struct xdp_md *ctx, __u16 offset,
- __u32 hdr_len)
+ unsigned long hdr_len)
{
char tmp_buff[MAX_ADJST_OFFSET];
__u32 buff_pos, udp_csum = 0;
@@ -422,8 +422,9 @@ static int xdp_adjst_tail(struct xdp_md *ctx, __u16 port)
{
struct udphdr *udph = NULL;
__s32 *adjust_offset, *val;
- __u32 key, hdr_len;
+ unsigned long hdr_len;
void *offset_ptr;
+ __u32 key;
__u8 tag;
int ret;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 9b7b93f8eb0c..a6447f7a31fe 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -375,81 +375,75 @@ do_transfer()
local capfile="${rndh}-${connector_ns:0:3}-${listener_ns:0:3}-${cl_proto}-${srv_proto}-${connect_addr}-${port}"
local capopt="-i any -s 65535 -B 32768 ${capuser}"
- ip netns exec ${listener_ns} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
+ ip netns exec ${listener_ns} tcpdump ${capopt} \
+ -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
local cappid_listener=$!
- ip netns exec ${connector_ns} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
- local cappid_connector=$!
+ if [ ${listener_ns} != ${connector_ns} ]; then
+ ip netns exec ${connector_ns} tcpdump ${capopt} \
+ -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
+ local cappid_connector=$!
+ fi
sleep 1
fi
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
+ mptcp_lib_nstat_init "${listener_ns}"
if [ ${listener_ns} != ${connector_ns} ]; then
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
- fi
-
- local stat_synrx_last_l
- local stat_ackrx_last_l
- local stat_cookietx_last
- local stat_cookierx_last
- local stat_csum_err_s
- local stat_csum_err_c
- local stat_tcpfb_last_l
- stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
- stat_tcpfb_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
-
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_args $local_addr < "$sin" > "$sout" &
+ mptcp_lib_nstat_init "${connector_ns}"
+ fi
+
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
+ $extra_args $local_addr < "$sin" > "$sout" &
local spid=$!
mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
local start
start=$(date +%s%3N)
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_args $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_args $connect_addr < "$cin" > "$cout" &
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${listener_ns}" \
+ "${connector_ns}" "${port}" "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
wait $cpid
local retc=$?
wait $spid
local rets=$?
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
local stop
stop=$(date +%s%3N)
if $capture; then
sleep 1
kill ${cappid_listener}
- kill ${cappid_connector}
+ if [ ${listener_ns} != ${connector_ns} ]; then
+ kill ${cappid_connector}
+ fi
fi
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat | grep Tcp > /tmp/${listener_ns}.out
+ mptcp_lib_nstat_get "${listener_ns}"
if [ ${listener_ns} != ${connector_ns} ]; then
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat | grep Tcp > /tmp/${connector_ns}.out
+ mptcp_lib_nstat_get "${connector_ns}"
fi
local duration
duration=$((stop-start))
printf "(duration %05sms) " "${duration}"
- if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ] || [ ${timeout_pid} -ne 0 ]; then
mptcp_lib_pr_fail "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}" \
- "/tmp/${listener_ns}.out" "/tmp/${connector_ns}.out"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
echo
cat "$capout"
@@ -463,38 +457,38 @@ do_transfer()
rets=$?
local extra=""
- local stat_synrx_now_l
- local stat_ackrx_now_l
- local stat_cookietx_now
- local stat_cookierx_now
- local stat_ooo_now
- local stat_tcpfb_now_l
- stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
- stat_tcpfb_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
-
- expect_synrx=$((stat_synrx_last_l))
- expect_ackrx=$((stat_ackrx_last_l))
+ local stat_synrx
+ local stat_ackrx
+ local stat_cookietx
+ local stat_cookierx
+ local stat_ooo
+ local stat_tcpfb
+ stat_synrx=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ stat_ackrx=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ stat_cookietx=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ stat_cookierx=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ stat_ooo=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+ stat_tcpfb=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
+
+ expect_synrx=0
+ expect_ackrx=0
cookies=$(ip netns exec ${listener_ns} sysctl net.ipv4.tcp_syncookies)
cookies=${cookies##*=}
if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then
- expect_synrx=$((stat_synrx_last_l+connect_per_transfer))
- expect_ackrx=$((stat_ackrx_last_l+connect_per_transfer))
+ expect_synrx=${connect_per_transfer}
+ expect_ackrx=${connect_per_transfer}
fi
- if [ ${stat_synrx_now_l} -lt ${expect_synrx} ]; then
- mptcp_lib_pr_fail "lower MPC SYN rx (${stat_synrx_now_l})" \
+ if [ ${stat_synrx} -lt ${expect_synrx} ]; then
+ mptcp_lib_pr_fail "lower MPC SYN rx (${stat_synrx})" \
"than expected (${expect_synrx})"
retc=1
fi
- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
- if [ ${stat_ooo_now} -eq 0 ]; then
- mptcp_lib_pr_fail "lower MPC ACK rx (${stat_ackrx_now_l})" \
+ if [ ${stat_ackrx} -lt ${expect_ackrx} ]; then
+ if [ ${stat_ooo} -eq 0 ]; then
+ mptcp_lib_pr_fail "lower MPC ACK rx (${stat_ackrx})" \
"than expected (${expect_ackrx})"
rets=1
else
@@ -508,47 +502,45 @@ do_transfer()
csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
- local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
- if [ $csum_err_s_nr -gt 0 ]; then
- mptcp_lib_pr_fail "server got ${csum_err_s_nr} data checksum error[s]"
+ if [ $csum_err_s -gt 0 ]; then
+ mptcp_lib_pr_fail "server got ${csum_err_s} data checksum error[s]"
rets=1
fi
- local csum_err_c_nr=$((csum_err_c - stat_csum_err_c))
- if [ $csum_err_c_nr -gt 0 ]; then
- mptcp_lib_pr_fail "client got ${csum_err_c_nr} data checksum error[s]"
+ if [ $csum_err_c -gt 0 ]; then
+ mptcp_lib_pr_fail "client got ${csum_err_c} data checksum error[s]"
retc=1
fi
fi
- if [ ${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
+ if [ ${stat_ooo} -eq 0 ] && [ ${stat_tcpfb} -gt 0 ]; then
mptcp_lib_pr_fail "unexpected fallback to TCP"
rets=1
fi
if [ $cookies -eq 2 ];then
- if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
+ if [ $stat_cookietx -eq 0 ] ;then
extra+=" WARN: CookieSent: did not advance"
fi
- if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then
+ if [ $stat_cookierx -eq 0 ] ;then
extra+=" WARN: CookieRecv: did not advance"
fi
else
- if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then
+ if [ $stat_cookietx -gt 0 ] ;then
extra+=" WARN: CookieSent: changed"
fi
- if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then
+ if [ $stat_cookierx -gt 0 ] ;then
extra+=" WARN: CookieRecv: changed"
fi
fi
- if [ ${stat_synrx_now_l} -gt ${expect_synrx} ]; then
+ if [ ${stat_synrx} -gt ${expect_synrx} ]; then
extra+=" WARN: SYNRX: expect ${expect_synrx},"
- extra+=" got ${stat_synrx_now_l} (probably retransmissions)"
+ extra+=" got ${stat_synrx} (probably retransmissions)"
fi
- if [ ${stat_ackrx_now_l} -gt ${expect_ackrx} ]; then
+ if [ ${stat_ackrx} -gt ${expect_ackrx} ]; then
extra+=" WARN: ACKRX: expect ${expect_ackrx},"
- extra+=" got ${stat_ackrx_now_l} (probably retransmissions)"
+ extra+=" got ${stat_ackrx} (probably retransmissions)"
fi
if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index f0efbf985625..b2e6e548f796 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -983,10 +983,8 @@ do_transfer()
cond_start_capture ${listener_ns}
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
+ mptcp_lib_nstat_init "${listener_ns}"
+ mptcp_lib_nstat_init "${connector_ns}"
local extra_args
if [ $speed = "fast" ]; then
@@ -1026,38 +1024,38 @@ do_transfer()
if [ "$test_linkfail" -gt 1 ];then
listener_in="${sinfail}"
fi
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p ${port} -s ${srv_proto} \
- ${extra_srv_args} "${bind_addr}" < "${listener_in}" > "${sout}" &
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p ${port} -s ${srv_proto} \
+ ${extra_srv_args} "${bind_addr}" < "${listener_in}" > "${sout}" &
local spid=$!
mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
extra_cl_args="$extra_args $extra_cl_args"
if [ "$test_linkfail" -eq 0 ];then
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr < "$cin" > "$cout" &
elif [ "$test_linkfail" -eq 1 ] || [ "$test_linkfail" -eq 2 ];then
connector_in="${cinsent}"
( cat "$cinfail" ; sleep 2; link_failure $listener_ns ; cat "$cinfail" ) | \
tee "$cinsent" | \
- timeout ${timeout_test} \
ip netns exec ${connector_ns} \
./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
$extra_cl_args $connect_addr > "$cout" &
else
connector_in="${cinsent}"
tee "$cinsent" < "$cinfail" | \
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr > "$cout" &
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr > "$cout" &
fi
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${listener_ns}" \
+ "${connector_ns}" "${port}" "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
pm_nl_set_endpoint $listener_ns $connector_ns $connect_addr
check_cestab $listener_ns $connector_ns
@@ -1066,17 +1064,20 @@ do_transfer()
wait $spid
local rets=$?
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
cond_stop_capture
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat | grep Tcp > /tmp/${listener_ns}.out
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat | grep Tcp > /tmp/${connector_ns}.out
+ mptcp_lib_nstat_get "${listener_ns}"
+ mptcp_lib_nstat_get "${connector_ns}"
- if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ] || [ ${timeout_pid} -ne 0 ]; then
fail_test "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}" \
- "/tmp/${listener_ns}.out" "/tmp/${connector_ns}.out"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
return 1
fi
@@ -1151,12 +1152,20 @@ run_tests()
do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
}
+_dump_stats()
+{
+ local ns="${1}"
+ local side="${2}"
+
+	mptcp_lib_print_err "${side} ns stats (${ns})"
+ mptcp_lib_pr_nstat "${ns}"
+ echo
+}
+
dump_stats()
{
- echo Server ns stats
- ip netns exec $ns1 nstat -as | grep Tcp
- echo Client ns stats
- ip netns exec $ns2 nstat -as | grep Tcp
+ _dump_stats "${ns1}" "Server"
+ _dump_stats "${ns2}" "Client"
}
chk_csum_nr()
@@ -3646,7 +3655,6 @@ fullmesh_tests()
fastclose_tests()
{
if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
- MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=client \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
@@ -3655,7 +3663,6 @@ fastclose_tests()
fi
if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
- MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=server \
run_tests $ns1 $ns2 10.0.1.1
join_rst_nr=1 \
@@ -3952,7 +3959,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns1
pm_nl_set_limits $ns2 2 2
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns1
@@ -3985,7 +3992,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns2
@@ -4013,7 +4020,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns2
@@ -4034,7 +4041,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns2
@@ -4058,7 +4065,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns1
pm_nl_set_limits $ns2 1 1
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns1
@@ -4089,7 +4096,7 @@ endpoint_tests()
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- { test_linkfail=128 speed=slow \
+ { timeout_test=120 test_linkfail=128 speed=slow \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -4116,7 +4123,7 @@ endpoint_tests()
pm_nl_set_limits $ns2 0 3
pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -4194,7 +4201,7 @@ endpoint_tests()
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
- { test_linkfail=128 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -4203,38 +4210,45 @@ endpoint_tests()
$ns1 10.0.2.1 id 1 flags signal
chk_subflow_nr "before delete" 2
chk_mptcp_info subflows 1 subflows 1
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 1
pm_nl_del_endpoint $ns1 1 10.0.2.1
pm_nl_del_endpoint $ns1 2 224.0.0.1
sleep 0.5
chk_subflow_nr "after delete" 1
chk_mptcp_info subflows 0 subflows 0
+ chk_mptcp_info add_addr_signal 0 add_addr_accepted 0
pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-add" 3
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_del_endpoint $ns1 42 10.0.1.1
sleep 0.5
chk_subflow_nr "after delete ID 0" 2
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-add ID 0" 3
chk_mptcp_info subflows 3 subflows 3
+ chk_mptcp_info add_addr_signal 3 add_addr_accepted 2
pm_nl_del_endpoint $ns1 99 10.0.1.1
sleep 0.5
chk_subflow_nr "after re-delete ID 0" 2
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-re-add ID 0" 3
chk_mptcp_info subflows 3 subflows 3
+ chk_mptcp_info add_addr_signal 3 add_addr_accepted 2
mptcp_lib_kill_group_wait $tests_pid
kill_events_pids
@@ -4267,7 +4281,7 @@ endpoint_tests()
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
- { test_linkfail=128 speed=20 \
+ { timeout_test=120 test_linkfail=128 speed=20 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index f4388900016a..5fea7e7df628 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -106,23 +106,32 @@ mptcp_lib_pr_info() {
mptcp_lib_print_info "INFO: ${*}"
}
-# $1-2: listener/connector ns ; $3 port ; $4-5 listener/connector stat file
+mptcp_lib_pr_nstat() {
+ local ns="${1}"
+ local hist="/tmp/${ns}.out"
+
+ if [ -f "${hist}" ]; then
+ awk '$2 != 0 { print " "$0 }' "${hist}"
+ else
+ ip netns exec "${ns}" nstat -as | grep Tcp
+ fi
+}
+
+# $1-2: listener/connector ns ; $3 port
mptcp_lib_pr_err_stats() {
local lns="${1}"
local cns="${2}"
local port="${3}"
- local lstat="${4}"
- local cstat="${5}"
echo -en "${MPTCP_LIB_COLOR_RED}"
{
printf "\nnetns %s (listener) socket stat for %d:\n" "${lns}" "${port}"
ip netns exec "${lns}" ss -Menitam -o "sport = :${port}"
- cat "${lstat}"
+ mptcp_lib_pr_nstat "${lns}"
printf "\nnetns %s (connector) socket stat for %d:\n" "${cns}" "${port}"
ip netns exec "${cns}" ss -Menitam -o "dport = :${port}"
- [ "${lstat}" != "${cstat}" ] && cat "${cstat}"
+ [ "${lns}" != "${cns}" ] && mptcp_lib_pr_nstat "${cns}"
} 1>&2
echo -en "${MPTCP_LIB_COLOR_RESET}"
}
@@ -341,6 +350,19 @@ mptcp_lib_evts_get_info() {
mptcp_lib_get_info_value "${1}" "^type:${3:-1},"
}
+mptcp_lib_wait_timeout() {
+ local timeout_test="${1}"
+ local listener_ns="${2}"
+ local connector_ns="${3}"
+ local port="${4}"
+ shift 4 # rest are PIDs
+
+ sleep "${timeout_test}"
+ mptcp_lib_print_err "timeout"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
+ kill "${@}" 2>/dev/null
+}
+
# $1: PID
mptcp_lib_kill_wait() {
[ "${1}" -eq 0 ] && return 0
@@ -376,14 +398,36 @@ mptcp_lib_is_v6() {
[ -z "${1##*:*}" ]
}
+mptcp_lib_nstat_init() {
+ local ns="${1}"
+
+ rm -f "/tmp/${ns}."{nstat,out}
+ NSTAT_HISTORY="/tmp/${ns}.nstat" ip netns exec "${ns}" nstat -n
+}
+
+mptcp_lib_nstat_get() {
+ local ns="${1}"
+
+ # filter out non-*TCP stats, and the rate (last column)
+ NSTAT_HISTORY="/tmp/${ns}.nstat" ip netns exec "${ns}" nstat -sz |
+ grep -o ".*Tcp\S\+\s\+[0-9]\+" > "/tmp/${ns}.out"
+}
+
# $1: ns, $2: MIB counter
+# Get the counter from the history (mptcp_lib_nstat_{init,get}()) if available.
+# If not, get the counter from nstat ignoring any history.
mptcp_lib_get_counter() {
local ns="${1}"
local counter="${2}"
+ local hist="/tmp/${ns}.out"
local count
- count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
- awk 'NR==1 {next} {print $2}')
+ if [[ -s "${hist}" && "${counter}" == *"Tcp"* ]]; then
+ count=$(awk "/^${counter} / {print \$2; exit}" "${hist}")
+ else
+ count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
+ awk 'NR==1 {next} {print $2}')
+ fi
if [ -z "${count}" ]; then
mptcp_lib_fail_if_expected_feature "${counter} counter"
return 1
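
An illustrative Python rendering (not part of the patch) of what the nstat history helpers above do: mptcp_lib_nstat_get() keeps only the *Tcp* counters and drops the rate column, and mptcp_lib_get_counter() then answers lookups from that snapshot instead of re-running nstat. The sample nstat output is an assumption; only the name/value/rate column layout matters here.

# Sketch only: history-based counter lookup, as mptcp_lib.sh does with grep/awk
import re

nstat_output = """\
#kernel
TcpExtSyncookiesSent            0                  0.0
TcpExtTCPOFOQueue               3                  0.0
MPTcpExtMPCapableSYNRX          2                  0.0
UdpInDatagrams                  7                  0.0
"""

# mptcp_lib_nstat_get(): keep "*Tcp* <value>" pairs, drop the rate column
history = dict(
    m.groups()
    for line in nstat_output.splitlines()
    if (m := re.match(r"(\S*Tcp\S+)\s+(\d+)", line))
)

def get_counter(ns_history, name):
    # mptcp_lib_get_counter(): prefer the snapshot; the real helper falls
    # back to a live 'nstat -asz' run when no history file exists
    return int(ns_history[name])

print(get_counter(history, "MPTcpExtMPCapableSYNRX"))   # 2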
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index f01989be6e9b..ab8bce06b262 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -169,41 +169,44 @@ do_transfer()
cmsg+=",TCPINQ"
fi
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
-
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c "${cmsg}" \
- ${local_addr} < "$sin" > "$sout" &
+ mptcp_lib_nstat_init "${listener_ns}"
+ mptcp_lib_nstat_init "${connector_ns}"
+
+ ip netns exec ${listener_ns} \
+ $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c "${cmsg}" \
+ ${local_addr} < "$sin" > "$sout" &
local spid=$!
- sleep 1
+ mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c "${cmsg}" \
- $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} \
+ $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c "${cmsg}" \
+ $connect_addr < "$cin" > "$cout" &
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${listener_ns}" \
+ "${connector_ns}" "${port}" "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
wait $cpid
local retc=$?
wait $spid
local rets=$?
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat | grep Tcp > /tmp/${listener_ns}.out
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat | grep Tcp > /tmp/${connector_ns}.out
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
+ mptcp_lib_nstat_get "${listener_ns}"
+ mptcp_lib_nstat_get "${connector_ns}"
print_title "Transfer ${ip:2}"
- if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ] || [ ${timeout_pid} -ne 0 ]; then
mptcp_lib_pr_fail "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}" \
- "/tmp/${listener_ns}.out" "/tmp/${connector_ns}.out"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
mptcp_lib_result_fail "transfer ${ip}"
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 1903e8e84a31..806aaa7d2d61 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -155,48 +155,53 @@ do_transfer()
sleep 1
fi
- NSTAT_HISTORY=/tmp/${ns3}.nstat ip netns exec ${ns3} \
- nstat -n
- NSTAT_HISTORY=/tmp/${ns1}.nstat ip netns exec ${ns1} \
- nstat -n
-
- timeout ${timeout_test} \
- ip netns exec ${ns3} \
- ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
- 0.0.0.0 < "$sin" > "$sout" &
+ mptcp_lib_nstat_init "${ns3}"
+ mptcp_lib_nstat_init "${ns1}"
+
+ ip netns exec ${ns3} \
+ ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
+ 0.0.0.0 < "$sin" > "$sout" &
local spid=$!
mptcp_lib_wait_local_port_listen "${ns3}" "${port}"
- timeout ${timeout_test} \
- ip netns exec ${ns1} \
- ./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
- 10.0.3.3 < "$cin" > "$cout" &
+ ip netns exec ${ns1} \
+ ./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
+ 10.0.3.3 < "$cin" > "$cout" &
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${ns3}" "${ns1}" "${port}" \
+ "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
wait $cpid
local retc=$?
wait $spid
local rets=$?
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
if $capture; then
sleep 1
kill ${cappid_listener}
kill ${cappid_connector}
fi
- NSTAT_HISTORY=/tmp/${ns3}.nstat ip netns exec ${ns3} \
- nstat | grep Tcp > /tmp/${ns3}.out
- NSTAT_HISTORY=/tmp/${ns1}.nstat ip netns exec ${ns1} \
- nstat | grep Tcp > /tmp/${ns1}.out
+ mptcp_lib_nstat_get "${ns3}"
+ mptcp_lib_nstat_get "${ns1}"
cmp $sin $cout > /dev/null 2>&1
local cmps=$?
cmp $cin $sout > /dev/null 2>&1
local cmpc=$?
- if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
- [ $cmpc -eq 0 ] && [ $cmps -eq 0 ]; then
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ] &&
+ [ $cmpc -eq 0 ] && [ $cmps -eq 0 ] &&
+ [ $timeout_pid -eq 0 ]; then
printf "%-16s" " max $max_time "
mptcp_lib_pr_ok
cat "$capout"
@@ -204,8 +209,7 @@ do_transfer()
fi
mptcp_lib_pr_fail "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${ns3}" "${ns1}" "${port}" \
- "/tmp/${ns3}.out" "/tmp/${ns1}.out"
+ mptcp_lib_pr_err_stats "${ns3}" "${ns1}" "${port}"
ls -l $sin $cout
ls -l $cin $sout
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 87323942cb8a..e9ae1806ab07 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -211,7 +211,8 @@ make_connection()
ip netns exec "$ns1" \
./mptcp_connect -s MPTCP -w 300 -p $app_port -l $listen_addr > /dev/null 2>&1 &
local server_pid=$!
- sleep 0.5
+
+ mptcp_lib_wait_local_port_listen "${ns1}" "${port}"
# Run the client, transfer $file and stay connected to the server
# to conduct tests
diff --git a/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt b/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt
index b2b2cdf27e20..454441e7ecff 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
// Test that we correctly skip zero-length IOVs.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
+
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
index a82c8899d36b..0a0700afdaa3 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
@@ -4,6 +4,8 @@
// send a packet with MSG_ZEROCOPY and receive the notification ID
// repeat and verify IDs are consecutive
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
index c01915e7f4a1..df91675d2991 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
@@ -3,6 +3,8 @@
//
// send multiple packets, then read one range of all notifications.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
index 6509882932e9..2963cfcb14df 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Minimal client-side zerocopy test
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
index 2cd78755cb2a..ea0c2fa73c2d 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
@@ -7,6 +7,8 @@
// First send on a closed socket and wait for (absent) notification.
// Then connect and send and verify that notification nr. is zero.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
index 7671c20e01cf..4df978a9b82e 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
@@ -7,6 +7,9 @@
// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
// is correctly fired only once, when EPOLLET is set. send another packet with
// MSG_ZEROCOPY. confirm that EPOLLERR is correctly fired again only once.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
index fadc480fdb7f..36b6edc4858c 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
@@ -8,6 +8,9 @@
// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
// is correctly fired only once, when EPOLLET is set. send another packet with
// MSG_ZEROCOPY. confirm that EPOLLERR is correctly fired again only once.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
index 5bfa0d1d2f4a..1bea6f3b4558 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
@@ -8,6 +8,9 @@
// is correctly fired only once, when EPOLLONESHOT is set. send another packet
// with MSG_ZEROCOPY. confirm that EPOLLERR is not fired. Rearm the FD and
// confirm that EPOLLERR is correctly set.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
index 4a73bbf46961..e27c21ff5d18 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
@@ -8,6 +8,8 @@
// one will have no data in the initial send. On return 0 the
// zerocopy notification counter is not incremented. Verify this too.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
// Send a FastOpen request, no cookie yet so no data in SYN
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
index 36086c5877ce..b1fa77c77dfa 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
@@ -4,6 +4,8 @@
// send data with MSG_FASTOPEN | MSG_ZEROCOPY and verify that the
// kernel returns the notification ID.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh
./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x207`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
index 672f817faca0..2f5317d0a9fa 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
@@ -7,6 +7,8 @@
// because each iovec element becomes a frag
// 3) the PSH bit is set on an skb when it runs out of fragments
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
index a9a1ac0aea4f..9d5272c6b207 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
@@ -4,6 +4,8 @@
// verify that SO_EE_CODE_ZEROCOPY_COPIED is set on zerocopy
// packets of all sizes, including the smallest payload, 1B.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 163a084d525d..248c2b91fe42 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -8,6 +8,7 @@ ALL_TESTS="
kci_test_polrouting
kci_test_route_get
kci_test_addrlft
+ kci_test_addrlft_route_cleanup
kci_test_promote_secondaries
kci_test_tc
kci_test_gre
@@ -323,6 +324,25 @@ kci_test_addrlft()
end_test "PASS: preferred_lft addresses have expired"
}
+kci_test_addrlft_route_cleanup()
+{
+ local ret=0
+ local test_addr="2001:db8:99::1/64"
+ local test_prefix="2001:db8:99::/64"
+
+ run_cmd ip -6 addr add $test_addr dev "$devdummy" valid_lft 300 preferred_lft 300
+ run_cmd_grep "$test_prefix proto kernel" ip -6 route show dev "$devdummy"
+ run_cmd ip -6 addr del $test_addr dev "$devdummy"
+ run_cmd_grep_fail "$test_prefix" ip -6 route show dev "$devdummy"
+
+ if [ $ret -ne 0 ]; then
+ end_test "FAIL: route not cleaned up when address with valid_lft deleted"
+ return 1
+ fi
+
+ end_test "PASS: route cleaned up when address with valid_lft deleted"
+}
+
kci_test_promote_secondaries()
{
run_cmd ifconfig "$devdummy"
diff --git a/tools/testing/selftests/net/setup_loopback.sh b/tools/testing/selftests/net/setup_loopback.sh
deleted file mode 100644
index 2070b57849de..000000000000
--- a/tools/testing/selftests/net/setup_loopback.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-readonly FLUSH_PATH="/sys/class/net/${dev}/gro_flush_timeout"
-readonly IRQ_PATH="/sys/class/net/${dev}/napi_defer_hard_irqs"
-readonly FLUSH_TIMEOUT="$(< ${FLUSH_PATH})"
-readonly HARD_IRQS="$(< ${IRQ_PATH})"
-readonly server_ns=$(mktemp -u server-XXXXXXXX)
-readonly client_ns=$(mktemp -u client-XXXXXXXX)
-
-netdev_check_for_carrier() {
- local -r dev="$1"
-
- for i in {1..5}; do
- carrier="$(cat /sys/class/net/${dev}/carrier)"
- if [[ "${carrier}" -ne 1 ]] ; then
- echo "carrier not ready yet..." >&2
- sleep 1
- else
- echo "carrier ready" >&2
- break
- fi
- done
- echo "${carrier}"
-}
-
-# Assumes that there is no existing ipvlan device on the physical device
-setup_loopback_environment() {
- local dev="$1"
-
- # Fail hard if cannot turn on loopback mode for current NIC
- ethtool -K "${dev}" loopback on || exit 1
- sleep 1
-
- # Check for the carrier
- carrier=$(netdev_check_for_carrier ${dev})
- if [[ "${carrier}" -ne 1 ]] ; then
- echo "setup_loopback_environment failed"
- exit 1
- fi
-}
-
-setup_macvlan_ns(){
- local -r link_dev="$1"
- local -r ns_name="$2"
- local -r ns_dev="$3"
- local -r ns_mac="$4"
- local -r addr="$5"
-
- ip link add link "${link_dev}" dev "${ns_dev}" \
- address "${ns_mac}" type macvlan
- exit_code=$?
- if [[ "${exit_code}" -ne 0 ]]; then
- echo "setup_macvlan_ns failed"
- exit $exit_code
- fi
-
- [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
- ip link set dev "${ns_dev}" netns "${ns_name}"
- ip -netns "${ns_name}" link set dev "${ns_dev}" up
- if [[ -n "${addr}" ]]; then
- ip -netns "${ns_name}" addr add dev "${ns_dev}" "${addr}"
- fi
-
- sleep 1
-}
-
-cleanup_macvlan_ns(){
- while (( $# >= 2 )); do
- ns_name="$1"
- ns_dev="$2"
- ip -netns "${ns_name}" link del dev "${ns_dev}"
- ip netns del "${ns_name}"
- shift 2
- done
-}
-
-cleanup_loopback(){
- local -r dev="$1"
-
- ethtool -K "${dev}" loopback off
- sleep 1
-
- # Check for the carrier
- carrier=$(netdev_check_for_carrier ${dev})
- if [[ "${carrier}" -ne 1 ]] ; then
- echo "setup_loopback_environment failed"
- exit 1
- fi
-}
-
-setup_interrupt() {
- # Use timer on host to trigger the network stack
- # Also disable device interrupt to not depend on NIC interrupt
- # Reduce test flakiness caused by unexpected interrupts
- echo 100000 >"${FLUSH_PATH}"
- echo 50 >"${IRQ_PATH}"
-}
-
-setup_ns() {
- # Set up server_ns namespace and client_ns namespace
- setup_macvlan_ns "${dev}" ${server_ns} server "${SERVER_MAC}"
- setup_macvlan_ns "${dev}" ${client_ns} client "${CLIENT_MAC}"
-}
-
-cleanup_ns() {
- cleanup_macvlan_ns ${server_ns} server ${client_ns} client
-}
-
-setup() {
- setup_loopback_environment "${dev}"
- setup_interrupt
-}
-
-cleanup() {
- cleanup_loopback "${dev}"
-
- echo "${FLUSH_TIMEOUT}" >"${FLUSH_PATH}"
- echo "${HARD_IRQS}" >"${IRQ_PATH}"
-}
diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh
deleted file mode 100644
index 152bf4c65747..000000000000
--- a/tools/testing/selftests/net/setup_veth.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-readonly server_ns=$(mktemp -u server-XXXXXXXX)
-readonly client_ns=$(mktemp -u client-XXXXXXXX)
-
-setup_veth_ns() {
- local -r link_dev="$1"
- local -r ns_name="$2"
- local -r ns_dev="$3"
- local -r ns_mac="$4"
-
- [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
- echo 200000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
- echo 1 > "/sys/class/net/${ns_dev}/napi_defer_hard_irqs"
- ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535
- ip -netns "${ns_name}" link set dev "${ns_dev}" up
-
- ip netns exec "${ns_name}" ethtool -K "${ns_dev}" gro on tso off
-}
-
-setup_ns() {
- # Set up server_ns namespace and client_ns namespace
- ip link add name server type veth peer name client
-
- setup_veth_ns "${dev}" ${server_ns} server "${SERVER_MAC}"
- setup_veth_ns "${dev}" ${client_ns} client "${CLIENT_MAC}"
-}
-
-cleanup_ns() {
- local ns_name
-
- for ns_name in ${client_ns} ${server_ns}; do
- [[ -e /var/run/netns/"${ns_name}" ]] && ip netns del "${ns_name}"
- done
-}
-
-setup() {
- # no global init setup step needed
- :
-}
-
-cleanup() {
- cleanup_ns
-}
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index 8457b7ccbc09..b76df1efc2ef 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -174,7 +174,7 @@ static int do_recv_errqueue_timeout(int fdt)
msg.msg_controllen = sizeof(control);
while (1) {
- const char *reason;
+ const char *reason = NULL;
ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
if (ret == -1 && errno == EAGAIN)
diff --git a/tools/testing/selftests/net/toeplitz.sh b/tools/testing/selftests/net/toeplitz.sh
deleted file mode 100755
index 8ff172f7bb1b..000000000000
--- a/tools/testing/selftests/net/toeplitz.sh
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# extended toeplitz test: test rxhash plus, optionally, either (1) rss mapping
-# from rxhash to rx queue ('-rss') or (2) rps mapping from rxhash to cpu
-# ('-rps <rps_map>')
-#
-# irq-pattern-prefix can be derived from /sys/kernel/irq/*/action,
-# which is a driver-specific encoding.
-#
-# invoke as ./toeplitz.sh (-i <iface>) -u|-t -4|-6 \
-# [(-rss -irq_prefix <irq-pattern-prefix>)|(-rps <rps_map>)]
-
-source setup_loopback.sh
-readonly SERVER_IP4="192.168.1.200/24"
-readonly SERVER_IP6="fda8::1/64"
-readonly SERVER_MAC="aa:00:00:00:00:02"
-
-readonly CLIENT_IP4="192.168.1.100/24"
-readonly CLIENT_IP6="fda8::2/64"
-readonly CLIENT_MAC="aa:00:00:00:00:01"
-
-PORT=8000
-KEY="$(</proc/sys/net/core/netdev_rss_key)"
-TEST_RSS=false
-RPS_MAP=""
-PROTO_FLAG=""
-IP_FLAG=""
-DEV="eth0"
-
-# Return the number of rxqs among which RSS is configured to spread packets.
-# This is determined by reading the RSS indirection table using ethtool.
-get_rss_cfg_num_rxqs() {
- echo $(ethtool -x "${DEV}" |
- grep -E [[:space:]]+[0-9]+:[[:space:]]+ |
- cut -d: -f2- |
- awk '{$1=$1};1' |
- tr ' ' '\n' |
- sort -u |
- wc -l)
-}
-
-# Return a list of the receive irq handler cpus.
-# The list is ordered by the irqs, so first rxq-0 cpu, then rxq-1 cpu, etc.
-# Reads /sys/kernel/irq/ in order, so algorithm depends on
-# irq_{rxq-0} < irq_{rxq-1}, etc.
-get_rx_irq_cpus() {
- CPUS=""
- # sort so that irq 2 is read before irq 10
- SORTED_IRQS=$(for i in /sys/kernel/irq/*; do echo $i; done | sort -V)
- # Consider only as many queues as RSS actually uses. We assume that
- # if RSS_CFG_NUM_RXQS=N, then RSS uses rxqs 0-(N-1).
- RSS_CFG_NUM_RXQS=$(get_rss_cfg_num_rxqs)
- RXQ_COUNT=0
-
- for i in ${SORTED_IRQS}
- do
- [[ "${RXQ_COUNT}" -lt "${RSS_CFG_NUM_RXQS}" ]] || break
- # lookup relevant IRQs by action name
- [[ -e "$i/actions" ]] || continue
- cat "$i/actions" | grep -q "${IRQ_PATTERN}" || continue
- irqname=$(<"$i/actions")
-
- # does the IRQ get called
- irqcount=$(cat "$i/per_cpu_count" | tr -d '0,')
- [[ -n "${irqcount}" ]] || continue
-
- # lookup CPU
- irq=$(basename "$i")
- cpu=$(cat "/proc/irq/$irq/smp_affinity_list")
-
- if [[ -z "${CPUS}" ]]; then
- CPUS="${cpu}"
- else
- CPUS="${CPUS},${cpu}"
- fi
- RXQ_COUNT=$((RXQ_COUNT+1))
- done
-
- echo "${CPUS}"
-}
-
-get_disable_rfs_cmd() {
- echo "echo 0 > /proc/sys/net/core/rps_sock_flow_entries;"
-}
-
-get_set_rps_bitmaps_cmd() {
- CMD=""
- for i in /sys/class/net/${DEV}/queues/rx-*/rps_cpus
- do
- CMD="${CMD} echo $1 > ${i};"
- done
-
- echo "${CMD}"
-}
-
-get_disable_rps_cmd() {
- echo "$(get_set_rps_bitmaps_cmd 0)"
-}
-
-die() {
- echo "$1"
- exit 1
-}
-
-check_nic_rxhash_enabled() {
- local -r pattern="receive-hashing:\ on"
-
- ethtool -k "${DEV}" | grep -q "${pattern}" || die "rxhash must be enabled"
-}
-
-parse_opts() {
- local prog=$0
- shift 1
-
- while [[ "$1" =~ "-" ]]; do
- if [[ "$1" = "-irq_prefix" ]]; then
- shift
- IRQ_PATTERN="^$1-[0-9]*$"
- elif [[ "$1" = "-u" || "$1" = "-t" ]]; then
- PROTO_FLAG="$1"
- elif [[ "$1" = "-4" ]]; then
- IP_FLAG="$1"
- SERVER_IP="${SERVER_IP4}"
- CLIENT_IP="${CLIENT_IP4}"
- elif [[ "$1" = "-6" ]]; then
- IP_FLAG="$1"
- SERVER_IP="${SERVER_IP6}"
- CLIENT_IP="${CLIENT_IP6}"
- elif [[ "$1" = "-rss" ]]; then
- TEST_RSS=true
- elif [[ "$1" = "-rps" ]]; then
- shift
- RPS_MAP="$1"
- elif [[ "$1" = "-i" ]]; then
- shift
- DEV="$1"
- else
- die "Usage: ${prog} (-i <iface>) -u|-t -4|-6 \
- [(-rss -irq_prefix <irq-pattern-prefix>)|(-rps <rps_map>)]"
- fi
- shift
- done
-}
-
-setup() {
- setup_loopback_environment "${DEV}"
-
- # Set up server_ns namespace and client_ns namespace
- setup_macvlan_ns "${DEV}" $server_ns server \
- "${SERVER_MAC}" "${SERVER_IP}"
- setup_macvlan_ns "${DEV}" $client_ns client \
- "${CLIENT_MAC}" "${CLIENT_IP}"
-}
-
-cleanup() {
- cleanup_macvlan_ns $server_ns server $client_ns client
- cleanup_loopback "${DEV}"
-}
-
-parse_opts $0 $@
-
-setup
-trap cleanup EXIT
-
-check_nic_rxhash_enabled
-
-# Actual test starts here
-if [[ "${TEST_RSS}" = true ]]; then
- # RPS/RFS must be disabled because they move packets between cpus,
- # which breaks the PACKET_FANOUT_CPU identification of RSS decisions.
- eval "$(get_disable_rfs_cmd) $(get_disable_rps_cmd)" \
- ip netns exec $server_ns ./toeplitz "${IP_FLAG}" "${PROTO_FLAG}" \
- -d "${PORT}" -i "${DEV}" -k "${KEY}" -T 1000 \
- -C "$(get_rx_irq_cpus)" -s -v &
-elif [[ ! -z "${RPS_MAP}" ]]; then
- eval "$(get_disable_rfs_cmd) $(get_set_rps_bitmaps_cmd ${RPS_MAP})" \
- ip netns exec $server_ns ./toeplitz "${IP_FLAG}" "${PROTO_FLAG}" \
- -d "${PORT}" -i "${DEV}" -k "${KEY}" -T 1000 \
- -r "0x${RPS_MAP}" -s -v &
-else
- ip netns exec $server_ns ./toeplitz "${IP_FLAG}" "${PROTO_FLAG}" \
- -d "${PORT}" -i "${DEV}" -k "${KEY}" -T 1000 -s -v &
-fi
-
-server_pid=$!
-
-ip netns exec $client_ns ./toeplitz_client.sh "${PROTO_FLAG}" \
- "${IP_FLAG}" "${SERVER_IP%%/*}" "${PORT}" &
-
-client_pid=$!
-
-wait "${server_pid}"
-exit_code=$?
-kill -9 "${client_pid}"
-if [[ "${exit_code}" -eq 0 ]]; then
- echo "Test Succeeded!"
-fi
-exit "${exit_code}"
diff --git a/tools/testing/selftests/net/toeplitz_client.sh b/tools/testing/selftests/net/toeplitz_client.sh
deleted file mode 100755
index 2fef34f4aba1..000000000000
--- a/tools/testing/selftests/net/toeplitz_client.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# A simple program for generating traffic for the toeplitz test.
-#
-# This program sends packets periodically for, conservatively, 20 seconds. The
-# intent is for the calling program to kill this program once it is no longer
-# needed, rather than waiting for the 20 second expiration.
-
-send_traffic() {
- expiration=$((SECONDS+20))
- while [[ "${SECONDS}" -lt "${expiration}" ]]
- do
- if [[ "${PROTO}" == "-u" ]]; then
- echo "msg $i" | nc "${IPVER}" -u -w 0 "${ADDR}" "${PORT}"
- else
- echo "msg $i" | nc "${IPVER}" -w 0 "${ADDR}" "${PORT}"
- fi
- sleep 0.001
- done
-}
-
-PROTO=$1
-IPVER=$2
-ADDR=$3
-PORT=$4
-
-send_traffic
diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index dae91eb97d69..bcc14688661d 100644
--- a/tools/testing/selftests/net/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -217,7 +217,7 @@ static void print_timestamp_usr(void)
static void print_timestamp(struct scm_timestamping *tss, int tstype,
int tskey, int payload_len)
{
- const char *tsname;
+ const char *tsname = NULL;
validate_key(tskey, tstype);
diff --git a/tools/testing/selftests/user_events/perf_test.c b/tools/testing/selftests/user_events/perf_test.c
index 5288e768b207..68625362add2 100644
--- a/tools/testing/selftests/user_events/perf_test.c
+++ b/tools/testing/selftests/user_events/perf_test.c
@@ -236,7 +236,7 @@ TEST_F(user, perf_empty_events) {
ASSERT_EQ(1 << reg.enable_bit, self->check);
/* Ensure write shows up at correct offset */
- ASSERT_NE(-1, write(self->data_fd, &reg.write_index,
+ ASSERT_NE(-1, write(self->data_fd, (void *)&reg.write_index,
sizeof(reg.write_index)));
val = (void *)(((char *)perf_page) + perf_page->data_offset);
ASSERT_EQ(PERF_RECORD_SAMPLE, *val);
diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h
index 240409bf5f8a..69ec0c856481 100644
--- a/tools/testing/selftests/vfio/lib/include/vfio_util.h
+++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h
@@ -4,9 +4,12 @@
#include <fcntl.h>
#include <string.h>
-#include <linux/vfio.h>
+
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
#include <linux/list.h>
#include <linux/pci_regs.h>
+#include <linux/vfio.h>
#include "../../../kselftest.h"
@@ -185,6 +188,13 @@ struct vfio_pci_device {
struct vfio_pci_driver driver;
};
+struct iova_allocator {
+ struct iommu_iova_range *ranges;
+ u32 nranges;
+ u32 range_idx;
+ u64 range_offset;
+};
+
/*
* Return the BDF string of the device that the test should use.
*
@@ -206,6 +216,13 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_
void vfio_pci_device_cleanup(struct vfio_pci_device *device);
void vfio_pci_device_reset(struct vfio_pci_device *device);
+struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
+ u32 *nranges);
+
+struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device);
+void iova_allocator_cleanup(struct iova_allocator *allocator);
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
+
int __vfio_pci_dma_map(struct vfio_pci_device *device,
struct vfio_dma_region *region);
int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
index a381fd253aa7..b479a359da12 100644
--- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c
+++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
@@ -12,11 +12,12 @@
#include <sys/mman.h>
#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
#include <linux/limits.h>
#include <linux/mman.h>
+#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vfio.h>
-#include <linux/iommufd.h>
#include "../../../kselftest.h"
#include <vfio_util.h>
@@ -29,6 +30,249 @@
VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
} while (0)
+static struct vfio_info_cap_header *next_cap_hdr(void *buf, u32 bufsz,
+ u32 *cap_offset)
+{
+ struct vfio_info_cap_header *hdr;
+
+ if (!*cap_offset)
+ return NULL;
+
+ VFIO_ASSERT_LT(*cap_offset, bufsz);
+ VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));
+
+ hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
+ *cap_offset = hdr->next;
+
+ return hdr;
+}
+
+static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *info,
+ u16 cap_id)
+{
+ struct vfio_info_cap_header *hdr;
+ u32 cap_offset = info->cap_offset;
+ u32 max_depth;
+ u32 depth = 0;
+
+ if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
+ return NULL;
+
+ if (cap_offset)
+ VFIO_ASSERT_GE(cap_offset, sizeof(*info));
+
+ max_depth = (info->argsz - sizeof(*info)) / sizeof(*hdr);
+
+ while ((hdr = next_cap_hdr(info, info->argsz, &cap_offset))) {
+ depth++;
+ VFIO_ASSERT_LE(depth, max_depth, "Capability chain contains a cycle\n");
+
+ if (hdr->id == cap_id)
+ return hdr;
+ }
+
+ return NULL;
+}
+
+/* Return buffer including capability chain, if present. Free with free() */
+static struct vfio_iommu_type1_info *vfio_iommu_get_info(struct vfio_pci_device *device)
+{
+ struct vfio_iommu_type1_info *info;
+
+ info = malloc(sizeof(*info));
+ VFIO_ASSERT_NOT_NULL(info);
+
+ *info = (struct vfio_iommu_type1_info) {
+ .argsz = sizeof(*info),
+ };
+
+ ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
+ VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
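+	/* The first call learns the required argsz; grow the buffer and re-query to pull in the capability chain */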
+ info = realloc(info, info->argsz);
+ VFIO_ASSERT_NOT_NULL(info);
+
+ ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
+ VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+ return info;
+}
+
+/*
+ * Return iova ranges for the device's container. Normalize vfio_iommu_type1 to
+ * report iommufd's iommu_iova_range. Free with free().
+ */
+static struct iommu_iova_range *vfio_iommu_iova_ranges(struct vfio_pci_device *device,
+ u32 *nranges)
+{
+ struct vfio_iommu_type1_info_cap_iova_range *cap_range;
+ struct vfio_iommu_type1_info *info;
+ struct vfio_info_cap_header *hdr;
+ struct iommu_iova_range *ranges = NULL;
+
+ info = vfio_iommu_get_info(device);
+ hdr = vfio_iommu_info_cap_hdr(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
+ VFIO_ASSERT_NOT_NULL(hdr);
+
+ cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
+ VFIO_ASSERT_GT(cap_range->nr_iovas, 0);
+
+ ranges = calloc(cap_range->nr_iovas, sizeof(*ranges));
+ VFIO_ASSERT_NOT_NULL(ranges);
+
+ for (u32 i = 0; i < cap_range->nr_iovas; i++) {
+ ranges[i] = (struct iommu_iova_range){
+ .start = cap_range->iova_ranges[i].start,
+ .last = cap_range->iova_ranges[i].end,
+ };
+ }
+
+ *nranges = cap_range->nr_iovas;
+
+ free(info);
+ return ranges;
+}
+
+/* Return iova ranges of the device's IOAS. Free with free() */
+static struct iommu_iova_range *iommufd_iova_ranges(struct vfio_pci_device *device,
+ u32 *nranges)
+{
+ struct iommu_iova_range *ranges;
+ int ret;
+
+ struct iommu_ioas_iova_ranges query = {
+ .size = sizeof(query),
+ .ioas_id = device->ioas_id,
+ };
+
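+	/* Probe with no output buffer: the ioctl fails with EMSGSIZE and reports num_iovas */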
+ ret = ioctl(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+ VFIO_ASSERT_EQ(ret, -1);
+ VFIO_ASSERT_EQ(errno, EMSGSIZE);
+ VFIO_ASSERT_GT(query.num_iovas, 0);
+
+ ranges = calloc(query.num_iovas, sizeof(*ranges));
+ VFIO_ASSERT_NOT_NULL(ranges);
+
+ query.allowed_iovas = (uintptr_t)ranges;
+
+ ioctl_assert(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+ *nranges = query.num_iovas;
+
+ return ranges;
+}
+
+static int iova_range_comp(const void *a, const void *b)
+{
+ const struct iommu_iova_range *ra = a, *rb = b;
+
+ if (ra->start < rb->start)
+ return -1;
+
+ if (ra->start > rb->start)
+ return 1;
+
+ return 0;
+}
+
+/* Return sorted IOVA ranges of the device. Free with free(). */
+struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
+ u32 *nranges)
+{
+ struct iommu_iova_range *ranges;
+
+ if (device->iommufd)
+ ranges = iommufd_iova_ranges(device, nranges);
+ else
+ ranges = vfio_iommu_iova_ranges(device, nranges);
+
+ if (!ranges)
+ return NULL;
+
+ VFIO_ASSERT_GT(*nranges, 0);
+
+ /* Sort and check that ranges are sane and non-overlapping */
+ qsort(ranges, *nranges, sizeof(*ranges), iova_range_comp);
+ VFIO_ASSERT_LT(ranges[0].start, ranges[0].last);
+
+ for (u32 i = 1; i < *nranges; i++) {
+ VFIO_ASSERT_LT(ranges[i].start, ranges[i].last);
+ VFIO_ASSERT_LT(ranges[i - 1].last, ranges[i].start);
+ }
+
+ return ranges;
+}
+
+struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device)
+{
+ struct iova_allocator *allocator;
+ struct iommu_iova_range *ranges;
+ u32 nranges;
+
+ ranges = vfio_pci_iova_ranges(device, &nranges);
+ VFIO_ASSERT_NOT_NULL(ranges);
+
+ allocator = malloc(sizeof(*allocator));
+ VFIO_ASSERT_NOT_NULL(allocator);
+
+ *allocator = (struct iova_allocator){
+ .ranges = ranges,
+ .nranges = nranges,
+ .range_idx = 0,
+ .range_offset = 0,
+ };
+
+ return allocator;
+}
+
+void iova_allocator_cleanup(struct iova_allocator *allocator)
+{
+ free(allocator->ranges);
+ free(allocator);
+}
+
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size)
+{
+ VFIO_ASSERT_GT(size, 0, "Invalid size arg, zero\n");
+ VFIO_ASSERT_EQ(size & (size - 1), 0, "Invalid size arg, non-power-of-2\n");
+
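+	/* Simple bump allocator: walk the sorted ranges in order; individual allocations are never freed or reused */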
+ for (;;) {
+ struct iommu_iova_range *range;
+ iova_t iova, last;
+
+ VFIO_ASSERT_LT(allocator->range_idx, allocator->nranges,
+ "IOVA allocator out of space\n");
+
+ range = &allocator->ranges[allocator->range_idx];
+ iova = range->start + allocator->range_offset;
+
+ /* Check for sufficient space at the current offset */
+ if (check_add_overflow(iova, size - 1, &last) ||
+ last > range->last)
+ goto next_range;
+
+	/* Align iova up to size (last == iova + size - 1, so the mask rounds up) */
+ iova = last & ~(size - 1);
+
+ /* Check for sufficient space at the aligned iova */
+ if (check_add_overflow(iova, size - 1, &last) ||
+ last > range->last)
+ goto next_range;
+
+ if (last == range->last) {
+ allocator->range_idx++;
+ allocator->range_offset = 0;
+ } else {
+ allocator->range_offset = last - range->start + 1;
+ }
+
+ return iova;
+
+next_range:
+ allocator->range_idx++;
+ allocator->range_offset = 0;
+ }
+}
+
iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
{
struct vfio_dma_region *region;
diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
index 4f1ea79a200c..102603d4407d 100644
--- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
+++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
@@ -3,6 +3,8 @@
#include <sys/mman.h>
#include <unistd.h>
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/sizes.h>
@@ -93,6 +95,7 @@ static int iommu_mapping_get(const char *bdf, u64 iova,
FIXTURE(vfio_dma_mapping_test) {
struct vfio_pci_device *device;
+ struct iova_allocator *iova_allocator;
};
FIXTURE_VARIANT(vfio_dma_mapping_test) {
@@ -117,10 +120,12 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB |
FIXTURE_SETUP(vfio_dma_mapping_test)
{
self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+ self->iova_allocator = iova_allocator_init(self->device);
}
FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
+ iova_allocator_cleanup(self->iova_allocator);
vfio_pci_device_cleanup(self->device);
}
@@ -142,7 +147,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
else
ASSERT_NE(region.vaddr, MAP_FAILED);
- region.iova = (u64)region.vaddr;
+ region.iova = iova_allocator_alloc(self->iova_allocator, size);
region.size = size;
vfio_pci_dma_map(self->device, &region);
@@ -219,7 +224,10 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
FIXTURE_SETUP(vfio_dma_map_limit_test)
{
struct vfio_dma_region *region = &self->region;
+ struct iommu_iova_range *ranges;
u64 region_size = getpagesize();
+ iova_t last_iova;
+ u32 nranges;
/*
* Over-allocate mmap by double the size to provide enough backing vaddr
@@ -232,8 +240,13 @@ FIXTURE_SETUP(vfio_dma_map_limit_test)
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT_NE(region->vaddr, MAP_FAILED);
- /* One page prior to the end of address space */
- region->iova = ~(iova_t)0 & ~(region_size - 1);
+ ranges = vfio_pci_iova_ranges(self->device, &nranges);
+ VFIO_ASSERT_NOT_NULL(ranges);
+ last_iova = ranges[nranges - 1].last;
+ free(ranges);
+
+ /* One page prior to the last iova */
+ region->iova = last_iova & ~(region_size - 1);
region->size = region_size;
}
@@ -276,6 +289,7 @@ TEST_F(vfio_dma_map_limit_test, overflow)
struct vfio_dma_region *region = &self->region;
int rc;
+ region->iova = ~(iova_t)0 & ~(region->size - 1);
region->size = self->mmap_size;
rc = __vfio_pci_dma_map(self->device, region);
diff --git a/tools/testing/selftests/vfio/vfio_pci_driver_test.c b/tools/testing/selftests/vfio/vfio_pci_driver_test.c
index 2dbd70b7db62..f69eec8b928d 100644
--- a/tools/testing/selftests/vfio/vfio_pci_driver_test.c
+++ b/tools/testing/selftests/vfio/vfio_pci_driver_test.c
@@ -19,6 +19,7 @@ static const char *device_bdf;
} while (0)
static void region_setup(struct vfio_pci_device *device,
+ struct iova_allocator *iova_allocator,
struct vfio_dma_region *region, u64 size)
{
const int flags = MAP_SHARED | MAP_ANONYMOUS;
@@ -29,7 +30,7 @@ static void region_setup(struct vfio_pci_device *device,
VFIO_ASSERT_NE(vaddr, MAP_FAILED);
region->vaddr = vaddr;
- region->iova = (u64)vaddr;
+ region->iova = iova_allocator_alloc(iova_allocator, size);
region->size = size;
vfio_pci_dma_map(device, region);
@@ -44,6 +45,7 @@ static void region_teardown(struct vfio_pci_device *device,
FIXTURE(vfio_pci_driver_test) {
struct vfio_pci_device *device;
+ struct iova_allocator *iova_allocator;
struct vfio_dma_region memcpy_region;
void *vaddr;
int msi_fd;
@@ -72,14 +74,15 @@ FIXTURE_SETUP(vfio_pci_driver_test)
struct vfio_pci_driver *driver;
self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+ self->iova_allocator = iova_allocator_init(self->device);
driver = &self->device->driver;
- region_setup(self->device, &self->memcpy_region, SZ_1G);
- region_setup(self->device, &driver->region, SZ_2M);
+ region_setup(self->device, self->iova_allocator, &self->memcpy_region, SZ_1G);
+ region_setup(self->device, self->iova_allocator, &driver->region, SZ_2M);
/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
- self->unmapped_iova = 8UL * SZ_1G;
+ self->unmapped_iova = iova_allocator_alloc(self->iova_allocator, SZ_1G);
vfio_pci_driver_init(self->device);
self->msi_fd = self->device->msi_eventfds[driver->msi];
@@ -108,6 +111,7 @@ FIXTURE_TEARDOWN(vfio_pci_driver_test)
region_teardown(self->device, &self->memcpy_region);
region_teardown(self->device, &driver->region);
+ iova_allocator_cleanup(self->iova_allocator);
vfio_pci_device_cleanup(self->device);
}
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index d4517386e551..9e1250790f33 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -2015,6 +2015,11 @@ static void test_stream_transport_change_client(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
+ /* Although setting SO_LINGER does not affect the original test
+ * for null-ptr-deref, it may trigger a lockdep warning.
+ */
+ enable_so_linger(s, 1);
+
ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
/* The connect can fail due to signals coming from the thread,
* or because the receiver connection queue is full.
@@ -2352,7 +2357,7 @@ static struct test_case test_cases[] = {
.run_server = test_stream_nolinger_server,
},
{
- .name = "SOCK_STREAM transport change null-ptr-deref",
+ .name = "SOCK_STREAM transport change null-ptr-deref, lockdep warn",
.run_client = test_stream_transport_change_client,
.run_server = test_stream_transport_change_server,
},