author     Jakub Kicinski <kuba@kernel.org>    2026-01-22 20:13:25 -0800
committer  Jakub Kicinski <kuba@kernel.org>    2026-01-22 20:14:36 -0800
commit     9abf22075da98c615be2f608ec1167329a71eafd (patch)
tree       8b2da9cc0b7f9410e3619223a679467d40391004
parent     0b87bbf65d7d33754fd12df5984b9741e886dc69 (diff)
parent     0a80e38d0fe1fe7b59c1e93ad908c4148a15926a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.19-rc7).

Conflicts:

drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
  b35a6fd37a00 ("hinic3: Add adaptive IRQ coalescing with DIM")
  fb2bb2a1ebf7 ("hinic3: Fix netif_queue_set_napi queue_index input parameter error")
https://lore.kernel.org/fc0a7fdf08789a52653e8ad05281a0a849e79206.1768915707.git.zhuyikai1@h-partners.com

drivers/net/wireless/ath/ath12k/mac.c
drivers/net/wireless/ath/ath12k/wifi7/hw.c
  31707572108d ("wifi: ath12k: Fix wrong P2P device link id issue")
  c26f294fef2a ("wifi: ath12k: Move ieee80211_ops callback to the arch specific module")
https://lore.kernel.org/20260114123751.6a208818@canb.auug.org.au

Adjacent changes:

drivers/net/wireless/ath/ath12k/mac.c
  8b8d6ee53dfd ("wifi: ath12k: Fix scan state stuck in ABORTING after cancel_remain_on_channel")
  914c890d3b90 ("wifi: ath12k: Add framework for hardware specific ieee80211_ops registration")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  .mailmap | 5
-rw-r--r--  CREDITS | 4
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 35
-rw-r--r--  Documentation/admin-guide/sysctl/vm.rst | 4
-rw-r--r--  Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml | 17
-rw-r--r--  Documentation/devicetree/bindings/sound/everest,es8316.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/sound/realtek,rt5640.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/sound/rockchip-spdif.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,dwc3.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml | 4
-rw-r--r--  Documentation/mm/allocation-profiling.rst | 10
-rw-r--r--  Documentation/netlink/specs/dev-energymodel.yaml | 175
-rw-r--r--  Documentation/netlink/specs/em.yaml | 113
-rw-r--r--  Documentation/netlink/specs/fou.yaml | 2
-rw-r--r--  Documentation/process/maintainer-netdev.rst | 12
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/microchip/lan966x-pcb8290.dts | 1
-rw-r--r--  arch/arm/boot/dts/microchip/sama7d65.dtsi | 4
-rw-r--r--  arch/arm/mach-npcm/Kconfig | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210.dtsi | 24
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8550.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8650.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/qcom/talos.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3308-sakurapi-rk3308b.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3326-odroid-go3.dts | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3576-nanopi-m5.dts | 12
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3576.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-base.dtsi | 4
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k0500.dtsi | 3
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k1000.dtsi | 31
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000.dtsi | 35
-rw-r--r--  arch/loongarch/kernel/head.S | 8
-rw-r--r--  arch/loongarch/kernel/perf_event.c | 21
-rw-r--r--  arch/loongarch/kvm/intc/eiointc.c | 1
-rw-r--r--  arch/loongarch/kvm/intc/ipi.c | 1
-rw-r--r--  arch/loongarch/kvm/intc/pch_pic.c | 1
-rw-r--r--  arch/mips/mm/init.c | 23
-rw-r--r--  arch/powerpc/kernel/watchdog.c | 15
-rw-r--r--  arch/x86/include/asm/kfence.h | 29
-rw-r--r--  arch/x86/kernel/cpu/resctrl/core.c | 21
-rw-r--r--  arch/x86/kernel/cpu/resctrl/internal.h | 3
-rw-r--r--  arch/x86/mm/kaslr.c | 10
-rw-r--r--  block/bio-integrity-auto.c | 2
-rw-r--r--  drivers/acpi/x86/s2idle.c | 9
-rw-r--r--  drivers/ata/ahci.c | 10
-rw-r--r--  drivers/ata/libata-core.c | 8
-rw-r--r--  drivers/ata/libata-sata.c | 2
-rw-r--r--  drivers/block/null_blk/main.c | 12
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 1
-rw-r--r--  drivers/cxl/acpi.c | 11
-rw-r--r--  drivers/cxl/core/hdm.c | 4
-rw-r--r--  drivers/cxl/core/port.c | 2
-rw-r--r--  drivers/cxl/core/region.c | 36
-rw-r--r--  drivers/dax/dax-private.h | 10
-rw-r--r--  drivers/dma/apple-admac.c | 1
-rw-r--r--  drivers/dma/at_hdmac.c | 9
-rw-r--r--  drivers/dma/bcm-sba-raid.c | 6
-rw-r--r--  drivers/dma/cv1800b-dmamux.c | 17
-rw-r--r--  drivers/dma/dw/rzn1-dmamux.c | 4
-rw-r--r--  drivers/dma/fsl-edma-common.c | 1
-rw-r--r--  drivers/dma/idxd/compat.c | 23
-rw-r--r--  drivers/dma/lpc18xx-dmamux.c | 19
-rw-r--r--  drivers/dma/lpc32xx-dmamux.c | 19
-rw-r--r--  drivers/dma/mmp_pdma.c | 26
-rw-r--r--  drivers/dma/qcom/gpi.c | 6
-rw-r--r--  drivers/dma/sh/rz-dmac.c | 18
-rw-r--r--  drivers/dma/stm32/stm32-dmamux.c | 31
-rw-r--r--  drivers/dma/tegra210-adma.c | 10
-rw-r--r--  drivers/dma/ti/dma-crossbar.c | 35
-rw-r--r--  drivers/dma/ti/k3-udma-private.c | 2
-rw-r--r--  drivers/dma/ti/omap-dma.c | 4
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h | 1
-rw-r--r--  drivers/dma/xilinx/xdma.c | 2
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 7
-rw-r--r--  drivers/dpll/dpll_core.c | 12
-rw-r--r--  drivers/edac/i3200_edac.c | 11
-rw-r--r--  drivers/edac/x38_edac.c | 9
-rw-r--r--  drivers/firmware/efi/cper.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 3
-rw-r--r--  drivers/gpio/gpio-davinci.c | 18
-rw-r--r--  drivers/gpio/gpiolib.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 9
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 69
-rw-r--r--  drivers/gpu/drm/gud/gud_pipe.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 5
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 90
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 10
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 14
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 17
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_helper.h | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 4
-rw-r--r--  drivers/hv/hv_common.c | 12
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 2
-rw-r--r--  drivers/hv/mshv_eventfd.c | 2
-rw-r--r--  drivers/hv/mshv_regions.c | 93
-rw-r--r--  drivers/hv/mshv_root_main.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-imx-lpi2c.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-riic.c | 46
-rw-r--r--  drivers/iommu/iommu-sva.c | 1
-rw-r--r--  drivers/irqchip/irq-riscv-imsic-platform.c | 4
-rw-r--r--  drivers/isdn/mISDN/timerdev.c | 13
-rw-r--r--  drivers/leds/led-class.c | 10
-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/can/dev/dev.c | 1
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 8
-rw-r--r--  drivers/net/can/usb/esd_usb.c | 9
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 7
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 9
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 8
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 69
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_irq.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 31
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 16
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 43
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 86
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c | 4
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 16
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 49
-rw-r--r--  drivers/net/netdevsim/bpf.c | 6
-rw-r--r--  drivers/net/netdevsim/dev.c | 2
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r--  drivers/net/pcs/pcs-mtk-lynxi.c | 4
-rw-r--r--  drivers/net/phy/intel-xway.c | 7
-rw-r--r--  drivers/net/phy/sfp.c | 2
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/usb/usbnet.c | 10
-rw-r--r--  drivers/net/veth.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath12k/ce.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath12k/wifi7/hw.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 9
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 6
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 1
-rw-r--r--  drivers/net/wwan/mhi_wwan_mbim.c | 17
-rw-r--r--  drivers/nfc/virtual_ncidev.c | 4
-rw-r--r--  drivers/nvme/host/apple.c | 1
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 7
-rw-r--r--  drivers/nvme/target/passthru.c | 2
-rw-r--r--  drivers/nvme/target/tcp.c | 21
-rw-r--r--  drivers/of/base.c | 8
-rw-r--r--  drivers/of/platform.c | 2
-rw-r--r--  drivers/pci/Kconfig | 6
-rw-r--r--  drivers/phy/broadcom/phy-bcm-ns-usb3.c | 2
-rw-r--r--  drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 3
-rw-r--r--  drivers/phy/freescale/phy-fsl-imx8mq-usb.c | 15
-rw-r--r--  drivers/phy/microchip/Kconfig | 2
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qusb2.c | 16
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-inno-usb2.c | 14
-rw-r--r--  drivers/phy/st/phy-stm32-usbphyc.c | 2
-rw-r--r--  drivers/phy/tegra/xusb-tegra186.c | 3
-rw-r--r--  drivers/phy/ti/phy-da8xx-usb.c | 7
-rw-r--r--  drivers/phy/ti/phy-gmii-sel.c | 2
-rw-r--r--  drivers/pmdomain/qcom/rpmhpd.c | 4
-rw-r--r--  drivers/pwm/core.c | 10
-rw-r--r--  drivers/pwm/pwm-max7360.c | 1
-rw-r--r--  drivers/resctrl/mpam_internal.h | 9
-rw-r--r--  drivers/soundwire/bus_type.c | 2
-rw-r--r--  drivers/soundwire/slave.c | 1
-rw-r--r--  drivers/usb/core/config.c | 5
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/dwc3/core.h | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-apple.c | 64
-rw-r--r--  drivers/usb/gadget/function/f_uvc.c | 4
-rw-r--r--  drivers/usb/gadget/function/uvc.h | 3
-rw-r--r--  drivers/usb/gadget/function/uvc_queue.c | 23
-rw-r--r--  drivers/usb/gadget/function/uvc_video.c | 14
-rw-r--r--  drivers/usb/host/ohci-platform.c | 1
-rw-r--r--  drivers/usb/host/uhci-platform.c | 1
-rw-r--r--  drivers/usb/host/xhci-sideband.c | 1
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 15
-rw-r--r--  drivers/usb/serial/f81232.c | 77
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 2
-rw-r--r--  drivers/usb/serial/option.c | 1
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 2
-rw-r--r--  fs/btrfs/Kconfig | 6
-rw-r--r--  fs/btrfs/disk-io.c | 19
-rw-r--r--  fs/btrfs/fs.h | 8
-rw-r--r--  fs/btrfs/inode.c | 9
-rw-r--r--  fs/btrfs/reflink.c | 23
-rw-r--r--  fs/btrfs/send.c | 2
-rw-r--r--  fs/btrfs/space-info.c | 8
-rw-r--r--  fs/btrfs/sysfs.c | 52
-rw-r--r--  fs/btrfs/tests/extent-map-tests.c | 3
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c | 6
-rw-r--r--  fs/btrfs/tree-log.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 42
-rw-r--r--  fs/btrfs/volumes.h | 4
-rw-r--r--  fs/ext4/move_extent.c | 2
-rw-r--r--  fs/ext4/xattr.c | 1
-rw-r--r--  fs/fs-writeback.c | 7
-rw-r--r--  fs/fuse/file.c | 4
-rw-r--r--  fs/nfs/blocklayout/dev.c | 6
-rw-r--r--  fs/nfs/delegation.c | 7
-rw-r--r--  fs/nfs/dir.c | 78
-rw-r--r--  fs/nfs/file.c | 3
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 2
-rw-r--r--  fs/nfs/inode.c | 10
-rw-r--r--  fs/nfs/io.c | 2
-rw-r--r--  fs/nfs/localio.c | 32
-rw-r--r--  fs/nfs/nfs42proc.c | 29
-rw-r--r--  fs/nfs/nfs4proc.c | 53
-rw-r--r--  fs/nfs/nfs4state.c | 6
-rw-r--r--  fs/nfs/nfstrace.h | 3
-rw-r--r--  fs/nfs/pnfs.c | 58
-rw-r--r--  fs/nfs/pnfs.h | 17
-rw-r--r--  fs/nfs/write.c | 33
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 11
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.c | 53
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.h | 2
-rw-r--r--  fs/xfs/xfs_log.c | 8
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 6
-rw-r--r--  include/asm-generic/tlb.h | 77
-rw-r--r--  include/drm/bridge/dw_hdmi_qp.h | 1
-rw-r--r--  include/drm/display/drm_dp_helper.h | 57
-rw-r--r--  include/dt-bindings/power/qcom,rpmhpd.h | 1
-rw-r--r--  include/hyperv/hvhdk.h | 47
-rw-r--r--  include/linux/energy_model.h | 2
-rw-r--r--  include/linux/hugetlb.h | 17
-rw-r--r--  include/linux/kfence.h | 1
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/mm_types.h | 19
-rw-r--r--  include/linux/mmzone.h | 9
-rw-r--r--  include/linux/nfs_fs.h | 1
-rw-r--r--  include/linux/nmi.h | 1
-rw-r--r--  include/linux/pagemap.h | 11
-rw-r--r--  include/linux/pci.h | 4
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/sched/mm.h | 1
-rw-r--r--  include/linux/textsearch.h | 1
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/net/cfg80211.h | 3
-rw-r--r--  include/sound/pcm.h | 2
-rw-r--r--  include/trace/events/rxrpc.h | 4
-rw-r--r--  include/uapi/linux/dev_energymodel.h | 82
-rw-r--r--  include/uapi/linux/energy_model.h | 63
-rw-r--r--  include/uapi/linux/ext4.h | 2
-rw-r--r--  include/uapi/linux/landlock.h | 37
-rw-r--r--  include/uapi/linux/nl80211.h | 5
-rw-r--r--  io_uring/io_uring.c | 8
-rw-r--r--  kernel/cgroup/cgroup.c | 5
-rw-r--r--  kernel/cgroup/cpuset.c | 5
-rw-r--r--  kernel/cgroup/legacy_freezer.c | 9
-rw-r--r--  kernel/dma/pool.c | 27
-rw-r--r--  kernel/liveupdate/kexec_handover.c | 37
-rw-r--r--  kernel/module/kmod.c | 1
-rw-r--r--  kernel/panic.c | 4
-rw-r--r--  kernel/power/em_netlink.c | 213
-rw-r--r--  kernel/power/em_netlink_autogen.c | 58
-rw-r--r--  kernel/power/em_netlink_autogen.h | 22
-rw-r--r--  kernel/power/energy_model.c | 6
-rw-r--r--  kernel/printk/nbcon.c | 38
-rw-r--r--  kernel/sched/core.c | 18
-rw-r--r--  kernel/sched/deadline.c | 36
-rw-r--r--  kernel/sched/ext.c | 1
-rw-r--r--  kernel/sched/sched.h | 27
-rw-r--r--  kernel/sched/syscalls.c | 32
-rw-r--r--  kernel/time/hrtimer.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 29
-rw-r--r--  kernel/watchdog.c | 2
-rw-r--r--  lib/buildid.c | 32
-rw-r--r--  mm/Kconfig | 12
-rw-r--r--  mm/damon/core.c | 41
-rw-r--r--  mm/damon/sysfs-schemes.c | 10
-rw-r--r--  mm/damon/sysfs.c | 9
-rw-r--r--  mm/hugetlb.c | 147
-rw-r--r--  mm/init-mm.c | 5
-rw-r--r--  mm/internal.h | 8
-rw-r--r--  mm/kfence/core.c | 17
-rw-r--r--  mm/kmsan/shadow.c | 2
-rw-r--r--  mm/memory.c | 11
-rw-r--r--  mm/migrate.c | 12
-rw-r--r--  mm/mmu_gather.c | 33
-rw-r--r--  mm/numa_memblks.c | 2
-rw-r--r--  mm/page_alloc.c | 65
-rw-r--r--  mm/rmap.c | 45
-rw-r--r--  mm/slub.c | 8
-rw-r--r--  mm/vma.c | 122
-rw-r--r--  mm/vma.h | 3
-rw-r--r--  mm/vmalloc.c | 2
-rw-r--r--  mm/vmscan.c | 13
-rw-r--r--  mm/zswap.c | 2
-rw-r--r--  net/core/skbuff.c | 7
-rw-r--r--  net/dsa/dsa.c | 2
-rw-r--r--  net/ipv4/fou_core.c | 3
-rw-r--r--  net/ipv4/fou_nl.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 4
-rw-r--r--  net/l2tp/l2tp_core.c | 8
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 8
-rw-r--r--  net/mac80211/key.c | 3
-rw-r--r--  net/mac80211/mlme.c | 213
-rw-r--r--  net/mac80211/scan.c | 9
-rw-r--r--  net/netrom/nr_route.c | 13
-rw-r--r--  net/openvswitch/vport.c | 11
-rw-r--r--  net/rxrpc/ar-internal.h | 9
-rw-r--r--  net/rxrpc/conn_event.c | 2
-rw-r--r--  net/rxrpc/output.c | 14
-rw-r--r--  net/rxrpc/peer_event.c | 17
-rw-r--r--  net/rxrpc/proc.c | 4
-rw-r--r--  net/rxrpc/recvmsg.c | 19
-rw-r--r--  net/rxrpc/rxgk.c | 2
-rw-r--r--  net/rxrpc/rxkad.c | 2
-rw-r--r--  net/sched/act_ife.c | 6
-rw-r--r--  net/sched/sch_qfq.c | 2
-rw-r--r--  net/sched/sch_teql.c | 5
-rw-r--r--  net/sctp/sm_statefuns.c | 10
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 36
-rw-r--r--  net/wireless/nl80211.c | 10
-rw-r--r--  net/wireless/util.c | 8
-rw-r--r--  security/landlock/audit.c | 2
-rw-r--r--  security/landlock/domain.h | 2
-rw-r--r--  security/landlock/errata/abi-6.h | 2
-rw-r--r--  security/landlock/fs.c | 14
-rw-r--r--  security/landlock/net.c | 118
-rw-r--r--  security/landlock/ruleset.c | 1
-rw-r--r--  security/landlock/task.c | 12
-rw-r--r--  sound/core/oss/pcm_oss.c | 4
-rw-r--r--  sound/core/pcm_native.c | 9
-rw-r--r--  sound/hda/codecs/realtek/alc269.c | 2
-rw-r--r--  sound/hda/codecs/side-codecs/cirrus_scodec_test.c | 3
-rw-r--r--  sound/hda/codecs/side-codecs/tas2781_hda_i2c.c | 18
-rw-r--r--  sound/soc/amd/yc/acp6x-mach.c | 7
-rw-r--r--  sound/soc/codecs/tlv320adcx140.c | 13
-rw-r--r--  sound/soc/codecs/wsa881x.c | 9
-rw-r--r--  sound/soc/codecs/wsa883x.c | 26
-rw-r--r--  sound/soc/codecs/wsa884x.c | 3
-rw-r--r--  sound/soc/generic/simple-card-utils.c | 4
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 8
-rw-r--r--  sound/soc/sdw_utils/soc_sdw_cs42l43.c | 2
-rw-r--r--  sound/soc/sdw_utils/soc_sdw_utils.c | 43
-rw-r--r--  sound/soc/soc-ops.c | 4
-rw-r--r--  sound/soc/tegra/tegra210_ahub.c | 6
-rw-r--r--  sound/soc/ti/davinci-evm.c | 39
-rw-r--r--  sound/usb/pcm.c | 2
-rw-r--r--  tools/net/ynl/Makefile | 3
-rwxr-xr-x  tools/net/ynl/ynl-regen.sh | 2
-rw-r--r--  tools/objtool/Makefile | 24
-rw-r--r--  tools/objtool/include/objtool/warn.h | 4
-rw-r--r--  tools/perf/util/parse-events.c | 7
-rw-r--r--  tools/testing/cxl/test/cxl_translate.c | 30
-rw-r--r--  tools/testing/selftests/landlock/common.h | 1
-rw-r--r--  tools/testing/selftests/landlock/fs_test.c | 34
-rw-r--r--  tools/testing/selftests/landlock/net_test.c | 30
-rw-r--r--  tools/testing/selftests/landlock/ptrace_test.c | 154
-rw-r--r--  tools/testing/selftests/landlock/scoped_abstract_unix_test.c | 23
-rw-r--r--  tools/testing/selftests/landlock/scoped_base_variants.h | 9
-rw-r--r--  tools/testing/selftests/mm/gup_longterm.c | 2
-rw-r--r--  tools/testing/selftests/mm/merge.c | 384
-rw-r--r--  tools/testing/selftests/net/Makefile | 1
-rwxr-xr-x  tools/testing/selftests/net/amt.sh | 7
-rw-r--r--  tools/testing/selftests/net/config | 2
-rwxr-xr-x  tools/testing/selftests/net/fib-onlink-tests.sh | 71
-rwxr-xr-x  tools/testing/selftests/net/ipvtap_test.sh | 168
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json | 25
-rw-r--r--  tools/testing/selftests/x86/Makefile | 1
-rw-r--r--  tools/testing/vsock/util.h | 2
-rw-r--r--  tools/testing/vsock/vsock_test.c | 117
-rw-r--r--  tools/testing/vsock/vsock_test_zerocopy.c | 74
-rw-r--r--  tools/testing/vsock/vsock_test_zerocopy.h | 3
425 files changed, 4715 insertions, 2343 deletions
diff --git a/.mailmap b/.mailmap
index fa018b5bd533..428d721ffbb1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -12,6 +12,7 @@
#
Aaron Durbin <adurbin@google.com>
Abel Vesa <abelvesa@kernel.org> <abel.vesa@nxp.com>
+Abel Vesa <abelvesa@kernel.org> <abel.vesa@linaro.org>
Abel Vesa <abelvesa@kernel.org> <abelvesa@gmail.com>
Abhijeet Dharmapurikar <quic_adharmap@quicinc.com> <adharmap@codeaurora.org>
Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org>
@@ -207,6 +208,7 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
+Daniel Thompson <danielt@kernel.org> <daniel.thompson@linaro.org>
Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
@@ -794,6 +796,7 @@ Sven Eckelmann <sven@narfation.org> <sven.eckelmann@open-mesh.com>
Sven Eckelmann <sven@narfation.org> <sven.eckelmann@openmesh.com>
Sven Eckelmann <sven@narfation.org> <sven@open-mesh.com>
Sven Peter <sven@kernel.org> <sven@svenpeter.dev>
+Szymon Wilczek <swilczek.lx@gmail.com> <szymonwilczek@gmx.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>
@@ -876,6 +879,8 @@ Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
+Yixun Lan <dlan@kernel.org> <dlan@gentoo.org>
+Yixun Lan <dlan@kernel.org> <yixun.lan@amlogic.com>
Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
Yu-Chun Lin <eleanor.lin@realtek.com> <eleanor15x@gmail.com>
Yusuke Goda <goda.yusuke@renesas.com>
diff --git a/CREDITS b/CREDITS
index 680bc55024a9..96ea9ce21026 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2231,6 +2231,10 @@ S: Markham, Ontario
S: L3R 8B2
S: Canada
+N: Krzysztof Kozlowski
+E: krzk@kernel.org
+D: NFC network subsystem and drivers maintainer
+
N: Christian Krafft
D: PowerPC Cell support
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a8d0afde7f85..1058f2a6d6a8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2917,6 +2917,41 @@ Kernel parameters
for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
are exclusive, so you cannot specify multiple forms.
+ kfence.burst= [MM,KFENCE] The number of additional successive
+ allocations to be attempted through KFENCE for each
+ sample interval.
+ Format: <unsigned integer>
+ Default: 0
+
+ kfence.check_on_panic=
+ [MM,KFENCE] Whether to check all KFENCE-managed objects'
+ canaries on panic.
+ Format: <bool>
+ Default: false
+
+ kfence.deferrable=
+ [MM,KFENCE] Whether to use a deferrable timer to trigger
+ allocations. This avoids forcing CPU wake-ups if the
+ system is idle, at the risk of a less predictable
+ sample interval.
+ Format: <bool>
+ Default: CONFIG_KFENCE_DEFERRABLE
+
+ kfence.sample_interval=
+ [MM,KFENCE] KFENCE's sample interval in milliseconds.
+ Format: <unsigned integer>
+ 0 - Disable KFENCE.
+ >0 - Enable KFENCE with the given sample interval.
+ Default: CONFIG_KFENCE_SAMPLE_INTERVAL
+
+ kfence.skip_covered_thresh=
+ [MM,KFENCE] If pool utilization reaches this threshold
+ (pool usage%), KFENCE limits currently covered
+ allocations of the same source from further filling
+ up the pool.
+ Format: <unsigned integer>
+ Default: 75
+
kgdbdbgp= [KGDB,HW,EARLY] kgdb over EHCI usb debug port.
Format: <Controller#>[,poll interval]
The controller # is the number of the ehci usb debug
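As a quick illustration of how the new kfence parameters documented above
combine, a boot command line might carry something like the following (the
values here are arbitrary examples, not recommendations):

    kfence.sample_interval=100 kfence.burst=2 kfence.check_on_panic=1 kfence.skip_covered_thresh=90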
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 4d71211fdad8..245bf6394935 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -494,6 +494,10 @@ memory allocations.
The default value depends on CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT.
+When CONFIG_MEM_ALLOC_PROFILING_DEBUG=y, this control is read-only to avoid
+warnings from allocations that are made while profiling is disabled and then
+freed after it is enabled.
+
memory_failure_early_kill
=========================
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.yaml b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.yaml
index 2aa75b7add7b..daa70a8500e9 100644
--- a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.yaml
@@ -16,7 +16,8 @@ properties:
- brcm,iproc-nic-i2c
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
clock-frequency:
enum: [ 100000, 400000 ]
@@ -41,8 +42,15 @@ allOf:
contains:
const: brcm,iproc-nic-i2c
then:
+ properties:
+ reg:
+ minItems: 2
required:
- brcm,ape-hsls-addr-mask
+ else:
+ properties:
+ reg:
+ maxItems: 1
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 48bd11410e8c..f5068df20cfe 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -56,7 +56,7 @@ properties:
clocks:
minItems: 5
- maxItems: 7
+ maxItems: 6
clock-names:
minItems: 5
@@ -67,7 +67,6 @@ properties:
- enum: [rchng, refgen]
- const: pipe
- const: pipediv2
- - const: phy_aux
power-domains:
maxItems: 1
@@ -180,6 +179,7 @@ allOf:
contains:
enum:
- qcom,glymur-qmp-gen5x4-pcie-phy
+ - qcom,qcs8300-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x4-pcie-phy
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
@@ -202,19 +202,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,qcs8300-qmp-gen4x2-pcie-phy
- then:
- properties:
- clocks:
- minItems: 7
- clock-names:
- minItems: 7
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,glymur-qmp-gen5x4-pcie-phy
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
diff --git a/Documentation/devicetree/bindings/sound/everest,es8316.yaml b/Documentation/devicetree/bindings/sound/everest,es8316.yaml
index 81a0215050e0..fe5d938ca310 100644
--- a/Documentation/devicetree/bindings/sound/everest,es8316.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es8316.yaml
@@ -49,6 +49,10 @@ properties:
items:
- const: mclk
+ interrupts:
+ maxItems: 1
+ description: Headphone detect interrupt
+
port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml
index 3f4f59287c1c..2eb631950963 100644
--- a/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml
+++ b/Documentation/devicetree/bindings/sound/realtek,rt5640.yaml
@@ -47,6 +47,12 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: mclk
+
interrupts:
maxItems: 1
description: The CODEC's interrupt output.
@@ -98,6 +104,7 @@ properties:
- 4 # Use GPIO2 for jack-detect
- 5 # Use GPIO3 for jack-detect
- 6 # Use GPIO4 for jack-detect
+ - 7 # Use HDA header for jack-detect
realtek,jack-detect-not-inverted:
description:
@@ -121,6 +128,10 @@ properties:
- 2 # Scale current by 1.0
- 3 # Scale current by 1.5
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml b/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
index 32dea7392e8d..56c755c22945 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
@@ -70,6 +70,9 @@ properties:
"#sound-dai-cells":
const: 0
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index a792434c59db..a7f58114c02e 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -406,7 +406,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,ipq5018-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8074-dwc3
- qcom,msm8953-dwc3
@@ -428,6 +427,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,msm8994-dwc3
- qcom,msm8996-dwc3
- qcom,qcs404-dwc3
- qcom,sdm660-dwc3
@@ -451,6 +451,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
then:
properties:
@@ -488,7 +489,6 @@ allOf:
enum:
- qcom,ipq4019-dwc3
- qcom,ipq8064-dwc3
- - qcom,msm8994-dwc3
- qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
- qcom,qdu1000-dwc3
diff --git a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
index 8cee7c5582f2..7d784a648b7d 100644
--- a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
@@ -420,7 +420,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,ipq5018-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8074-dwc3
- qcom,msm8953-dwc3
@@ -443,6 +442,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,msm8994-dwc3
- qcom,msm8996-dwc3
- qcom,qcs404-dwc3
- qcom,sdm660-dwc3
@@ -467,6 +467,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
then:
properties:
@@ -509,7 +510,6 @@ allOf:
- qcom,ipq4019-dwc3
- qcom,ipq8064-dwc3
- qcom,kaanapali-dwc3
- - qcom,msm8994-dwc3
- qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
- qcom,qdu1000-dwc3
diff --git a/Documentation/mm/allocation-profiling.rst b/Documentation/mm/allocation-profiling.rst
index 316311240e6a..5389d241176a 100644
--- a/Documentation/mm/allocation-profiling.rst
+++ b/Documentation/mm/allocation-profiling.rst
@@ -33,6 +33,16 @@ Boot parameter:
sysctl:
/proc/sys/vm/mem_profiling
+ 1: Enable memory profiling.
+
+ 0: Disable memory profiling.
+
+ The default value depends on CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT.
+
+ When CONFIG_MEM_ALLOC_PROFILING_DEBUG=y, this control is read-only to avoid
+ warnings from allocations that are made while profiling is disabled and then
+ freed after it is enabled.
+
Runtime info:
/proc/allocinfo
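For instance, a minimal shell session exercising this control might look as
follows (a sketch; it assumes CONFIG_MEM_ALLOC_PROFILING=y and, per the note
above, that CONFIG_MEM_ALLOC_PROFILING_DEBUG is not set):

    # echo 1 > /proc/sys/vm/mem_profiling   # enable profiling
    # sort -g /proc/allocinfo | tail -3     # show the largest allocation sites
    # echo 0 > /proc/sys/vm/mem_profiling   # disable profiling again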
diff --git a/Documentation/netlink/specs/dev-energymodel.yaml b/Documentation/netlink/specs/dev-energymodel.yaml
new file mode 100644
index 000000000000..11faabfdfbe8
--- /dev/null
+++ b/Documentation/netlink/specs/dev-energymodel.yaml
@@ -0,0 +1,175 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+#
+# Copyright (c) 2025 Valve Corporation.
+#
+---
+name: dev-energymodel
+
+doc: |
+ Energy model netlink interface to notify its changes.
+
+protocol: genetlink
+
+uapi-header: linux/dev_energymodel.h
+
+definitions:
+ -
+ type: flags
+ name: perf-state-flags
+ entries:
+ -
+ name: perf-state-inefficient
+ doc: >-
+ The performance state is inefficient. There is in this perf-domain,
+ another performance state with a higher frequency but a lower or
+ equal power cost.
+ -
+ type: flags
+ name: perf-domain-flags
+ entries:
+ -
+ name: perf-domain-microwatts
+ doc: >-
+ The power values are in micro-Watts or some other scale.
+ -
+ name: perf-domain-skip-inefficiencies
+ doc: >-
+ Skip inefficient states when estimating energy consumption.
+ -
+ name: perf-domain-artificial
+ doc: >-
+ The power values are artificial and might have been created by a
+ platform that lacks real power information.
+
+attribute-sets:
+ -
+ name: perf-domain
+ doc: >-
+ Information on a single performance domain.
+ attributes:
+ -
+ name: pad
+ type: pad
+ -
+ name: perf-domain-id
+ type: u32
+ doc: >-
+ A unique ID number for each performance domain.
+ -
+ name: flags
+ type: u64
+ doc: >-
+ Bitmask of performance domain flags.
+ enum: perf-domain-flags
+ -
+ name: cpus
+ type: u64
+ multi-attr: true
+ doc: >-
+ CPUs that belong to this performance domain.
+ -
+ name: perf-table
+ doc: >-
+ Performance states table.
+ attributes:
+ -
+ name: perf-domain-id
+ type: u32
+ doc: >-
+ A unique ID number for each performance domain.
+ -
+ name: perf-state
+ type: nest
+ nested-attributes: perf-state
+ multi-attr: true
+ -
+ name: perf-state
+ doc: >-
+ Performance state of a performance domain.
+ attributes:
+ -
+ name: pad
+ type: pad
+ -
+ name: performance
+ type: u64
+ doc: >-
+ CPU performance (capacity) at a given frequency.
+ -
+ name: frequency
+ type: u64
+ doc: >-
+ The frequency in kHz, for consistency with CPUFreq.
+ -
+ name: power
+ type: u64
+ doc: >-
+ The power consumed at this level (by 1 CPU or by a registered
+ device). It can be a total power: static and dynamic.
+ -
+ name: cost
+ type: u64
+ doc: >-
+ The cost coefficient associated with this level, used during energy
+ calculation. Equal to: power * max_frequency / frequency.
+ -
+ name: flags
+ type: u64
+ doc: >-
+ Bitmask of performance state flags.
+ enum: perf-state-flags
+
+operations:
+ list:
+ -
+ name: get-perf-domains
+ attribute-set: perf-domain
+ doc: Get the list of information for all performance domains.
+ do:
+ request:
+ attributes:
+ - perf-domain-id
+ reply:
+ attributes: &perf-domain-attrs
+ - pad
+ - perf-domain-id
+ - flags
+ - cpus
+ dump:
+ reply:
+ attributes: *perf-domain-attrs
+ -
+ name: get-perf-table
+ attribute-set: perf-table
+ doc: Get the energy model table of a performance domain.
+ do:
+ request:
+ attributes:
+ - perf-domain-id
+ reply:
+ attributes:
+ - perf-domain-id
+ - perf-state
+ -
+ name: perf-domain-created
+ doc: A performance domain is created.
+ notify: get-perf-table
+ mcgrp: event
+ -
+ name: perf-domain-updated
+ doc: A performance domain is updated.
+ notify: get-perf-table
+ mcgrp: event
+ -
+ name: perf-domain-deleted
+ doc: A performance domain is deleted.
+ attribute-set: perf-table
+ event:
+ attributes:
+ - perf-domain-id
+ mcgrp: event
+
+mcast-groups:
+ list:
+ -
+ name: event
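A hedged sketch of exercising the new family with the in-tree YNL tooling;
the CLI path and flags below are assumptions based on tools/net/ynl, not
something defined by this spec:

    $ ./tools/net/ynl/pyynl/cli.py \
          --spec Documentation/netlink/specs/dev-energymodel.yaml \
          --dump get-perf-domains
    $ ./tools/net/ynl/pyynl/cli.py \
          --spec Documentation/netlink/specs/dev-energymodel.yaml \
          --do get-perf-table --json '{"perf-domain-id": 0}'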
diff --git a/Documentation/netlink/specs/em.yaml b/Documentation/netlink/specs/em.yaml
deleted file mode 100644
index 9905ca482325..000000000000
--- a/Documentation/netlink/specs/em.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
-name: em
-
-doc: |
- Energy model netlink interface to notify its changes.
-
-protocol: genetlink
-
-uapi-header: linux/energy_model.h
-
-attribute-sets:
- -
- name: pds
- attributes:
- -
- name: pd
- type: nest
- nested-attributes: pd
- multi-attr: true
- -
- name: pd
- attributes:
- -
- name: pad
- type: pad
- -
- name: pd-id
- type: u32
- -
- name: flags
- type: u64
- -
- name: cpus
- type: string
- -
- name: pd-table
- attributes:
- -
- name: pd-id
- type: u32
- -
- name: ps
- type: nest
- nested-attributes: ps
- multi-attr: true
- -
- name: ps
- attributes:
- -
- name: pad
- type: pad
- -
- name: performance
- type: u64
- -
- name: frequency
- type: u64
- -
- name: power
- type: u64
- -
- name: cost
- type: u64
- -
- name: flags
- type: u64
-
-operations:
- list:
- -
- name: get-pds
- attribute-set: pds
- doc: Get the list of information for all performance domains.
- do:
- reply:
- attributes:
- - pd
- -
- name: get-pd-table
- attribute-set: pd-table
- doc: Get the energy model table of a performance domain.
- do:
- request:
- attributes:
- - pd-id
- reply:
- attributes:
- - pd-id
- - ps
- -
- name: pd-created
- doc: A performance domain is created.
- notify: get-pd-table
- mcgrp: event
- -
- name: pd-updated
- doc: A performance domain is updated.
- notify: get-pd-table
- mcgrp: event
- -
- name: pd-deleted
- doc: A performance domain is deleted.
- attribute-set: pd-table
- event:
- attributes:
- - pd-id
- mcgrp: event
-
-mcast-groups:
- list:
- -
- name: event
diff --git a/Documentation/netlink/specs/fou.yaml b/Documentation/netlink/specs/fou.yaml
index 8e7974ec453f..331f1b342b3a 100644
--- a/Documentation/netlink/specs/fou.yaml
+++ b/Documentation/netlink/specs/fou.yaml
@@ -39,6 +39,8 @@ attribute-sets:
-
name: ipproto
type: u8
+ checks:
+ min: 1
-
name: type
type: u8
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index 989192421cc9..6bce4507d5d3 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -363,6 +363,18 @@ just do it. As a result, a sequence of smaller series gets merged quicker and
with better review coverage. Re-posting large series also increases the mailing
list traffic.
+Limit patches outstanding on mailing list
+-----------------------------------------
+
+Avoid having more than 15 patches, across all series, outstanding for
+review on the mailing list for a single tree. In other words, a maximum of
+15 patches under review on net, and a maximum of 15 patches under review on
+net-next.
+
+This limit is intended to focus developer effort on testing patches before
+upstream review, improving the quality of upstream submissions and easing
+the load on reviewers.
+
.. _rcs:
Local variable ordering ("reverse xmas tree", "RCS")
diff --git a/MAINTAINERS b/MAINTAINERS
index 92768bceb929..ed816ef6e57c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -314,6 +314,7 @@ R: Mauro Carvalho Chehab <mchehab@kernel.org>
R: Shuai Xue <xueshuai@linux.alibaba.com>
L: linux-acpi@vger.kernel.org
F: drivers/acpi/apei/
+F: drivers/firmware/efi/cper*
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: "Rafael J. Wysocki" <rafael@kernel.org>
@@ -3131,6 +3132,7 @@ F: drivers/*/*ma35*
K: ma35d1
ARM/NUVOTON NPCM ARCHITECTURE
+M: Andrew Jeffery <andrew@codeconstruct.com.au>
M: Avi Fishman <avifishman70@gmail.com>
M: Tomer Maimon <tmaimon77@gmail.com>
M: Tali Perry <tali.perry1@gmail.com>
@@ -3139,6 +3141,7 @@ R: Nancy Yuen <yuenn@google.com>
R: Benjamin Fair <benjaminfair@google.com>
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bmc/linux.git
F: Documentation/devicetree/bindings/*/*/*npcm*
F: Documentation/devicetree/bindings/*/*npcm*
F: Documentation/devicetree/bindings/rtc/nuvoton,nct3018y.yaml
@@ -6420,6 +6423,7 @@ F: include/linux/blk-cgroup.h
CONTROL GROUP - CPUSET
M: Waiman Long <longman@redhat.com>
+R: Chen Ridong <chenridong@huaweicloud.com>
L: cgroups@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
@@ -9303,12 +9307,12 @@ M: Lukasz Luba <lukasz.luba@arm.com>
M: "Rafael J. Wysocki" <rafael@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
-F: kernel/power/energy_model.c
-F: include/linux/energy_model.h
+F: Documentation/netlink/specs/dev-energymodel.yaml
F: Documentation/power/energy-model.rst
-F: Documentation/netlink/specs/em.yaml
-F: include/uapi/linux/energy_model.h
+F: include/linux/energy_model.h
+F: include/uapi/linux/dev_energymodel.h
F: kernel/power/em_netlink*.*
+F: kernel/power/energy_model.c
EPAPR HYPERVISOR BYTE CHANNEL DEVICE DRIVER
M: Laurentiu Tudor <laurentiu.tudor@nxp.com>
@@ -9519,6 +9523,7 @@ F: arch/arm/boot/compressed/efi-header.S
F: arch/x86/platform/efi/
F: drivers/firmware/efi/
F: include/linux/efi*.h
+X: drivers/firmware/efi/cper*
EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -18490,9 +18495,8 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
-M: Krzysztof Kozlowski <krzk@kernel.org>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/net/nfc/
@@ -21119,6 +21123,10 @@ S: Maintained
F: rust/helpers/pwm.c
F: rust/kernel/pwm.rs
+PWM SUBSYSTEM DRIVERS [RUST]
+R: Michal Wilczynski <m.wilczynski@samsung.com>
+F: drivers/pwm/*.rs
+
PXA GPIO DRIVER
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-gpio@vger.kernel.org
@@ -22548,7 +22556,7 @@ F: drivers/mailbox/riscv-sbi-mpxy-mbox.c
F: include/linux/mailbox/riscv-rpmi-message.h
RISC-V SPACEMIT SoC Support
-M: Yixun Lan <dlan@gentoo.org>
+M: Yixun Lan <dlan@kernel.org>
L: linux-riscv@lists.infradead.org
L: spacemit@lists.linux.dev
S: Maintained
diff --git a/Makefile b/Makefile
index 9d38125263fb..1465f715786d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/microchip/lan966x-pcb8290.dts b/arch/arm/boot/dts/microchip/lan966x-pcb8290.dts
index 3b7577e48b46..50bd29572f3e 100644
--- a/arch/arm/boot/dts/microchip/lan966x-pcb8290.dts
+++ b/arch/arm/boot/dts/microchip/lan966x-pcb8290.dts
@@ -54,6 +54,7 @@
&mdio0 {
pinctrl-0 = <&miim_a_pins>;
pinctrl-names = "default";
+ reset-gpios = <&gpio 53 GPIO_ACTIVE_LOW>;
status = "okay";
ext_phy0: ethernet-phy@7 {
diff --git a/arch/arm/boot/dts/microchip/sama7d65.dtsi b/arch/arm/boot/dts/microchip/sama7d65.dtsi
index cd2cf9a6f40b..868045c650a7 100644
--- a/arch/arm/boot/dts/microchip/sama7d65.dtsi
+++ b/arch/arm/boot/dts/microchip/sama7d65.dtsi
@@ -527,7 +527,7 @@
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 37>;
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
dmas = <&dma0 AT91_XDMAC_DT_PERID(12)>,
<&dma0 AT91_XDMAC_DT_PERID(11)>;
dma-names = "tx", "rx";
@@ -676,7 +676,7 @@
flx9: flexcom@e2820000 {
compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
reg = <0xe2820000 0x200>;
- ranges = <0x0 0xe281c000 0x800>;
+ ranges = <0x0 0xe2820000 0x800>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 43>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 63b42a19d1b8..d933e8abb50f 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -30,7 +30,6 @@ config ARCH_NPCM7XX
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_720789
select ARM_ERRATA_754322
- select ARM_ERRATA_794072
select PL310_ERRATA_588369
select PL310_ERRATA_727915
select MFD_SYSCON
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 709da31d5785..137aa8375257 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -202,19 +202,6 @@
nvidia,outputs = <&dsia &dsib &sor0 &sor1>;
nvidia,head = <0>;
-
- interconnects = <&mc TEGRA210_MC_DISPLAY0A &emc>,
- <&mc TEGRA210_MC_DISPLAY0B &emc>,
- <&mc TEGRA210_MC_DISPLAY0C &emc>,
- <&mc TEGRA210_MC_DISPLAYHC &emc>,
- <&mc TEGRA210_MC_DISPLAYD &emc>,
- <&mc TEGRA210_MC_DISPLAYT &emc>;
- interconnect-names = "wina",
- "winb",
- "winc",
- "cursor",
- "wind",
- "wint";
};
dc@54240000 {
@@ -230,15 +217,6 @@
nvidia,outputs = <&dsia &dsib &sor0 &sor1>;
nvidia,head = <1>;
-
- interconnects = <&mc TEGRA210_MC_DISPLAY0AB &emc>,
- <&mc TEGRA210_MC_DISPLAY0BB &emc>,
- <&mc TEGRA210_MC_DISPLAY0CB &emc>,
- <&mc TEGRA210_MC_DISPLAYHCB &emc>;
- interconnect-names = "wina",
- "winb",
- "winc",
- "cursor";
};
dsia: dsi@54300000 {
@@ -1052,7 +1030,6 @@
#iommu-cells = <1>;
#reset-cells = <1>;
- #interconnect-cells = <1>;
};
emc: external-memory-controller@7001b000 {
@@ -1066,7 +1043,6 @@
nvidia,memory-controller = <&mc>;
operating-points-v2 = <&emc_icc_dvfs_opp_table>;
- #interconnect-cells = <0>;
#cooling-cells = <2>;
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index 5334adebf278..b9e0d9c7c065 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -5788,8 +5788,12 @@
clocks = <&rpmhcc RPMH_CXO_CLK>;
clock-names = "xo";
- power-domains = <&rpmhpd SC8280XP_NSP>;
- power-domain-names = "nsp";
+ power-domains = <&rpmhpd SC8280XP_NSP>,
+ <&rpmhpd SC8280XP_CX>,
+ <&rpmhpd SC8280XP_MXC>;
+ power-domain-names = "nsp",
+ "cx",
+ "mxc";
memory-region = <&pil_nsp0_mem>;
@@ -5919,8 +5923,12 @@
clocks = <&rpmhcc RPMH_CXO_CLK>;
clock-names = "xo";
- power-domains = <&rpmhpd SC8280XP_NSP>;
- power-domain-names = "nsp";
+ power-domains = <&rpmhpd SC8280XP_NSP>,
+ <&rpmhpd SC8280XP_CX>,
+ <&rpmhpd SC8280XP_MXC>;
+ power-domain-names = "nsp",
+ "cx",
+ "mxc";
memory-region = <&pil_nsp1_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts b/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
index a259eb9d45ae..8aead6dc25e0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
@@ -31,9 +31,9 @@
};
&display_panel {
- status = "okay";
+ compatible = "samsung,sofef00-ams628nw01", "samsung,sofef00";
- compatible = "samsung,sofef00";
+ status = "okay";
};
&bq27441_fg {
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index 2ca9e50ef599..e3f93f4f412d 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -4133,8 +4133,6 @@
usb_1: usb@a600000 {
compatible = "qcom,sm8550-dwc3", "qcom,snps-dwc3";
reg = <0x0 0x0a600000 0x0 0xfc100>;
- #address-cells = <1>;
- #size-cells = <0>;
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index 07ae74851621..f8e1950a74ac 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -5150,9 +5150,6 @@
dma-coherent;
- #address-cells = <1>;
- #size-cells = <0>;
-
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/qcom/talos.dtsi b/arch/arm64/boot/dts/qcom/talos.dtsi
index d1dbfa3bd81c..95d26e313622 100644
--- a/arch/arm64/boot/dts/qcom/talos.dtsi
+++ b/arch/arm64/boot/dts/qcom/talos.dtsi
@@ -1399,10 +1399,10 @@
<&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
<&gcc GCC_UFS_PHY_AHB_CLK>,
<&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
- <&gcc GCC_UFS_PHY_ICE_CORE_CLK>,
<&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
- <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>;
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
clock-names = "core_clk",
"bus_aggr_clk",
"iface_clk",
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-sakurapi-rk3308b.dts b/arch/arm64/boot/dts/rockchip/rk3308-sakurapi-rk3308b.dts
index e5e6b800c2d1..3473db08b9b2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-sakurapi-rk3308b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-sakurapi-rk3308b.dts
@@ -199,7 +199,7 @@
compatible = "brcm,bcm43455-fmac", "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpio0>;
- interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "host-wake";
pinctrl-names = "default";
pinctrl-0 = <&wifi_host_wake>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go3.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go3.dts
index 35bbaf559ca3..6b0563cb4d3a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go3.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go3.dts
@@ -14,7 +14,8 @@
joystick_mux_controller: mux-controller {
compatible = "gpio-mux";
- pinctrl = <&mux_en_pins>;
+ pinctrl-0 = <&mux_en_pins>;
+ pinctrl-names = "default";
#mux-control-cells = <0>;
mux-gpios = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index e7d4a2f9a95e..b2de018a7d36 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -424,9 +424,7 @@
&pcie0 {
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
num-lanes = <2>;
- pinctrl-names = "default";
status = "okay";
vpcie12v-supply = <&vcc12v_dcin>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi
index 8d94d9f91a5c..3a9a10f531bd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi
@@ -71,7 +71,6 @@
};
&pcie0 {
- max-link-speed = <1>;
num-lanes = <1>;
vpcie3v3-supply = <&vcc3v3_sys>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index eaaca08a7601..810ab6ff4e67 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -969,7 +969,6 @@
};
&spi1 {
- max-freq = <10000000>;
status = "okay";
spiflash: flash@0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
index 2dca1dca20b8..5de964d369b0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
@@ -40,13 +40,13 @@
button-up {
label = "Volume Up";
linux,code = <KEY_VOLUMEUP>;
- press-threshold-microvolt = <100000>;
+ press-threshold-microvolt = <2000>;
};
button-down {
label = "Volume Down";
linux,code = <KEY_VOLUMEDOWN>;
- press-threshold-microvolt = <600000>;
+ press-threshold-microvolt = <300000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 587e89d7fc5e..8299e9d10c7c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -483,7 +483,7 @@
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
- gpios {
+ gpio-pins {
bios_disable_override_hog_pin: bios-disable-override-hog-pin {
rockchip,pins =
<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
index 74160cf89188..6d52e3723a4e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
@@ -529,11 +529,11 @@
rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
};
- vsel1_gpio: vsel1-gpio {
+ vsel1_gpio: vsel1-gpio-pin {
rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
};
- vsel2_gpio: vsel2-gpio {
+ vsel2_gpio: vsel2-gpio-pin {
rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso
index 70c23e1bf14b..d1a906031912 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso
+++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso
@@ -11,7 +11,6 @@
#include "rk3568-wolfvision-pf5-display.dtsi"
&st7789 {
- compatible = "jasonic,jt240mhqs-hwt-ek-e3",
- "sitronix,st7789v";
+ compatible = "jasonic,jt240mhqs-hwt-ek-e3";
rotation = <270>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-nanopi-m5.dts b/arch/arm64/boot/dts/rockchip/rk3576-nanopi-m5.dts
index cce34c541f7c..bb2cc2814b83 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-nanopi-m5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-nanopi-m5.dts
@@ -201,6 +201,7 @@
pinctrl-names = "default";
pinctrl-0 = <&hp_det_l>;
+ simple-audio-card,bitclock-master = <&masterdai>;
simple-audio-card,format = "i2s";
simple-audio-card,hp-det-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_LOW>;
simple-audio-card,mclk-fs = <256>;
@@ -211,15 +212,16 @@
"Headphones", "HPOR",
"IN1P", "Microphone Jack";
simple-audio-card,widgets =
- "Headphone", "Headphone Jack",
+ "Headphone", "Headphones",
"Microphone", "Microphone Jack";
simple-audio-card,codec {
sound-dai = <&rt5616>;
};
- simple-audio-card,cpu {
+ masterdai: simple-audio-card,cpu {
sound-dai = <&sai2>;
+ system-clock-frequency = <12288000>;
};
};
};
@@ -727,10 +729,12 @@
rt5616: audio-codec@1b {
compatible = "realtek,rt5616";
reg = <0x1b>;
- assigned-clocks = <&cru CLK_SAI2_MCLKOUT>;
+ assigned-clocks = <&cru CLK_SAI2_MCLKOUT_TO_IO>;
assigned-clock-rates = <12288000>;
- clocks = <&cru CLK_SAI2_MCLKOUT>;
+ clocks = <&cru CLK_SAI2_MCLKOUT_TO_IO>;
clock-names = "mclk";
+ pinctrl-0 = <&sai2m0_mclk>;
+ pinctrl-names = "default";
#sound-dai-cells = <0>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576.dtsi b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
index a86fc6b4e8c4..c72343e7a045 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
@@ -1261,7 +1261,7 @@
gpu: gpu@27800000 {
compatible = "rockchip,rk3576-mali", "arm,mali-bifrost";
- reg = <0x0 0x27800000 0x0 0x200000>;
+ reg = <0x0 0x27800000 0x0 0x20000>;
assigned-clocks = <&scmi_clk SCMI_CLK_GPU>;
assigned-clock-rates = <198000000>;
clocks = <&cru CLK_GPU>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
index 2a7921793020..7ab12d1054a7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -1200,7 +1200,7 @@
status = "disabled";
};
- rknn_mmu_1: iommu@fdac9000 {
+ rknn_mmu_1: iommu@fdaca000 {
compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
reg = <0x0 0xfdaca000 0x0 0x100>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -1230,7 +1230,7 @@
status = "disabled";
};
- rknn_mmu_2: iommu@fdad9000 {
+ rknn_mmu_2: iommu@fdada000 {
compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu";
reg = <0x0 0xfdada000 0x0 0x100>;
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH 0>;
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
index 357de4ca7555..e759fae77dcf 100644
--- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -131,6 +131,7 @@
reg-names = "main", "isr0";
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
@@ -149,6 +150,7 @@
reg-names = "main", "isr0";
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <4>;
@@ -164,6 +166,7 @@
compatible = "loongson,ls2k0500-eiointc";
reg = <0x0 0x1fe11600 0x0 0xea00>;
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <1>;
interrupt-parent = <&cpuintc>;
interrupts = <3>;
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
index 60ab425f793f..be4f7d119660 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -46,7 +46,7 @@
};
/* i2c of the dvi eeprom edid */
- i2c-gpio-0 {
+ i2c-0 {
compatible = "i2c-gpio";
scl-gpios = <&gpio0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -57,7 +57,7 @@
};
/* i2c of the eeprom edid */
- i2c-gpio-1 {
+ i2c-1 {
compatible = "i2c-gpio";
scl-gpios = <&gpio0 33 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio0 32 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -114,6 +114,7 @@
<0x0 0x1fe01140 0x0 0x8>;
reg-names = "main", "isr0", "isr1";
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
@@ -131,6 +132,7 @@
<0x0 0x1fe01148 0x0 0x8>;
reg-names = "main", "isr0", "isr1";
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <3>;
@@ -437,54 +439,47 @@
gmac0: ethernet@3,0 {
reg = <0x1800 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc0>;
- interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
- <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc0 12 IRQ_TYPE_LEVEL_HIGH>,
+ <&liointc0 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
status = "disabled";
};
gmac1: ethernet@3,1 {
reg = <0x1900 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc0>;
- interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
- <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc0 14 IRQ_TYPE_LEVEL_HIGH>,
+ <&liointc0 15 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
status = "disabled";
};
ehci0: usb@4,1 {
reg = <0x2100 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc1>;
- interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc1 18 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
ohci0: usb@4,2 {
reg = <0x2200 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc1>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc1 19 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
display@6,0 {
reg = <0x3000 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc0>;
- interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc0 28 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
hda@7,0 {
reg = <0x3800 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc0>;
- interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc0 4 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sata: sata@8,0 {
reg = <0x4000 0x0 0x0 0x0 0x0>;
- interrupt-parent = <&liointc0>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&liointc0 19 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
index 6c77b86ee06c..3678c084adf7 100644
--- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -126,6 +126,7 @@
reg = <0x0 0x1fe01400 0x0 0x64>;
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
@@ -140,6 +141,7 @@
compatible = "loongson,ls2k2000-eiointc";
reg = <0x0 0x1fe01600 0x0 0xea00>;
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <1>;
interrupt-parent = <&cpuintc>;
interrupts = <3>;
@@ -149,6 +151,7 @@
compatible = "loongson,pch-pic-1.0";
reg = <0x0 0x10000000 0x0 0x400>;
interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <2>;
loongson,pic-base-vec = <0>;
interrupt-parent = <&eiointc>;
@@ -291,65 +294,57 @@
gmac0: ethernet@3,0 {
reg = <0x1800 0x0 0x0 0x0 0x0>;
- interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
- <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&pic 12 IRQ_TYPE_LEVEL_HIGH>,
+ <&pic 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
- interrupt-parent = <&pic>;
status = "disabled";
};
gmac1: ethernet@3,1 {
reg = <0x1900 0x0 0x0 0x0 0x0>;
- interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
- <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&pic 14 IRQ_TYPE_LEVEL_HIGH>,
+ <&pic 15 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
- interrupt-parent = <&pic>;
status = "disabled";
};
gmac2: ethernet@3,2 {
reg = <0x1a00 0x0 0x0 0x0 0x0>;
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
- <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&pic 17 IRQ_TYPE_LEVEL_HIGH>,
+ <&pic 18 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
- interrupt-parent = <&pic>;
status = "disabled";
};
xhci0: usb@4,0 {
reg = <0x2000 0x0 0x0 0x0 0x0>;
- interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&pic>;
+ interrupts-extended = <&pic 48 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
xhci1: usb@19,0 {
reg = <0xc800 0x0 0x0 0x0 0x0>;
- interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&pic>;
+ interrupts-extended = <&pic 22 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
display@6,1 {
reg = <0x3100 0x0 0x0 0x0 0x0>;
- interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&pic>;
+ interrupts-extended = <&pic 28 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
i2s@7,0 {
reg = <0x3800 0x0 0x0 0x0 0x0>;
- interrupts = <78 IRQ_TYPE_LEVEL_HIGH>,
- <79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&pic 78 IRQ_TYPE_LEVEL_HIGH>,
+ <&pic 79 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tx", "rx";
- interrupt-parent = <&pic>;
status = "disabled";
};
sata: sata@8,0 {
reg = <0x4000 0x0 0x0 0x0 0x0>;
- interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&pic>;
+ interrupts-extended = <&pic 16 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
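Note: the interrupts-extended conversions in the two dtsi diffs above are mechanical. An interrupts-extended entry folds the interrupt-parent phandle into each interrupt specifier, so <&liointc0 12 IRQ_TYPE_LEVEL_HIGH> says in one tuple what interrupt-parent plus interrupts previously said in two properties, and different entries may even name different parents. The #address-cells = <0> additions to the interrupt-controller nodes make explicit that their interrupt specifiers carry no address cells, something newer dtc versions warn about when it is missing from an interrupt provider.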
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 7f288e89573b..4eed7bc312a8 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -126,14 +126,6 @@ SYM_CODE_START(smpboot_entry)
LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
- /* Enable PG */
- li.w t0, 0xb0 # PLV=0, IE=0, PG=1
- csrwr t0, LOONGARCH_CSR_CRMD
- li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
- csrwr t0, LOONGARCH_CSR_PRMD
- li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
- csrwr t0, LOONGARCH_CSR_EUEN
-
la.pcrel t0, cpuboot_data
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index 9d257c8519c9..e34a6fb33e11 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -626,6 +626,18 @@ static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 conf
return pev;
}
+static inline bool loongarch_pmu_event_requires_counter(const struct perf_event *event)
+{
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ case PERF_TYPE_RAW:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int validate_group(struct perf_event *event)
{
struct cpu_hw_events fake_cpuc;
@@ -633,15 +645,18 @@ static int validate_group(struct perf_event *event)
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
- if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ if (loongarch_pmu_event_requires_counter(leader) &&
+ loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL;
for_each_sibling_event(sibling, leader) {
- if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ if (loongarch_pmu_event_requires_counter(sibling) &&
+ loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL;
}
- if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ if (loongarch_pmu_event_requires_counter(event) &&
+ loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
return -EINVAL;
return 0;
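The perf_event.c change narrows group validation to events that actually consume a LoongArch PMU counter: hardware, hw-cache, and raw events occupy one, while other attr types (software events grouped under a hardware leader, for example) should not be able to exhaust the fake counters and fail the group. A hedged userspace illustration of such a mixed group; the attr values are invented for the example:

    /* sketch: a hardware leader with a software sibling; before the fix the
     * sibling also consumed a fake counter during validate_group() */
    struct perf_event_attr leader_attr = {
            .type = PERF_TYPE_HARDWARE,
            .config = PERF_COUNT_HW_CPU_CYCLES,
            .size = sizeof(struct perf_event_attr),
    };
    struct perf_event_attr sibling_attr = {
            .type = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_PAGE_FAULTS,
            .size = sizeof(struct perf_event_attr),
    };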
diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
index 29886876143f..dfaf6ccfdd8b 100644
--- a/arch/loongarch/kvm/intc/eiointc.c
+++ b/arch/loongarch/kvm/intc/eiointc.c
@@ -679,6 +679,7 @@ static void kvm_eiointc_destroy(struct kvm_device *dev)
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
kfree(eiointc);
+ kfree(dev);
}
static struct kvm_device_ops kvm_eiointc_dev_ops = {
diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
index 05cefd29282e..1058c13dba7f 100644
--- a/arch/loongarch/kvm/intc/ipi.c
+++ b/arch/loongarch/kvm/intc/ipi.c
@@ -459,6 +459,7 @@ static void kvm_ipi_destroy(struct kvm_device *dev)
ipi = kvm->arch.ipi;
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
kfree(ipi);
+ kfree(dev);
}
static struct kvm_device_ops kvm_ipi_dev_ops = {
diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c
index a698a73de399..4addb34bf432 100644
--- a/arch/loongarch/kvm/intc/pch_pic.c
+++ b/arch/loongarch/kvm/intc/pch_pic.c
@@ -475,6 +475,7 @@ static void kvm_pch_pic_destroy(struct kvm_device *dev)
/* unregister pch pic device and free its memory */
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device);
kfree(s);
+ kfree(dev);
}
static struct kvm_device_ops kvm_pch_pic_dev_ops = {
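The three kfree(dev) additions in the LoongArch interrupt-controller devices follow the kvm_device_ops contract: when an ops structure provides a .destroy callback, KVM core hands the struct kvm_device itself to that callback and never frees it, so dropping only the private state leaks the device struct. A hedged sketch of the expected shape, with a hypothetical state type:

    static void example_intc_destroy(struct kvm_device *dev)
    {
            struct example_intc *s = dev->private;  /* hypothetical state */

            /* unregister I/O buses, then free both allocations */
            kfree(s);
            kfree(dev);  /* the piece these three devices were missing */
    }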
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index a673d3d68254..8986048f9b11 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -425,6 +425,28 @@ void __init paging_init(void)
static struct kcore_list kcore_kseg0;
#endif
+static inline void __init highmem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ /*
+ * If the CPU cannot support HIGHMEM, discard the memory above highstart_pfn
+ */
+ if (cpu_has_dc_aliases) {
+ memblock_remove(PFN_PHYS(highstart_pfn), -1);
+ return;
+ }
+
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+ if (!memblock_is_memory(PFN_PHYS(tmp)))
+ SetPageReserved(page);
+ }
+#endif
+}
+
void __init arch_mm_preinit(void)
{
/*
@@ -435,6 +457,7 @@ void __init arch_mm_preinit(void)
maar_init();
setup_zero_pages(); /* Setup zeroed pages. */
+ highmem_init();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 2429cb1c7baa..764001deb060 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/processor.h>
#include <linux/smp.h>
+#include <linux/sys_info.h>
#include <asm/interrupt.h>
#include <asm/paca.h>
@@ -235,7 +236,11 @@ static void watchdog_smp_panic(int cpu)
pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
- if (!sysctl_hardlockup_all_cpu_backtrace) {
+ if (sysctl_hardlockup_all_cpu_backtrace ||
+ (hardlockup_si_mask & SYS_INFO_ALL_BT)) {
+ trigger_allbutcpu_cpu_backtrace(cpu);
+ cpumask_clear(&wd_smp_cpus_ipi);
+ } else {
/*
* Try to trigger the stuck CPUs, unless we are going to
* get a backtrace on all of them anyway.
@@ -244,11 +249,9 @@ static void watchdog_smp_panic(int cpu)
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
__cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
}
- } else {
- trigger_allbutcpu_cpu_backtrace(cpu);
- cpumask_clear(&wd_smp_cpus_ipi);
}
+ sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
@@ -415,9 +418,11 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
- if (sysctl_hardlockup_all_cpu_backtrace)
+ if (sysctl_hardlockup_all_cpu_backtrace ||
+ (hardlockup_si_mask & SYS_INFO_ALL_BT))
trigger_allbutcpu_cpu_backtrace(cpu);
+ sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
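Note: both watchdog hunks split hardlockup_si_mask the same way: the SYS_INFO_ALL_BT bit is serviced by trigger_allbutcpu_cpu_backtrace(), and only the remaining bits are handed to sys_info(), so the generic dumper never triggers a second round of backtraces on top of the one already taken.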
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index ff5c7134a37a..acf9ffa1a171 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -42,10 +42,34 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
unsigned int level;
pte_t *pte = lookup_address(addr, &level);
+ pteval_t val;
if (WARN_ON(!pte || level != PG_LEVEL_4K))
return false;
+ val = pte_val(*pte);
+
+ /*
+ * protect requires making the page not-present. If the PTE is
+ * already in the right state, there's nothing to do.
+ */
+ if (protect != !!(val & _PAGE_PRESENT))
+ return true;
+
+ /*
+ * Otherwise, invert the entire PTE. This avoids writing out an
+ * L1TF-vulnerable PTE (not present, without the high address bits
+ * set).
+ */
+ set_pte(pte, __pte(~val));
+
+ /*
+ * If the page was protected (non-present) and we're making it
+ * present, there is no need to flush the TLB at all.
+ */
+ if (!protect)
+ return true;
+
/*
* We need to avoid IPIs, as we may get KFENCE allocations or faults
* with interrupts disabled. Therefore, the below is best-effort, and
@@ -53,11 +77,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
* lazy fault handling takes care of faults after the page is PRESENT.
*/
- if (protect)
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
- else
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
-
/*
* Flush this CPU's TLB, assuming whoever did the allocation/free is
* likely to continue running on this CPU.
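The kfence rework replaces toggling _PAGE_PRESENT with inverting the whole PTE. Inversion clears the present bit and flips the physical-address bits in one store, so the resulting not-present entry no longer points at real memory, which is the L1TF concern with not-present PTEs that keep a valid address; inverting again restores the original exactly. A hedged illustration with an invented PTE value:

    pteval_t val = 0x8000000012345067ULL;  /* hypothetical present PTE, bit 0 set */
    pteval_t inv = ~val;                   /* not present, address bits inverted */

    /* round trip: ~inv == val, so unprotecting restores the mapping exactly */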
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 3792ab4819dc..6ebff44a3f75 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -825,7 +825,8 @@ static __init bool get_mem_config(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return __get_mem_config_intel(&hw_res->r_resctrl);
- else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
return false;
@@ -987,7 +988,8 @@ static __init void rdt_init_res_defs(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
rdt_init_res_defs_intel();
- else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
rdt_init_res_defs_amd();
}
@@ -1019,8 +1021,19 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
c->x86_cache_occ_scale = ebx;
c->x86_cache_mbm_width_offset = eax & 0xff;
- if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
- c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
+ if (!c->x86_cache_mbm_width_offset) {
+ switch (c->x86_vendor) {
+ case X86_VENDOR_AMD:
+ c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
+ break;
+ case X86_VENDOR_HYGON:
+ c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_HYGON;
+ break;
+ default:
+ /* Leave c->x86_cache_mbm_width_offset as 0 */
+ break;
+ }
+ }
}
}
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 4a916c84a322..79c18657ede0 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -14,6 +14,9 @@
#define MBM_CNTR_WIDTH_OFFSET_AMD 20
+/* Hygon MBM counter width as an offset from MBM_CNTR_WIDTH_BASE */
+#define MBM_CNTR_WIDTH_OFFSET_HYGON 8
+
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
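The offsets above are added to the architectural base width when resctrl computes the effective MBM counter width. A hedged calculation, assuming the MBM_CNTR_WIDTH_BASE of 24 defined alongside these macros:

    int width_amd   = 24 + MBM_CNTR_WIDTH_OFFSET_AMD;    /* 24 + 20 = 44 bits */
    int width_hygon = 24 + MBM_CNTR_WIDTH_OFFSET_HYGON;  /* 24 +  8 = 32 bits */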
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 3c306de52fd4..834641c6049a 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -115,12 +115,12 @@ void __init kernel_randomize_memory(void)
/*
* Adapt physical memory region size based on available memory,
- * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
- * device BAR space assuming the direct map space is large enough
- * for creating a ZONE_DEVICE mapping in the direct map corresponding
- * to the physical BAR address.
+ * except when CONFIG_ZONE_DEVICE is enabled. ZONE_DEVICE wants to map
+ * any physical address into the direct-map. KASLR wants to reliably
+ * steal some physical address bits. Those design choices are in direct
+ * conflict.
*/
- if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
+ if (!IS_ENABLED(CONFIG_ZONE_DEVICE) && (memory_tb < kaslr_regions[0].size_tb))
kaslr_regions[0].size_tb = memory_tb;
/*
diff --git a/block/bio-integrity-auto.c b/block/bio-integrity-auto.c
index 9850c338548d..cff025b06be1 100644
--- a/block/bio-integrity-auto.c
+++ b/block/bio-integrity-auto.c
@@ -140,7 +140,7 @@ bool bio_integrity_prep(struct bio *bio)
return true;
set_flags = false;
gfp |= __GFP_ZERO;
- } else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+ } else if (bi->metadata_size > bi->pi_tuple_size)
gfp |= __GFP_ZERO;
break;
default:
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 6d4d06236f61..cc3c83e4cc23 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -28,6 +28,10 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+static bool check_lps0_constraints __read_mostly;
+module_param(check_lps0_constraints, bool, 0644);
+MODULE_PARM_DESC(check_lps0_constraints, "Check LPS0 device constraints");
+
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -515,7 +519,8 @@ static struct acpi_scan_handler lps0_handler = {
static int acpi_s2idle_begin_lps0(void)
{
- if (pm_debug_messages_on && !lpi_constraints_table) {
+ if (lps0_device_handle && !sleep_no_lps0 && check_lps0_constraints &&
+ !lpi_constraints_table) {
if (acpi_s2idle_vendor_amd())
lpi_device_get_constraints_amd();
else
@@ -539,7 +544,7 @@ static int acpi_s2idle_prepare_late_lps0(void)
if (!lps0_device_handle || sleep_no_lps0)
return 0;
- if (pm_debug_messages_on)
+ if (check_lps0_constraints)
lpi_check_constraints();
/* Screen off */
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7a7f88b3fa2b..931d0081169b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2094,13 +2094,13 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
- ahci_mark_external_port(ap);
-
- ahci_update_initial_lpm_policy(ap);
-
/* disabled/not-implemented port */
- if (!(hpriv->port_map & (1 << i)))
+ if (!(hpriv->port_map & (1 << i))) {
ap->ops = &ata_dummy_port_ops;
+ } else {
+ ahci_mark_external_port(ap);
+ ahci_update_initial_lpm_policy(ap);
+ }
}
/* apply workaround for ASUS P5W DH Deluxe mainboard */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 09d8c035fcdf..ddf9a7b28a59 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2872,7 +2872,8 @@ static void ata_dev_config_lpm(struct ata_device *dev)
static void ata_dev_print_features(struct ata_device *dev)
{
- if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
+ if (!(dev->flags & ATA_DFLAG_FEATURES_MASK) && !dev->cpr_log &&
+ !ata_id_has_hipm(dev->id) && !ata_id_has_dipm(dev->id))
return;
ata_dev_info(dev,
@@ -3116,6 +3117,11 @@ int ata_dev_configure(struct ata_device *dev)
ata_mode_string(xfer_mask),
cdb_intr_string, atapi_an_string,
dma_dir_string);
+
+ ata_dev_config_lpm(dev);
+
+ if (print_info)
+ ata_dev_print_features(dev);
}
/* determine max_sectors */
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index b2817a2995d6..04e1e774645e 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -909,7 +909,7 @@ static bool ata_scsi_lpm_supported(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
- if (ap->flags & ATA_FLAG_NO_LPM)
+ if ((ap->flags & ATA_FLAG_NO_LPM) || !ap->ops->set_lpm)
return false;
ata_for_each_link(link, ap, EDGE) {
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index c7c0fb79a6bf..4c0632ab4e1b 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -665,12 +665,22 @@ static void nullb_add_fault_config(struct nullb_device *dev)
configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
}
+static void nullb_del_fault_config(struct nullb_device *dev)
+{
+ config_item_put(&dev->init_hctx_fault_config.group.cg_item);
+ config_item_put(&dev->requeue_config.group.cg_item);
+ config_item_put(&dev->timeout_config.group.cg_item);
+}
+
#else
static void nullb_add_fault_config(struct nullb_device *dev)
{
}
+static void nullb_del_fault_config(struct nullb_device *dev)
+{
+}
#endif
static struct
@@ -702,7 +712,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
null_del_dev(dev->nullb);
mutex_unlock(&lock);
}
-
+ nullb_del_fault_config(dev);
config_item_put(item);
}
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index d1c354636315..8194a970f002 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1662,7 +1662,6 @@ static void destroy_sysfs(struct rnbd_clt_dev *dev,
/* To avoid deadlock, remove itself first */
sysfs_remove_file_self(&dev->kobj, sysfs_self);
kobject_del(&dev->kobj);
- kobject_put(&dev->kobj);
}
}
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 77ac940e3013..49bba2b9a3c4 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -75,9 +75,16 @@ EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");
static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
- struct cxl_cxims_data *cximsd = cxlrd->platform_data;
+ int hbiw = cxlrd->cxlsd.nr_targets;
+ struct cxl_cxims_data *cximsd;
+
+ /* No xormaps for host bridge interleave ways of 1 or 3 */
+ if (hbiw == 1 || hbiw == 3)
+ return addr;
+
+ cximsd = cxlrd->platform_data;
- return cxl_do_xormap_calc(cximsd, addr, cxlrd->cxlsd.nr_targets);
+ return cxl_do_xormap_calc(cximsd, addr, hbiw);
}
struct cxl_cxims_context {
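Note: the early return in cxl_apply_xor_maps() encodes a property of the CXIMS math: XOR interleave arithmetic applies to power-of-two host-bridge interleave ways, so a 1-way decoder has nothing to interleave and a 3-way decoder is handled with modulo arithmetic instead; in both cases platform_data carries no xormaps and the address must pass through untranslated.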
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 1c5d2022c87a..eb5a3a7640c6 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -403,7 +403,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
* is not set.
*/
if (cxled->part < 0)
- for (int i = 0; cxlds->nr_partitions; i++)
+ for (int i = 0; i < cxlds->nr_partitions; i++)
if (resource_contains(&cxlds->part[i].res, res)) {
cxled->part = i;
break;
@@ -530,7 +530,7 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
- resource_size_t base = -1;
+ resource_size_t base = RESOURCE_SIZE_MAX;
lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index fef3aa0c6680..3310dbfae9d6 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1590,7 +1590,7 @@ static int update_decoder_targets(struct device *dev, void *data)
cxlsd->target[i] = dport;
dev_dbg(dev, "dport%d found in target list, index %d\n",
dport->port_id, i);
- return 1;
+ return 0;
}
}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index ae899f68551f..5bd1213737fa 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -759,7 +759,7 @@ static ssize_t extended_linear_cache_size_show(struct device *dev,
ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- return sysfs_emit(buf, "%#llx\n", p->cache_size);
+ return sysfs_emit(buf, "%pap\n", &p->cache_size);
}
static DEVICE_ATTR_RO(extended_linear_cache_size);
@@ -3118,7 +3118,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled = NULL;
- u64 dpa_offset, hpa_offset, hpa;
+ u64 base, dpa_offset, hpa_offset, hpa;
u16 eig = 0;
u8 eiw = 0;
int pos;
@@ -3136,8 +3136,14 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
ways_to_eiw(p->interleave_ways, &eiw);
granularity_to_eig(p->interleave_granularity, &eig);
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+ base = cxl_dpa_resource_start(cxled);
+ if (base == RESOURCE_SIZE_MAX)
+ return ULLONG_MAX;
+
+ dpa_offset = dpa - base;
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
+ if (hpa_offset == ULLONG_MAX)
+ return ULLONG_MAX;
/* Apply the hpa_offset to the region base address */
hpa = hpa_offset + p->res->start + p->cache_size;
@@ -3146,6 +3152,9 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
if (cxlrd->ops.hpa_to_spa)
hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa);
+ if (hpa == ULLONG_MAX)
+ return ULLONG_MAX;
+
if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
@@ -3170,7 +3179,8 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
struct cxl_region_params *p = &cxlr->params;
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_endpoint_decoder *cxled;
- u64 hpa, hpa_offset, dpa_offset;
+ u64 hpa_offset = offset;
+ u64 dpa, dpa_offset;
u16 eig = 0;
u8 eiw = 0;
int pos;
@@ -3187,10 +3197,13 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
* CXL HPA is assumed to equal SPA.
*/
if (cxlrd->ops.spa_to_hpa) {
- hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
- hpa_offset = hpa - p->res->start;
- } else {
- hpa_offset = offset;
+ hpa_offset = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
+ if (hpa_offset == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev, "HPA not found for %pr offset %#llx\n",
+ p->res, offset);
+ return -ENXIO;
+ }
+ hpa_offset -= p->res->start;
}
pos = cxl_calculate_position(hpa_offset, eiw, eig);
@@ -3207,8 +3220,13 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
cxled = p->targets[i];
if (cxled->pos != pos)
continue;
+
+ dpa = cxl_dpa_resource_start(cxled);
+ if (dpa != RESOURCE_SIZE_MAX)
+ dpa += dpa_offset;
+
result->cxlmd = cxled_to_memdev(cxled);
- result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
+ result->dpa = dpa;
return 0;
}
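The region.c changes all enforce one sentinel convention for the DPA/HPA/SPA walk: resource starts report failure as RESOURCE_SIZE_MAX and translated addresses as ULLONG_MAX, and every caller must check before doing arithmetic on the value. A hedged sketch of the pattern, with a hypothetical translation stage:

    u64 hpa = translate_stage(dpa);  /* hypothetical helper */

    if (hpa == ULLONG_MAX)
            return ULLONG_MAX;       /* propagate; never add offsets to a sentinel */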
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 0867115aeef2..c6ae27c982f4 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -67,14 +67,16 @@ struct dev_dax_range {
/**
* struct dev_dax - instance data for a subdivision of a dax region, and
* data while the device is activated in the driver.
- * @region - parent region
- * @dax_dev - core dax functionality
+ * @region: parent region
+ * @dax_dev: core dax functionality
+ * @align: alignment of this instance
* @target_node: effective numa node if dev_dax memory range is onlined
* @dyn_id: is this a dynamic or statically created instance
* @id: ida allocated id when the dax_region is not static
* @ida: mapping id allocator
- * @dev - device core
- * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+ * @dev: device core
+ * @pgmap: pgmap for memmap setup / lifetime (driver owned)
+ * @memmap_on_memory: allow kmem to put the memmap in the memory
* @nr_range: size of @ranges
* @ranges: range tuples of memory used
*/
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index bd49f0374291..04bbd774b3b4 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -936,6 +936,7 @@ static void admac_remove(struct platform_device *pdev)
}
static const struct of_device_id admac_of_match[] = {
+ { .compatible = "apple,t8103-admac", },
{ .compatible = "apple,admac", },
{ }
};
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7d226453961f..22bb604a3f97 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1765,6 +1765,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
static void atc_free_chan_resources(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_slave *atslave;
BUG_ON(atc_chan_is_enabled(atchan));
@@ -1774,8 +1775,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*
* Free atslave allocated in at_dma_xlate()
*/
- kfree(chan->private);
- chan->private = NULL;
+ atslave = chan->private;
+ if (atslave) {
+ put_device(atslave->dma_dev);
+ kfree(atslave);
+ chan->private = NULL;
+ }
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 7f0e76439ce5..ed037fa883f6 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1699,7 +1699,7 @@ static int sba_probe(struct platform_device *pdev)
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
- goto fail_free_mchan;
+ goto fail_put_mbox;
/* Check availability of debugfs */
if (!debugfs_initialized())
@@ -1729,6 +1729,8 @@ skip_debugfs:
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
+fail_put_mbox:
+ put_device(sba->mbox_dev);
fail_free_mchan:
mbox_free_channel(sba->mchan);
return ret;
@@ -1744,6 +1746,8 @@ static void sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
+ put_device(sba->mbox_dev);
+
mbox_free_channel(sba->mchan);
}
diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c
index e900d6595617..f7a952fcbc7d 100644
--- a/drivers/dma/cv1800b-dmamux.c
+++ b/drivers/dma/cv1800b-dmamux.c
@@ -102,11 +102,11 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct llist_node *node;
unsigned long flags;
unsigned int chid, devid, cpuid;
- int ret;
+ int ret = -EINVAL;
if (dma_spec->args_count != DMAMUX_NCELLS) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
devid = dma_spec->args[0];
@@ -115,18 +115,18 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (devid > MAX_DMA_MAPPING_ID) {
dev_err(&pdev->dev, "invalid device id: %u\n", devid);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
if (cpuid > MAX_DMA_CPU_ID) {
dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@@ -136,8 +136,6 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (map->peripheral == devid && map->cpu == cpuid)
goto found;
}
-
- ret = -EINVAL;
goto failed;
} else {
node = llist_del_first(&dmamux->free_maps);
@@ -171,12 +169,17 @@ found:
dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
chid, devid, cpuid);
+ put_device(&pdev->dev);
+
return map;
failed:
spin_unlock_irqrestore(&dmamux->lock, flags);
of_node_put(dma_spec->np);
dev_err(&pdev->dev, "errno %d\n", ret);
+err_put_pdev:
+ put_device(&pdev->dev);
+
return ERR_PTR(ret);
}
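This is the first of several route_allocate() fixes in this series (lpc18xx, lpc32xx, stm32, and the TI crossbars below follow) that enforce one rule: the platform device behind the mux is obtained with of_find_device_by_node(), which returns a referenced device, so every exit path of the callback must drop that reference. A hedged skeleton of the shape these callbacks converge on:

    static void *route_allocate(struct of_phandle_args *spec,
                                struct of_dma *ofdma)
    {
            struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
            void *route = ERR_PTR(-EINVAL);

            if (!pdev)
                    return ERR_PTR(-ENODEV);

            if (spec->args_count != 3)
                    goto out;               /* route stays ERR_PTR(-EINVAL) */

            route = do_reserve(pdev, spec); /* hypothetical reservation step */
    out:
            put_device(&pdev->dev);         /* balanced on success and failure */
            return route;
    }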
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index deadf135681b..cbec277af4dd 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -90,7 +90,7 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
ret = -EBUSY;
- goto free_map;
+ goto put_dma_spec_np;
}
mask = BIT(map->req_idx);
@@ -103,6 +103,8 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
+put_dma_spec_np:
+ of_node_put(dma_spec->np);
free_map:
kfree(map);
put_device:
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a59212758029..7137f51ff6a0 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -873,6 +873,7 @@ err_errirq:
free_irq(fsl_chan->txirq, fsl_chan);
err_txirq:
dma_pool_destroy(fsl_chan->tcd_pool);
+ clk_disable_unprepare(fsl_chan->clk);
return ret;
}
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
index eff9943f1a42..95b8ef958633 100644
--- a/drivers/dma/idxd/compat.c
+++ b/drivers/dma/idxd/compat.c
@@ -20,11 +20,16 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t c
int rc = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
- if (dev && dev->driver) {
+ if (!dev)
+ return -ENODEV;
+
+ if (dev->driver) {
device_driver_detach(dev);
rc = count;
}
+ put_device(dev);
+
return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
@@ -38,9 +43,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
struct idxd_dev *idxd_dev;
dev = bus_find_device_by_name(bus, NULL, buf);
- if (!dev || dev->driver || drv != &dsa_drv.drv)
+ if (!dev)
return -ENODEV;
+ if (dev->driver || drv != &dsa_drv.drv)
+ goto err_put_dev;
+
idxd_dev = confdev_to_idxd_dev(dev);
if (is_idxd_dev(idxd_dev)) {
alt_drv = driver_find("idxd", bus);
@@ -53,13 +61,20 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
alt_drv = driver_find("user", bus);
}
if (!alt_drv)
- return -ENODEV;
+ goto err_put_dev;
rc = device_driver_attach(alt_drv, dev);
if (rc < 0)
- return rc;
+ goto err_put_dev;
+
+ put_device(dev);
return count;
+
+err_put_dev:
+ put_device(dev);
+
+ return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
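Note: the idxd compat rework applies the same reference rule with a different lookup: bus_find_device_by_name() also returns a device with an elevated refcount, so after the initial !dev check both stores now put_device() on every path rather than returning directly.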
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
index 2b6436f4b193..d3ff521951b8 100644
--- a/drivers/dma/lpc18xx-dmamux.c
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -57,30 +57,31 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
unsigned long flags;
unsigned mux;
+ int ret = -EINVAL;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
mux = dma_spec->args[0];
if (mux >= dmamux->dma_master_requests) {
dev_err(&pdev->dev, "invalid mux number: %d\n",
dma_spec->args[0]);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
dev_err(&pdev->dev, "invalid dma mux value: %d\n",
dma_spec->args[1]);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@@ -89,7 +90,8 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
mux, mux, dmamux->muxes[mux].value);
of_node_put(dma_spec->np);
- return ERR_PTR(-EBUSY);
+ ret = -EBUSY;
+ goto err_put_pdev;
}
dmamux->muxes[mux].busy = true;
@@ -106,7 +108,14 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
dmamux->muxes[mux].value, mux);
+ put_device(&pdev->dev);
+
return &dmamux->muxes[mux];
+
+err_put_pdev:
+ put_device(&pdev->dev);
+
+ return ERR_PTR(ret);
}
static int lpc18xx_dmamux_probe(struct platform_device *pdev)
diff --git a/drivers/dma/lpc32xx-dmamux.c b/drivers/dma/lpc32xx-dmamux.c
index 351d7e23e615..33be714740dd 100644
--- a/drivers/dma/lpc32xx-dmamux.c
+++ b/drivers/dma/lpc32xx-dmamux.c
@@ -95,11 +95,12 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
unsigned long flags;
struct lpc32xx_dmamux *mux = NULL;
+ int ret = -EINVAL;
int i;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
@@ -111,20 +112,20 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
if (!mux) {
dev_err(&pdev->dev, "invalid mux request number: %d\n",
dma_spec->args[0]);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
if (dma_spec->args[2] > 1) {
dev_err(&pdev->dev, "invalid dma mux value: %d\n",
dma_spec->args[1]);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@@ -133,7 +134,8 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_err(dev, "dma request signal %d busy, routed to %s\n",
mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
of_node_put(dma_spec->np);
- return ERR_PTR(-EBUSY);
+ ret = -EBUSY;
+ goto err_put_pdev;
}
mux->busy = true;
@@ -148,7 +150,14 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_dbg(dev, "dma request signal %d routed to %s\n",
mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+ put_device(&pdev->dev);
+
return mux;
+
+err_put_pdev:
+ put_device(&pdev->dev);
+
+ return ERR_PTR(ret);
}
static int lpc32xx_dmamux_probe(struct platform_device *pdev)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index d07229a74886..d12e729ee12c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -152,8 +152,8 @@ struct mmp_pdma_phy {
*
* Controller Configuration:
* @run_bits: Control bits in DCSR register for channel start/stop
- * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
- * settings, or explicit mask like DMA_BIT_MASK(32/64)
+ * @dma_width: DMA addressing width in bits (32 or 64). Determines the
+ * DMA mask capability of the controller hardware.
*/
struct mmp_pdma_ops {
/* Hardware Register Operations */
@@ -173,7 +173,7 @@ struct mmp_pdma_ops {
/* Controller Configuration */
u32 run_bits;
- u64 dma_mask;
+ u32 dma_width;
};
struct mmp_pdma_device {
@@ -928,6 +928,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
{
struct mmp_pdma_desc_sw *sw;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ unsigned long flags;
u64 curr;
u32 residue = 0;
bool passed = false;
@@ -945,6 +946,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
else
curr = pdev->ops->read_src_addr(chan->phy);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
list_for_each_entry(sw, &chan->chain_running, node) {
u64 start, end;
u32 len;
@@ -989,6 +992,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
continue;
if (sw->async_tx.cookie == cookie) {
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
return residue;
} else {
residue = 0;
@@ -996,6 +1000,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
}
}
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
/* We should only get here in case of cyclic transactions */
return residue;
}
@@ -1172,7 +1178,7 @@ static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
.get_desc_src_addr = get_desc_src_addr_32,
.get_desc_dst_addr = get_desc_dst_addr_32,
.run_bits = (DCSR_RUN),
- .dma_mask = 0, /* let OF/platform set DMA mask */
+ .dma_width = 32,
};
static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
@@ -1185,7 +1191,7 @@ static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
.get_desc_src_addr = get_desc_src_addr_64,
.get_desc_dst_addr = get_desc_dst_addr_64,
.run_bits = (DCSR_RUN | DCSR_LPAEEN),
- .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+ .dma_width = 64,
};
static const struct of_device_id mmp_pdma_dt_ids[] = {
@@ -1314,13 +1320,9 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- /* Set DMA mask based on ops->dma_mask, or OF/platform */
- if (pdev->ops->dma_mask)
- dma_set_mask(pdev->dev, pdev->ops->dma_mask);
- else if (pdev->dev->coherent_dma_mask)
- dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
- else
- dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+ /* Set DMA mask based on controller hardware capabilities */
+ dma_set_mask_and_coherent(pdev->dev,
+ DMA_BIT_MASK(pdev->ops->dma_width));
ret = dma_async_device_register(&pdev->device);
if (ret) {
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 66bfea1f156d..6e30f3aa401e 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1605,14 +1605,16 @@ static int
gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
{
struct gchan *gchan = to_gchan(chan);
+ void *new_config;
if (!config->peripheral_config)
return -EINVAL;
- gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
- if (!gchan->config)
+ new_config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
+ if (!new_config)
return -ENOMEM;
+ gchan->config = new_config;
memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
return 0;
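The gpi.c change is the standard krealloc idiom: on failure krealloc() returns NULL but leaves the original allocation valid, so assigning its result straight back to the only pointer leaks the old buffer. The safe shape, as a minimal sketch:

    void *tmp = krealloc(buf, new_size, GFP_NOWAIT);

    if (!tmp)
            return -ENOMEM;  /* buf is untouched and still owned */
    buf = tmp;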
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 1f687b08d6b8..9e5f088355e2 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -557,11 +557,16 @@ rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.base;
unsigned long flags;
+ unsigned int i;
LIST_HEAD(head);
rz_dmac_disable_hw(channel);
spin_lock_irqsave(&channel->vc.lock, flags);
+ for (i = 0; i < DMAC_NR_LMDESC; i++)
+ lmdesc[i].header = 0;
+
list_splice_tail_init(&channel->ld_active, &channel->ld_free);
list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
vchan_get_all_descriptors(&channel->vc, &head);
@@ -854,6 +859,13 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
+static void rz_dmac_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -876,6 +888,10 @@ static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
return -ENODEV;
}
+ ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
+ if (ret)
+ return ret;
+
dmac_index = args.args[0];
if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
@@ -1055,8 +1071,6 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {
diff --git a/drivers/dma/stm32/stm32-dmamux.c b/drivers/dma/stm32/stm32-dmamux.c
index 8d77e2a7939a..db13498b9c9f 100644
--- a/drivers/dma/stm32/stm32-dmamux.c
+++ b/drivers/dma/stm32/stm32-dmamux.c
@@ -90,23 +90,25 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
struct stm32_dmamux *mux;
u32 i, min, max;
- int ret;
+ int ret = -EINVAL;
unsigned long flags;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
if (dma_spec->args[0] > dmamux->dmamux_requests) {
dev_err(&pdev->dev, "invalid mux request number: %d\n",
dma_spec->args[0]);
- return ERR_PTR(-EINVAL);
+ goto err_put_pdev;
}
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
- if (!mux)
- return ERR_PTR(-ENOMEM);
+ if (!mux) {
+ ret = -ENOMEM;
+ goto err_put_pdev;
+ }
spin_lock_irqsave(&dmamux->lock, flags);
mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
@@ -116,7 +118,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
spin_unlock_irqrestore(&dmamux->lock, flags);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
ret = -ENOMEM;
- goto error_chan_id;
+ goto err_free_mux;
}
set_bit(mux->chan_id, dmamux->dma_inuse);
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -133,8 +135,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
- ret = -EINVAL;
- goto error;
+ goto err_clear_inuse;
}
/* Set dma request */
@@ -142,7 +143,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
spin_unlock_irqrestore(&dmamux->lock, flags);
- goto error;
+ goto err_put_dma_spec_np;
}
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -160,13 +161,19 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
mux->request, mux->master, mux->chan_id);
+ put_device(&pdev->dev);
+
return mux;
-error:
+err_put_dma_spec_np:
+ of_node_put(dma_spec->np);
+err_clear_inuse:
clear_bit(mux->chan_id, dmamux->dma_inuse);
-
-error_chan_id:
+err_free_mux:
kfree(mux);
+err_put_pdev:
+ put_device(&pdev->dev);
+
return ERR_PTR(ret);
}
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index d0e8bb27a03b..215bfef37ec6 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -429,10 +429,17 @@ static void tegra_adma_stop(struct tegra_adma_chan *tdc)
return;
}
- kfree(tdc->desc);
+ vchan_terminate_vdesc(&tdc->desc->vd);
tdc->desc = NULL;
}
+static void tegra_adma_synchronize(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+ vchan_synchronize(&tdc->vc);
+}
+
static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
@@ -1155,6 +1162,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->dma_dev.device_config = tegra_adma_slave_config;
tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
+ tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
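Replacing kfree() with vchan_terminate_vdesc() changes the descriptor's lifetime: the virt-dma core only queues it for teardown, and it is actually reclaimed in vchan_synchronize(), which is why the same patch wires up a device_synchronize hook. From a dmaengine client the pairing looks like this:

    dmaengine_terminate_async(chan);  /* ends in tegra_adma_terminate_all() */
    dmaengine_synchronize(chan);      /* tegra_adma_synchronize() lets the vdesc be freed */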
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 7f17ee87a6dc..e04077d542d2 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -79,34 +79,35 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
- struct ti_am335x_xbar_map *map;
+ struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
if (dma_spec->args_count != 3)
- return ERR_PTR(-EINVAL);
+ goto out_put_pdev;
if (dma_spec->args[2] >= xbar->xbar_events) {
dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
dma_spec->args[2]);
- return ERR_PTR(-EINVAL);
+ goto out_put_pdev;
}
if (dma_spec->args[0] >= xbar->dma_requests) {
dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
dma_spec->args[0]);
- return ERR_PTR(-EINVAL);
+ goto out_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
- return ERR_PTR(-EINVAL);
+ goto out_put_pdev;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
- return ERR_PTR(-ENOMEM);
+ map = ERR_PTR(-ENOMEM);
+ goto out_put_pdev;
}
map->dma_line = (u16)dma_spec->args[0];
@@ -120,6 +121,9 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
+out_put_pdev:
+ put_device(&pdev->dev);
+
return map;
}
@@ -241,28 +245,26 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
- struct ti_dra7_xbar_map *map;
+ struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
if (dma_spec->args[0] >= xbar->xbar_requests) {
dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
dma_spec->args[0]);
- put_device(&pdev->dev);
- return ERR_PTR(-EINVAL);
+ goto out_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
- put_device(&pdev->dev);
- return ERR_PTR(-EINVAL);
+ goto out_put_pdev;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
- put_device(&pdev->dev);
- return ERR_PTR(-ENOMEM);
+ map = ERR_PTR(-ENOMEM);
+ goto out_put_pdev;
}
mutex_lock(&xbar->mutex);
@@ -273,8 +275,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
of_node_put(dma_spec->np);
- put_device(&pdev->dev);
- return ERR_PTR(-ENOMEM);
+ map = ERR_PTR(-ENOMEM);
+ goto out_put_pdev;
}
set_bit(map->xbar_out, xbar->dma_inuse);
mutex_unlock(&xbar->mutex);
@@ -288,6 +290,9 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
+out_put_pdev:
+ put_device(&pdev->dev);
+
return map;
}
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 05228bf00033..624360423ef1 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -42,9 +42,9 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
ud = platform_get_drvdata(pdev);
+ put_device(&pdev->dev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
- put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 8c023c6e623a..73ed4b794630 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1808,6 +1808,8 @@ static int omap_dma_probe(struct platform_device *pdev)
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
rc);
+ if (od->ll123_supported)
+ dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
return rc;
}
@@ -1823,6 +1825,8 @@ static int omap_dma_probe(struct platform_device *pdev)
if (rc) {
pr_warn("OMAP-DMA: failed to register DMA controller\n");
dma_async_device_unregister(&od->ddev);
+ if (od->ll123_supported)
+ dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
}
}
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 6ad08878e938..70bca92621aa 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -9,6 +9,7 @@
/* The length of register space exposed to host */
#define XDMA_REG_SPACE_LEN 65536
+#define XDMA_MAX_REG_OFFSET (XDMA_REG_SPACE_LEN - 4)
/*
* maximum number of DMA channels for each direction:
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 0d88b1a670e1..5ecf8223c112 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -38,7 +38,7 @@ static const struct regmap_config xdma_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = XDMA_REG_SPACE_LEN,
+ .max_register = XDMA_MAX_REG_OFFSET,
};
/**
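The xdma fix hinges on regmap's definition of .max_register: it is the largest valid register offset, not the length of the register window. With 32-bit registers at stride 4 in a 65536-byte space the last addressable offset is 65536 - 4, so using the full length allowed access one register past the end. A minimal illustration:

    static const struct regmap_config example_cfg = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
            .max_register = 65536 - 4,  /* highest offset, not the window size */
    };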
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index fabff602065f..89a8254d9cdc 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -131,6 +131,7 @@
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
+#define XILINX_DMA_DFAULT_ADDRWIDTH 0x20
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -3159,7 +3160,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
- u32 num_frames, addr_width, len_width;
+ u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@@ -3235,7 +3236,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
if (err < 0)
- dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+ dev_warn(xdev->dev,
+ "missing xlnx,addrwidth property, using default value %d\n",
+ XILINX_DMA_DFAULT_ADDRWIDTH);
if (addr_width > 32)
xdev->ext_addr = true;
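Pre-seeding addr_width works because of_property_read_u32() leaves the output variable untouched when the property is absent, so the initializer doubles as the default. A hedged sketch of the idiom:

    u32 addr_width = 32;  /* default when the DT property is missing */

    if (of_property_read_u32(node, "xlnx,addrwidth", &addr_width))
            dev_warn(dev, "missing xlnx,addrwidth, using %u\n", addr_width);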
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index a461095efd8a..8879a7235156 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -83,10 +83,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
if (ref->pin != pin)
continue;
reg = dpll_pin_registration_find(ref, ops, priv, cookie);
- if (reg) {
- refcount_inc(&ref->refcount);
- return 0;
- }
+ if (reg)
+ return -EEXIST;
ref_exists = true;
break;
}
@@ -164,10 +162,8 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
if (ref->dpll != dpll)
continue;
reg = dpll_pin_registration_find(ref, ops, priv, cookie);
- if (reg) {
- refcount_inc(&ref->refcount);
- return 0;
- }
+ if (reg)
+ return -EEXIST;
ref_exists = true;
break;
}
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index afccdebf5ac1..6cade6d7ceff 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -358,10 +358,11 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
- sizeof(struct i3200_priv));
+
+ rc = -ENOMEM;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct i3200_priv));
if (!mci)
- return -ENOMEM;
+ goto unmap;
edac_dbg(3, "MC: init mci\n");
@@ -421,9 +422,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
+ edac_mc_free(mci);
+unmap:
iounmap(window);
- if (mci)
- edac_mc_free(mci);
return rc;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 49ab5721aab2..292dda754c23 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -341,9 +341,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = x38_channel_num;
layers[1].is_virt_csrow = false;
+
+ rc = -ENOMEM;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
- return -ENOMEM;
+ goto unmap;
edac_dbg(3, "MC: init mci\n");
@@ -403,9 +406,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
+ edac_mc_free(mci);
+unmap:
iounmap(window);
- if (mci)
- edac_mc_free(mci);
return rc;
}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 0232bd040f61..bd99802cb0ca 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -162,7 +162,7 @@ int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
len -= size;
str += size;
}
- return len - buf_size;
+ return buf_size - len;
}
EXPORT_SYMBOL_GPL(cper_bits_to_str);
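Note: in cper_bits_to_str(), len starts at buf_size and is decremented by every chunk written while str advances, so at the end len holds the space remaining; the bytes actually produced are buf_size - len, and the previous expression returned that count negated.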
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 55452e61af31..17b5f3415465 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -74,10 +74,10 @@ struct mm_struct efi_mm = {
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
.user_ns = &init_user_ns,
- .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
#ifdef CONFIG_SCHED_MM_CID
.mm_cid.lock = __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
#endif
+ .flexible_array = MM_STRUCT_FLEXIBLE_ARRAY_INIT,
};
struct workqueue_struct *efi_rts_wq;
@@ -819,6 +819,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
if (tbl) {
phys_initrd_start = tbl->base;
phys_initrd_size = tbl->size;
+ tbl->base = tbl->size = 0;
early_memunmap(tbl, sizeof(*tbl));
}
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 538f27209ce7..97780f27ce5b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -6,6 +6,7 @@
* Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
*/
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -109,6 +110,22 @@ davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
return __davinci_direction(chip, offset, true, value);
}
+static int davinci_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct davinci_gpio_controller *d = gpiochip_get_data(chip);
+ struct davinci_gpio_regs __iomem *g;
+ u32 mask = __gpio_mask(offset), val;
+ int bank = offset / 32;
+
+ g = d->regs[bank];
+
+ guard(spinlock_irqsave)(&d->lock);
+
+ val = readl_relaxed(&g->dir);
+
+ return (val & mask) ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
/*
* Read the pin's value (works even if it's set up as output);
* returns zero/nonzero.
@@ -203,6 +220,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
chips->chip.set = davinci_gpio_set;
+ chips->chip.get_direction = davinci_get_direction;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
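davinci_get_direction() leans on the scope-based lock guard from <linux/cleanup.h>: guard(spinlock_irqsave) acquires the lock and releases it automatically when the enclosing scope ends, including on a direct return, so no explicit unlock or goto unwinding is needed. A hedged sketch of the semantics:

    guard(spinlock_irqsave)(&d->lock);  /* lock taken here */
    val = readl_relaxed(&g->dir);
    return !!(val & mask);              /* lock dropped at scope exit */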
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index dcf427d3cf43..fe2d107b0a84 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -468,9 +468,6 @@ int gpiod_get_direction(struct gpio_desc *desc)
test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
- if (!guard.gc->get_direction)
- return -ENOTSUPP;
-
ret = gpiochip_get_direction(guard.gc, offset);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f9774f58ce1..b20a06abb65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,6 +274,8 @@ extern int amdgpu_rebar;
extern int amdgpu_wbrf;
extern int amdgpu_user_queue;
+extern uint amdgpu_hdmi_hpd_debounce_delay_ms;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5c44bd34d45..d2c3885de711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5063,6 +5063,14 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ /*
+ * device went through surprise hotplug; destroy the KFD topology before
+ * ip_fini_early, which would otherwise hit KFD locking/refcount issues
+ * when it calls amdgpu_amdkfd_suspend()
+ */
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_amdkfd_device_fini_sw(adev);
+
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b5d34797d606..52bc04452812 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1880,7 +1880,12 @@ int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb)
{
struct amdgpu_bo *abo;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_framebuffer *fb;
+
+ if (drm_drv_uses_atomic_modeset(plane->dev))
+ fb = plane->state->fb;
+ else
+ fb = plane->fb;
if (!fb)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e22cfa7c6d32..c1461317eb29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -95,18 +95,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
attach->peer2peer = false;
- /*
- * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
- * Such buffers cannot be safely accessed over P2P due to device-local
- * compression metadata. Fallback to system-memory path instead.
- * Device supports GFX12 (GC 12.x or newer)
- * BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
- *
- */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
- bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
- attach->peer2peer = false;
-
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 848e6b7db482..6ccb80e2d7c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -247,6 +247,7 @@ int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
int amdgpu_rebar = -1; /* auto */
int amdgpu_user_queue = -1;
+uint amdgpu_hdmi_hpd_debounce_delay_ms;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -1123,6 +1124,16 @@ module_param_named(rebar, amdgpu_rebar, int, 0444);
MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+/*
+ * DOC: hdmi_hpd_debounce_delay_ms (uint)
+ * HDMI HPD disconnect debounce delay in milliseconds.
+ *
+ * Used to filter the short disconnect->reconnect HPD toggles some HDMI sinks
+ * generate while entering/leaving power save. Defaults to 0 (disabled).
+ */
+MODULE_PARM_DESC(hdmi_hpd_debounce_delay_ms, "HDMI HPD disconnect debounce delay in milliseconds (default 0 = disabled, 1500 is common)");
+module_param_named(hdmi_hpd_debounce_delay_ms, amdgpu_hdmi_hpd_debounce_delay_ms, uint, 0644);
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
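[Editor's note: the new parameter is consumed by the amdgpu_dm hunk further below, which clamps it rather than trusting it verbatim. A minimal sketch of that clamping, with illustrative names:]

	#define EXAMPLE_MAX_DEBOUNCE_MS 5000U

	static unsigned int example_clamp_debounce(unsigned int requested_ms)
	{
		/* 0 keeps debouncing disabled; anything else is capped at the max */
		return requested_ms ? min(requested_ms, EXAMPLE_MAX_DEBOUNCE_MS) : 0U;
	}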
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d2237ce9da70..1485f4789440 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -375,7 +375,7 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @start_page: first page to map in the GART aperture
* @num_pages: number of pages to be mapped
* @flags: page table entry flags
- * @dst: CPU address of the GART table
+ * @dst: valid CPU address of the GART table; must not be NULL
*
* Binds a BO that is allocated in VRAM to the GART page table
* (all ASICs).
@@ -396,7 +396,7 @@ void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
return;
for (i = 0; i < num_pages; ++i) {
- amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ amdgpu_gmc_set_pte_pde(adev, dst,
start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 869bceb0fe2c..8924380086c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -732,6 +732,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
return 0;
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
+
+ if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
+ return 0;
+
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 9a969175900e..58b26c78b642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -885,12 +885,28 @@ static int amdgpu_userq_input_args_validate(struct drm_device *dev,
return 0;
}
+bool amdgpu_userq_enabled(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ return true;
+ }
+
+ return false;
+}
+
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_userq *args = data;
int r;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index c37444427a14..b48b3bc293fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -141,6 +141,7 @@ uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
struct drm_file *filp);
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+bool amdgpu_userq_enabled(struct drm_device *dev);
int amdgpu_userq_suspend(struct amdgpu_device *adev);
int amdgpu_userq_resume(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index eba9fb359047..85e9edc1cb6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -141,6 +141,8 @@ static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
+ dma_fence_put(userq->last_fence);
+
amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
xa_destroy(&userq->fence_drv_xa);
/* Drop the fence_drv reference held by user queue */
@@ -471,6 +473,9 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_exec exec;
u64 wptr;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
num_syncobj_handles = args->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
size_mul(sizeof(u32), num_syncobj_handles));
@@ -653,6 +658,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
int r, i, rentry, wentry, cnt;
struct drm_exec exec;
+ if (!amdgpu_userq_enabled(dev))
+ return -ENOTSUPP;
+
num_read_bo_handles = wait_info->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
size_mul(sizeof(u32), num_read_bo_handles));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c362d4dfb5bb..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1069,9 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked &&
- /* SI doesn't support pasid or KIQ/MES */
- params->adev->family > AMDGPU_FAMILY_SI) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 8ad7519f7b58..f1ee3921d970 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1235,16 +1235,16 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
break;
case AMDGPU_VM_MTYPE_WC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
break;
case AMDGPU_VM_MTYPE_RW:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
break;
case AMDGPU_VM_MTYPE_CC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
break;
case AMDGPU_VM_MTYPE_UC:
- *flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
break;
}
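[Editor's note: the four MTYPE fixes above replace "|=" with "=". A standalone sketch of why that matters, assuming — as the macro name suggests — that AMDGPU_PTE_MTYPE_VG10() clears the MTYPE field before inserting the new value; OR-ing the result back re-introduces the old field bits:]

	#include <stdint.h>
	#include <assert.h>

	#define MT_SHIFT 57
	#define MT_MASK  (UINT64_C(0x3) << MT_SHIFT)
	/* hypothetical field-insert helper: clear the field, insert new value */
	#define SET_MTYPE(flags, mt) (((flags) & ~MT_MASK) | ((uint64_t)(mt) << MT_SHIFT))

	int main(void)
	{
		uint64_t flags = SET_MTYPE(0, 0x2);	/* old field = 2 */

		/* "=": field cleanly replaced */
		assert(((SET_MTYPE(flags, 0x1) >> MT_SHIFT) & 0x3) == 0x1);
		/* "|=": old bit 2 survives and corrupts the field */
		flags |= SET_MTYPE(flags, 0x1);
		assert(((flags >> MT_SHIFT) & 0x3) == 0x3);
		return 0;
	}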
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d7a2e7178ea9..625ea8ab7a74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1209,14 +1209,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
- if (dqm->dev->kfd->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes)
pdd->last_evict_timestamp = get_jiffies_64();
- retval = suspend_all_queues_mes(dqm);
- if (retval) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
- }
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@@ -1246,10 +1240,6 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
- } else {
- retval = resume_all_queues_mes(dqm);
- if (retval)
- dev_err(dev, "Resuming all queues failed");
}
out:
@@ -2919,6 +2909,14 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
return retval;
}
+static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
+ struct kfd_mem_obj *mqd)
+{
+ WARN(!mqd, "No hiq sdma mqd trunk to free");
+
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+}
+
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
@@ -3042,19 +3040,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
return dqm;
}
+ if (!dev->kfd->shared_resources.enable_mes)
+ deallocate_hiq_sdma_mqd(dev, &dqm->hiq_sdma_mqd);
+
out_free:
kfree(dqm);
return NULL;
}
-static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
- struct kfd_mem_obj *mqd)
-{
- WARN(!mqd, "No hiq sdma mqd trunk to free");
-
- amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
-}
-
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.stop(dqm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 740711ac1037..1ea5a250440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5266,6 +5266,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
int min, max;
+ int real_brightness;
+ int init_brightness;
if (aconnector->bl_idx == -1)
return;
@@ -5290,6 +5292,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
+ init_brightness = props.brightness;
+
if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
props.scale = BACKLIGHT_SCALE_NON_LINEAR;
@@ -5308,8 +5312,20 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
- } else
+ } else {
+ /*
+ * dm->brightness[x] can be inconsistent just after startup until
+ * ops.get_brightness is called.
+ */
+ real_brightness =
+ amdgpu_dm_backlight_ops.get_brightness(dm->backlight_dev[aconnector->bl_idx]);
+
+ if (real_brightness != init_brightness) {
+ dm->actual_brightness[aconnector->bl_idx] = real_brightness;
+ dm->brightness[aconnector->bl_idx] = real_brightness;
+ }
drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
+ }
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5626,7 +5642,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
- drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ drm_info(adev_to_drm(adev), "%s: PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ aconnector->base.name,
link->psr_settings.psr_feature_enabled,
link->psr_settings.psr_version,
link->dpcd_caps.psr_info.psr_version,
@@ -8930,9 +8947,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
- aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
- INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
- aconnector->hdmi_prev_sink = NULL;
+ /*
+	 * If an HDMI HPD debounce delay is set, clamp it to
+	 * AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS.
+ */
+ if (amdgpu_hdmi_hpd_debounce_delay_ms) {
+ aconnector->hdmi_hpd_debounce_delay_ms = min(amdgpu_hdmi_hpd_debounce_delay_ms,
+ AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS);
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+ } else {
+ aconnector->hdmi_hpd_debounce_delay_ms = 0;
+ }
/*
	 * Configure HPD hot plug support; connector->polled defaults to 0.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index bd0403005f37..beb0d04d3e68 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,7 +59,10 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
-#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
+/*
+ * Maximum HDMI HPD debounce delay in milliseconds
+ */
+#define AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS 5000
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
index b015e80672ec..fcd3ab4b0045 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
@@ -41,7 +41,7 @@
/* kHZ*/
#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
/* kHZ*/
-#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 340000
struct dp_hdmi_dongle_signature_data {
int8_t id[15];/* "DP-HDMI ADAPTOR"*/
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index e1940b8e5bc3..7fa6bc97a919 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -336,7 +336,7 @@ static void query_dp_dual_mode_adaptor(
/* Assume we have no valid DP passive dongle connected */
*dongle = DISPLAY_DONGLE_NONE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
if (!i2c_read(
@@ -392,6 +392,8 @@ static void query_dp_dual_mode_adaptor(
}
}
+ if (is_valid_hdmi_signature)
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
if (is_type2_dongle) {
uint32_t max_tmds_clk =
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 33c3cd2e1e24..d7642d388bc3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1702,8 +1702,9 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
+ int16_t od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ uint32_t power_limit;
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index fe4c026280f0..60166919c5b5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -163,6 +163,7 @@ struct dw_hdmi_qp {
unsigned long ref_clk_rate;
struct regmap *regm;
+ int main_irq;
unsigned long tmds_char_rate;
};
@@ -1271,6 +1272,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
dw_hdmi_qp_init_hw(hdmi);
+ hdmi->main_irq = plat_data->main_irq;
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
dw_hdmi_qp_main_hardirq, NULL,
IRQF_SHARED, dev_name(dev), hdmi);
@@ -1331,9 +1333,16 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);
+void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi)
+{
+ disable_irq(hdmi->main_irq);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_qp_suspend);
+
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_init_hw(hdmi);
+ enable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 8a06d296561d..0de47e83d84d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1602,14 +1602,48 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
+/*
+ * drm_gpuvm_bo_destroy_not_in_lists() - final part of drm_gpuvm_bo cleanup
+ * @vm_bo: the &drm_gpuvm_bo to destroy
+ *
+ * It is illegal to call this method while @vm_bo is present in the GEM's
+ * gpuva list, the extobj list, or the evicted list.
+ *
+ * Note that this drops a reference on the GEM object, which may destroy the
+ * GEM object once its refcount reaches zero. That must not happen while the
+ * caller holds the GEM's gpuva mutex, because destroying the GEM object
+ * frees that mutex.
+ */
+static void
+drm_gpuvm_bo_destroy_not_in_lists(struct drm_gpuvm_bo *vm_bo)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gem_object *obj = vm_bo->obj;
+
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+}
+
+static void
+drm_gpuvm_bo_destroy_not_in_lists_kref(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
+}
+
static void
drm_gpuvm_bo_destroy(struct kref *kref)
{
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
kref);
struct drm_gpuvm *gpuvm = vm_bo->vm;
- const struct drm_gpuvm_ops *ops = gpuvm->ops;
- struct drm_gem_object *obj = vm_bo->obj;
bool lock = !drm_gpuvm_resv_protected(gpuvm);
if (!lock)
@@ -1618,16 +1652,10 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(gpuvm, obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, vm_bo->obj);
list_del(&vm_bo->list.entry.gem);
- if (ops && ops->vm_bo_free)
- ops->vm_bo_free(vm_bo);
- else
- kfree(vm_bo);
-
- drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
/**
@@ -1745,9 +1773,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
void
drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
{
- const struct drm_gpuvm_ops *ops = gpuvm->ops;
struct drm_gpuvm_bo *vm_bo;
- struct drm_gem_object *obj;
struct llist_node *bo_defer;
bo_defer = llist_del_all(&gpuvm->bo_defer);
@@ -1766,14 +1792,7 @@ drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
while (bo_defer) {
vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
bo_defer = bo_defer->next;
- obj = vm_bo->obj;
- if (ops && ops->vm_bo_free)
- ops->vm_bo_free(vm_bo);
- else
- kfree(vm_bo);
-
- drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
@@ -1861,6 +1880,9 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
* count is decreased. If not found @__vm_bo is returned without further
* increase of the reference count.
*
+ * The provided @__vm_bo must not be in the gpuva, evict, or extobj lists
+ * when this method is called.
+ *
* A new &drm_gpuvm_bo is added to the GEMs gpuva list.
*
* Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
@@ -1873,14 +1895,19 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
struct drm_gem_object *obj = __vm_bo->obj;
struct drm_gpuvm_bo *vm_bo;
+ drm_WARN_ON(gpuvm->drm, !drm_gpuvm_immediate_mode(gpuvm));
+
+ mutex_lock(&obj->gpuva.lock);
vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
if (vm_bo) {
- drm_gpuvm_bo_put(__vm_bo);
+ mutex_unlock(&obj->gpuva.lock);
+ kref_put(&__vm_bo->kref, drm_gpuvm_bo_destroy_not_in_lists_kref);
return vm_bo;
}
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
+ mutex_unlock(&obj->gpuva.lock);
return __vm_bo;
}
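[Editor's note: the refactor above funnels both destruction paths through one helper invoked from kref release callbacks. The underlying kref pattern, as a self-contained sketch with simplified types, not the gpuvm structures:]

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_bo {
		struct kref kref;
		/* ... payload ... */
	};

	static void example_bo_release(struct kref *kref)
	{
		struct example_bo *bo = container_of(kref, struct example_bo, kref);

		kfree(bo);	/* final cleanup runs exactly once, on the last put */
	}

	/* dropping a reference: kref_put(&bo->kref, example_bo_release); */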
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 76d77a736d84..4b77be94348d 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -457,27 +457,20 @@ int gud_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
- const struct drm_format_info *format = fb->format;
+ const struct drm_format_info *format;
struct drm_connector *connector;
unsigned int i, num_properties;
struct gud_state_req *req;
int idx, ret;
size_t len;
- if (drm_WARN_ON_ONCE(plane->dev, !fb))
- return -EINVAL;
-
- if (drm_WARN_ON_ONCE(plane->dev, !crtc))
- return -EINVAL;
-
- crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
- mode = &crtc_state->mode;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
@@ -492,6 +485,9 @@ int gud_plane_atomic_check(struct drm_plane *plane,
if (old_plane_state->rotation != new_plane_state->rotation)
crtc_state->mode_changed = true;
+ mode = &crtc_state->mode;
+ format = fb->format;
+
if (old_fb && old_fb->format != format)
crtc_state->mode_changed = true;
@@ -598,7 +594,7 @@ void gud_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_helper_damage_iter iter;
int ret, idx;
- if (crtc->state->mode_changed || !crtc->state->enable) {
+ if (!crtc || crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
mutex_lock(&gdrm->damage_lock);
if (gdrm->fb) {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 7582ef34bf3f..303d8d9b7775 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -686,7 +686,7 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
}
/* This list includes registers that are useful in debugging GuC hangs. */
-const struct {
+static const struct {
u32 start;
u32 count;
} guc_hw_reg_state[] = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index a95ee5dcc2e3..1a889139cb05 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -84,6 +84,7 @@ curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
+ nv50_atom(asyh->state.state)->lock_core = true;
}
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 3dd742b4f823..e32ed1db6c56 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -43,6 +43,9 @@ nv50_head_flush_clr(struct nv50_head *head,
union nv50_head_atom_mask clr = {
.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
};
+
+ lockdep_assert_held(&head->disp->mutex);
+
if (clr.crc) nv50_crc_atomic_clr(head);
if (clr.olut) head->func->olut_clr(head);
if (clr.core) head->func->core_clr(head);
@@ -65,6 +68,8 @@ nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
+ lockdep_assert_held(&head->disp->mutex);
+
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index b26b682826bc..162cc58c7b8f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -623,49 +623,6 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
if (IS_ERR(desc))
return ERR_CAST(desc);
- panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
- &panel_simple_funcs, desc->connector_type);
- if (IS_ERR(panel))
- return ERR_CAST(panel);
-
- panel->desc = desc;
-
- panel->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(panel->supply))
- return ERR_CAST(panel->supply);
-
- panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio))
- return dev_err_cast_probe(dev, panel->enable_gpio,
- "failed to request GPIO\n");
-
- err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
- if (err) {
- dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
- return ERR_PTR(err);
- }
-
- ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
- if (ddc) {
- panel->ddc = of_find_i2c_adapter_by_node(ddc);
- of_node_put(ddc);
-
- if (!panel->ddc)
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
- !of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
-
- if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* Optional data-mapping property for overriding bus format */
- err = panel_simple_override_nondefault_lvds_datamapping(dev, panel);
- if (err)
- goto free_ddc;
- }
-
connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
@@ -690,8 +647,7 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
- err = -EINVAL;
- goto free_ddc;
+ return ERR_PTR(-EINVAL);
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
@@ -720,6 +676,49 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
break;
}
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, connector_type);
+ if (IS_ERR(panel))
+ return ERR_CAST(panel);
+
+ panel->desc = desc;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return ERR_CAST(panel->supply);
+
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_cast_probe(dev, panel->enable_gpio,
+ "failed to request GPIO\n");
+
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return ERR_PTR(err);
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!of_device_is_compatible(dev->of_node, "panel-dpi") &&
+ !of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+
+ if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* Optional data-mapping property for overriding bus format */
+ err = panel_simple_override_nondefault_lvds_datamapping(dev, panel);
+ if (err)
+ goto free_ddc;
+ }
+
dev_set_drvdata(dev, panel);
/*
@@ -1900,6 +1899,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index d4839d282689..f6339963e496 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1252,17 +1252,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
goto err_cleanup;
}
- /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
- * pre-allocated BO if the <BO,VM> association exists. Given we
- * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
- * be called immediately, and we have to hold the VM resv lock when
- * calling this function.
- */
- dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->base.base.gpuva.lock);
- dma_resv_unlock(panthor_vm_resv(vm));
op_ctx->map.bo_offset = offset;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index c9fe6aa3e3e3..8604342f9943 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -121,7 +121,7 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
struct drm_crtc *crtc = encoder->crtc;
/* Unconditionally switch to TMDS as FRL is not yet supported */
- gpiod_set_value(hdmi->frl_enable_gpio, 0);
+ gpiod_set_value_cansleep(hdmi->frl_enable_gpio, 0);
if (!crtc || !crtc->state)
return;
@@ -640,6 +640,15 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
}
+static int __maybe_unused dw_hdmi_qp_rockchip_suspend(struct device *dev)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_qp_suspend(dev, hdmi->hdmi);
+
+ return 0;
+}
+
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
@@ -655,7 +664,8 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
}
static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_qp_rockchip_suspend,
+ dw_hdmi_qp_rockchip_resume)
};
struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = {
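[Editor's note: together with the dw-hdmi-qp hunks earlier, this adds a balanced IRQ disable/enable across system sleep. A sketch of the shape, with illustrative names; example_init_hw() is a hypothetical re-init helper:]

	static int example_suspend(struct device *dev)
	{
		struct example_hdmi *hdmi = dev_get_drvdata(dev);

		disable_irq(hdmi->main_irq);	/* no handler runs while suspended */
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		struct example_hdmi *hdmi = dev_get_drvdata(dev);

		example_init_hw(hdmi);		/* restore lost controller state */
		enable_irq(hdmi->main_irq);	/* must balance the disable_irq() */
		return 0;
	}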
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index cd8380f0eddc..f3950e8476a7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2104,7 +2104,7 @@ static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
	 * Spin until the previous port_mux configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
- port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
+ port_mux_sel == vop2->old_port_sel, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
port_mux_sel, vop2->old_port_sel);
@@ -2124,7 +2124,7 @@ static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
* Spin until the previous layer configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
- atv_layer_cfg == cfg, 0, 50 * 1000);
+ atv_layer_cfg == cfg, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
atv_layer_cfg, cfg);
@@ -2144,6 +2144,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
u8 layer_sel_id;
unsigned int ofs;
u32 ovl_ctrl;
+ u32 cfg_done;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
@@ -2298,8 +2299,16 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
rk3568_vop2_wait_for_port_mux_done(vop2);
}
- if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
- rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+ if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) {
+ cfg_done = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+ cfg_done &= (BIT(vop2->data->nr_vps) - 1);
+ cfg_done &= ~BIT(vp->id);
+ /*
+		 * Other VPs still have pending overlay changes that have
+		 * not yet taken effect.
+ */
+ if (cfg_done)
+ rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
+ }
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
mutex_unlock(&vop2->ovl_lock);
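[Editor's note: readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) from <linux/iopoll.h> evaluates val = op(addr) in a loop until cond holds or timeout_us expires, busy-waiting delay_us between reads (atomic context, so no sleeping). The hunks above change that inter-read delay from 0 to 10us so the register is not hammered back-to-back. Illustrative call, with assumed names:]

	u32 status;
	int ret = readx_poll_timeout_atomic(example_read_status, vop2, status,
					    status == expected, 10, 50 * 1000);
	if (ret)
		/* ret == -ETIMEDOUT after 50 ms */;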
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index da670d7eeb2e..de96bfe7562c 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -55,15 +55,6 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
#endif
/*
- * Input parsing
- */
-
-int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
- u64 value, u32 max);
-int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
- u64 value, u32 max);
-
-/*
* Display modes
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index f031a312c783..b22887e8c881 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -32,9 +32,15 @@
#include <drm/ttm/ttm_placement.h>
-static void vmw_bo_release(struct vmw_bo *vbo)
+/**
+ * vmw_bo_free - vmw_bo destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_resource *res;
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
@@ -62,20 +68,8 @@ static void vmw_bo_release(struct vmw_bo *vbo)
}
vmw_surface_unreference(&vbo->dumb_surface);
}
- drm_gem_object_release(&vbo->tbo.base);
-}
-
-/**
- * vmw_bo_free - vmw_bo destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_bo *vbo = to_vmw_bo(&bo->base);
-
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_release(vbo);
+ drm_gem_object_release(&vbo->tbo.base);
WARN_ON(vbo->dirty);
kfree(vbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 00be92da5509..85795082fef9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -515,12 +515,12 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
/**
* vmw_event_fence_action_seq_passed
*
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
+ * @f: The struct dma_fence which provides timestamp for the action event
+ * @cb: The struct dma_fence_cb callback for the action event.
*
- * This function is called when the seqno of the fence where @action is
- * attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context.
+ * This function is called when the seqno of the fence has passed
+ * and it is always called from atomic context.
+ * It queues the event on the submitter's event list.
*/
static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
struct dma_fence_cb *cb)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d32ce1cb579e..bc51b5d55e38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -766,13 +766,15 @@ err_out:
return ERR_PTR(ret);
}
- ttm_bo_reserve(&bo->tbo, false, false, NULL);
- ret = vmw_bo_dirty_add(bo);
- if (!ret && surface && surface->res.func->dirty_alloc) {
- surface->res.coherent = true;
- ret = surface->res.func->dirty_alloc(&surface->res);
+ if (bo) {
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
}
- ttm_bo_unreserve(&bo->tbo);
return &vfb->base;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 69dfe69ce0f8..a8c8c9375d29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -923,8 +923,10 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
ttm_bo_unreserve(&buf->tbo);
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
- if (unlikely(ret != 0))
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
goto no_reserve;
+ }
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
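[Editor's note: the fix above applies the standard ERR_PTR convention — functions returning pointers encode errno in the pointer itself, so checking a stale integer "ret" misses the failure. Minimal sketch with illustrative names:]

	#include <linux/err.h>

	struct thing *thing_alloc(void);  /* returns ERR_PTR(-ENOMEM) etc. on failure */

	int example(void)
	{
		struct thing *t = thing_alloc();

		if (IS_ERR(t))
			return PTR_ERR(t);	/* decode the errno from the pointer */
		/* ... use t ... */
		return 0;
	}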
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 0a3ab7efed46..f1c17fb60dc1 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -195,13 +195,15 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
/*
* Write dump contents to the page. No need to synchronize; panic should
- * be single-threaded.
+ * be single-threaded. Ignore failures from kmsg_dump_get_buffer() since
+ * panic notification should be done even if there is no message data.
+	 * bytes_written may be left unset on failure, so initialize it first.
*/
kmsg_dump_rewind(&iter);
- kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
+ bytes_written = 0;
+ (void)kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
- if (!bytes_written)
- return;
+
/*
* P3 to contain the physical address of the panic page & P4 to
* contain the size of the panic data in that page. Rest of the
@@ -210,7 +212,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
hv_set_msr(HV_MSR_CRASH_P0, 0);
hv_set_msr(HV_MSR_CRASH_P1, 0);
hv_set_msr(HV_MSR_CRASH_P2, 0);
- hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
+ hv_set_msr(HV_MSR_CRASH_P3, bytes_written ? virt_to_phys(hv_panic_page) : 0);
hv_set_msr(HV_MSR_CRASH_P4, bytes_written);
/*
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index b2862e0a317a..cdbc5f5c3215 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -375,7 +375,7 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
return;
/*
- * The cmxchg() above does an implicit memory barrier to
+ * The cmpxchg() above does an implicit memory barrier to
* ensure the write to MessageType (ie set to
* HVMSG_NONE) happens before we read the
* MessagePending and EOMing. Otherwise, the EOMing
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
index d93a18f09c76..0b75ff1edb73 100644
--- a/drivers/hv/mshv_eventfd.c
+++ b/drivers/hv/mshv_eventfd.c
@@ -388,7 +388,7 @@ static int mshv_irqfd_assign(struct mshv_partition *pt,
{
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
struct mshv_irqfd *irqfd, *tmp;
- unsigned int events;
+ __poll_t events;
int ret;
int idx;
diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index 30bacba6aec3..adba3564d9f1 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -20,6 +20,41 @@
#define MSHV_MAP_FAULT_IN_PAGES PTRS_PER_PMD
/**
+ * mshv_chunk_stride - Compute stride for mapping guest memory
+ * @page : The page to check for huge page backing
+ * @gfn : Guest frame number for the mapping
+ * @page_count: Total number of pages in the mapping
+ *
+ * Determines the appropriate stride (in pages) for mapping guest memory.
+ * Uses huge page stride if the backing page is huge and the guest mapping
+ * is properly aligned; otherwise falls back to single page stride.
+ *
+ * Return: Stride in pages, or -EINVAL if page order is unsupported.
+ */
+static int mshv_chunk_stride(struct page *page,
+ u64 gfn, u64 page_count)
+{
+ unsigned int page_order;
+
+ /*
+ * Use single page stride by default. For huge page stride, the
+ * page must be compound and point to the head of the compound
+ * page, and both gfn and page_count must be huge-page aligned.
+ */
+ if (!PageCompound(page) || !PageHead(page) ||
+ !IS_ALIGNED(gfn, PTRS_PER_PMD) ||
+ !IS_ALIGNED(page_count, PTRS_PER_PMD))
+ return 1;
+
+ page_order = folio_order(page_folio(page));
+ /* The hypervisor only supports 2M huge page */
+ if (page_order != PMD_ORDER)
+ return -EINVAL;
+
+ return 1 << page_order;
+}
+
+/**
* mshv_region_process_chunk - Processes a contiguous chunk of memory pages
* in a region.
* @region : Pointer to the memory region structure.
@@ -45,25 +80,23 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
int (*handler)(struct mshv_mem_region *region,
u32 flags,
u64 page_offset,
- u64 page_count))
+ u64 page_count,
+ bool huge_page))
{
- u64 count, stride;
- unsigned int page_order;
+ u64 gfn = region->start_gfn + page_offset;
+ u64 count;
struct page *page;
- int ret;
+ int stride, ret;
page = region->pages[page_offset];
if (!page)
return -EINVAL;
- page_order = folio_order(page_folio(page));
- /* The hypervisor only supports 4K and 2M page sizes */
- if (page_order && page_order != PMD_ORDER)
- return -EINVAL;
+ stride = mshv_chunk_stride(page, gfn, page_count);
+ if (stride < 0)
+ return stride;
- stride = 1 << page_order;
-
- /* Start at stride since the first page is validated */
+ /* Start at stride since the first stride is validated */
for (count = stride; count < page_count; count += stride) {
page = region->pages[page_offset + count];
@@ -71,12 +104,13 @@ static long mshv_region_process_chunk(struct mshv_mem_region *region,
if (!page)
break;
- /* Break if page size changes */
- if (page_order != folio_order(page_folio(page)))
+ /* Break if stride size changes */
+ if (stride != mshv_chunk_stride(page, gfn + count,
+ page_count - count))
break;
}
- ret = handler(region, flags, page_offset, count);
+ ret = handler(region, flags, page_offset, count, stride > 1);
if (ret)
return ret;
@@ -108,7 +142,8 @@ static int mshv_region_process_range(struct mshv_mem_region *region,
int (*handler)(struct mshv_mem_region *region,
u32 flags,
u64 page_offset,
- u64 page_count))
+ u64 page_count,
+ bool huge_page))
{
long ret;
@@ -162,11 +197,10 @@ struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
static int mshv_region_chunk_share(struct mshv_mem_region *region,
u32 flags,
- u64 page_offset, u64 page_count)
+ u64 page_offset, u64 page_count,
+ bool huge_page)
{
- struct page *page = region->pages[page_offset];
-
- if (PageHuge(page) || PageTransCompound(page))
+ if (huge_page)
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
return hv_call_modify_spa_host_access(region->partition->pt_id,
@@ -188,11 +222,10 @@ int mshv_region_share(struct mshv_mem_region *region)
static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
u32 flags,
- u64 page_offset, u64 page_count)
+ u64 page_offset, u64 page_count,
+ bool huge_page)
{
- struct page *page = region->pages[page_offset];
-
- if (PageHuge(page) || PageTransCompound(page))
+ if (huge_page)
flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
return hv_call_modify_spa_host_access(region->partition->pt_id,
@@ -212,11 +245,10 @@ int mshv_region_unshare(struct mshv_mem_region *region)
static int mshv_region_chunk_remap(struct mshv_mem_region *region,
u32 flags,
- u64 page_offset, u64 page_count)
+ u64 page_offset, u64 page_count,
+ bool huge_page)
{
- struct page *page = region->pages[page_offset];
-
- if (PageHuge(page) || PageTransCompound(page))
+ if (huge_page)
flags |= HV_MAP_GPA_LARGE_PAGE;
return hv_call_map_gpa_pages(region->partition->pt_id,
@@ -295,11 +327,10 @@ release_pages:
static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
u32 flags,
- u64 page_offset, u64 page_count)
+ u64 page_offset, u64 page_count,
+ bool huge_page)
{
- struct page *page = region->pages[page_offset];
-
- if (PageHuge(page) || PageTransCompound(page))
+ if (huge_page)
flags |= HV_UNMAP_GPA_LARGE_PAGE;
return hv_call_unmap_gpa_pages(region->partition->pt_id,
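[Editor's note: a worked example for mshv_chunk_stride(), assuming x86-64 geometry (4K base pages, PTRS_PER_PMD == 512, PMD_ORDER == 9): a head page of a 2M folio at a 512-aligned gfn with a 512-aligned page_count yields stride 512; any misalignment or a non-head/non-compound page falls back to stride 1; a compound page of any other order returns -EINVAL. The alignment test, standalone:]

	#include <stdbool.h>
	#include <stdint.h>

	#define PTRS_PER_PMD 512	/* assumed: x86-64 with 4K base pages */

	static bool huge_stride_ok(uint64_t gfn, uint64_t page_count)
	{
		/* both the guest frame and the run length must be 2M-aligned */
		return (gfn % PTRS_PER_PMD) == 0 && (page_count % PTRS_PER_PMD) == 0;
	}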
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 1134a82c7881..681b58154d5e 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -611,7 +611,6 @@ mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
return NULL;
}
-#ifdef CONFIG_X86_64
static struct mshv_mem_region *
mshv_partition_region_by_gfn_get(struct mshv_partition *p, u64 gfn)
{
@@ -643,12 +642,17 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
{
struct mshv_partition *p = vp->vp_partition;
struct mshv_mem_region *region;
- struct hv_x64_memory_intercept_message *msg;
bool ret;
u64 gfn;
-
- msg = (struct hv_x64_memory_intercept_message *)
+#if defined(CONFIG_X86_64)
+ struct hv_x64_memory_intercept_message *msg =
+ (struct hv_x64_memory_intercept_message *)
+ vp->vp_intercept_msg_page->u.payload;
+#elif defined(CONFIG_ARM64)
+ struct hv_arm64_memory_intercept_message *msg =
+ (struct hv_arm64_memory_intercept_message *)
vp->vp_intercept_msg_page->u.payload;
+#endif
gfn = HVPFN_DOWN(msg->guest_physical_address);
@@ -666,9 +670,6 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
return ret;
}
-#else /* CONFIG_X86_64 */
-static bool mshv_handle_gpa_intercept(struct mshv_vp *vp) { return false; }
-#endif /* CONFIG_X86_64 */
static bool mshv_vp_handle_intercept(struct mshv_vp *vp)
{
@@ -1280,7 +1281,7 @@ mshv_map_user_memory(struct mshv_partition *partition,
long ret;
if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
- !access_ok((const void *)mem.userspace_addr, mem.size))
+ !access_ok((const void __user *)mem.userspace_addr, mem.size))
return -EINVAL;
mmap_read_lock(current->mm);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 2a0962a0b441..d882126c1778 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -593,6 +593,13 @@ static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
return false;
/*
+	 * A system-wide suspend or resume transition is in progress. Use PIO
+	 * for transfers, since the DMA hardware may not be ready.
+ */
+ if (pm_suspend_in_progress())
+ return false;
+
+ /*
* When the length of data is less than I2C_DMA_THRESHOLD,
* cpu mode is used directly to avoid low performance.
*/
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 3a04016db2c3..ae609bdd2ec4 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -116,6 +116,7 @@ struct geni_i2c_dev {
dma_addr_t dma_addr;
struct dma_chan *tx_c;
struct dma_chan *rx_c;
+ bool no_dma;
bool gpi_mode;
bool abort_done;
bool is_tx_multi_desc_xfer;
@@ -447,7 +448,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
size_t len = msg->len;
struct i2c_msg *cur;
- dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ dma_buf = gi2c->no_dma ? NULL : i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
geni_se_select_mode(se, GENI_SE_DMA);
else
@@ -486,7 +487,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
size_t len = msg->len;
struct i2c_msg *cur;
- dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ dma_buf = gi2c->no_dma ? NULL : i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
geni_se_select_mode(se, GENI_SE_DMA);
else
@@ -1080,10 +1081,12 @@ static int geni_i2c_probe(struct platform_device *pdev)
goto err_resources;
}
- if (desc && desc->no_dma_support)
+ if (desc && desc->no_dma_support) {
fifo_disable = false;
- else
+ gi2c->no_dma = true;
+ } else {
fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ }
if (fifo_disable) {
/* FIFO is disabled, so we can only use GPI DMA */
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 3e8f126cb7f7..9e3595b3623e 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -670,12 +670,39 @@ static const struct riic_of_data riic_rz_t2h_info = {
static int riic_i2c_suspend(struct device *dev)
{
- struct riic_dev *riic = dev_get_drvdata(dev);
- int ret;
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+	 * Finally, the resume() callback re-enables autosuspend, ensuring the
+	 * I2C controller remains available until the system enters
+	 * suspend_noirq() and again from resume_noirq() onwards.
+ */
+ return pm_runtime_resume_and_get(dev);
+}
- ret = pm_runtime_resume_and_get(dev);
- if (ret)
- return ret;
+static int riic_i2c_resume(struct device *dev)
+{
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int riic_i2c_suspend_noirq(struct device *dev)
+{
+ struct riic_dev *riic = dev_get_drvdata(dev);
i2c_mark_adapter_suspended(&riic->adapter);
@@ -683,12 +710,12 @@ static int riic_i2c_suspend(struct device *dev)
riic_clear_set_bit(riic, ICCR1_ICE, 0, RIIC_ICCR1);
pm_runtime_mark_last_busy(dev);
- pm_runtime_put_sync(dev);
+ pm_runtime_force_suspend(dev);
return reset_control_assert(riic->rstc);
}
-static int riic_i2c_resume(struct device *dev)
+static int riic_i2c_resume_noirq(struct device *dev)
{
struct riic_dev *riic = dev_get_drvdata(dev);
int ret;
@@ -697,6 +724,10 @@ static int riic_i2c_resume(struct device *dev)
if (ret)
return ret;
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
ret = riic_init_hw(riic);
if (ret) {
/*
@@ -714,6 +745,7 @@ static int riic_i2c_resume(struct device *dev)
}
static const struct dev_pm_ops riic_i2c_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend_noirq, riic_i2c_resume_noirq)
SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend, riic_i2c_resume)
};
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index d236aef80a8d..e1e63c2be82b 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -3,6 +3,7 @@
* Helpers for IOMMU drivers implementing SVA
*/
#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 7228a33f6c37..643c8e459611 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -158,11 +158,11 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
tmp_vec.local_id = new_vec->local_id;
/* Point device to the temporary vector */
- imsic_msi_update_msg(d, &tmp_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
}
/* Point device to the new vector */
- imsic_msi_update_msg(d, new_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */
d->chip_data = new_vec;
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index df98144a9539..33521c328a82 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -109,14 +109,14 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
spin_unlock_irq(&dev->lock);
if (filep->f_flags & O_NONBLOCK)
return -EAGAIN;
- wait_event_interruptible(dev->wait, (dev->work ||
+ wait_event_interruptible(dev->wait, (READ_ONCE(dev->work) ||
!list_empty(list)));
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&dev->lock);
}
if (dev->work)
- dev->work = 0;
+ WRITE_ONCE(dev->work, 0);
if (!list_empty(list)) {
timer = list_first_entry(list, struct mISDNtimer, list);
list_del(&timer->list);
@@ -141,13 +141,16 @@ mISDN_poll(struct file *filep, poll_table *wait)
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
if (dev) {
+ u32 work;
+
poll_wait(filep, &dev->wait, wait);
mask = 0;
- if (dev->work || !list_empty(&dev->expired))
+ work = READ_ONCE(dev->work);
+ if (work || !list_empty(&dev->expired))
mask |= (EPOLLIN | EPOLLRDNORM);
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
- dev->work, list_empty(&dev->expired));
+ work, list_empty(&dev->expired));
}
return mask;
}
@@ -172,7 +175,7 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
struct mISDNtimer *timer;
if (!timeout) {
- dev->work = 1;
+ WRITE_ONCE(dev->work, 1);
wake_up_interruptible(&dev->wait);
id = 0;
} else {
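[Editor's note: the pattern being enforced above, in isolation — a flag written on one CPU and tested on another without a shared lock needs WRITE_ONCE()/READ_ONCE() so the compiler cannot tear, fuse, or cache the access. Sketch with illustrative names:]

	#include <linux/compiler.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wait);
	static int example_flag;

	static void example_signal(void)
	{
		WRITE_ONCE(example_flag, 1);
		wake_up_interruptible(&example_wait);
	}

	static int example_wait_for_it(void)
	{
		return wait_event_interruptible(example_wait,
						READ_ONCE(example_flag));
	}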
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 885399ed0776..d34a19453560 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -560,11 +560,6 @@ int led_classdev_register_ext(struct device *parent,
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
led_cdev->brightness_hw_changed = -1;
#endif
- /* add to the list of leds */
- down_write(&leds_list_lock);
- list_add_tail(&led_cdev->node, &leds_list);
- up_write(&leds_list_lock);
-
if (!led_cdev->max_brightness)
led_cdev->max_brightness = LED_FULL;
@@ -574,6 +569,11 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
+ /* add to the list of leds */
+ down_write(&leds_list_lock);
+ list_add_tail(&led_cdev->node, &leds_list);
+ up_write(&leds_list_lock);
+
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
#endif
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7923d052a8d..7d685b01862e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1895,6 +1895,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
+ if (slave_dev->type != ARPHRD_ETHER &&
+ BOND_MODE(bond) == BOND_MODE_8023AD) {
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "8023AD mode requires Ethernet devices");
+ return -EINVAL;
+ }
slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
bond_dev->type, slave_dev->type);
@@ -4113,8 +4119,9 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
case BOND_XMIT_POLICY_ENCAP23:
case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
- return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
- fk, data, l2_proto, nhoff, hlen, 0);
+ return __skb_flow_dissect(dev_net(bond->dev), skb,
+ &flow_keys_bonding, fk, data,
+ l2_proto, nhoff, hlen, 0);
default:
break;
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 7ab9578f5b89..769745e22a3c 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -332,6 +332,7 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
+ can_set_cap(dev, CAN_CAP_CC);
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index de8e212a1366..4c219a5b139b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -486,11 +486,17 @@ resubmit_urb:
urb->transfer_buffer, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!retval)
+ return;
+
+ usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
- else if (retval)
+ else
netdev_err(netdev,
"failed resubmitting read bulk urb: %d\n", retval);
}
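[Editor's note: the same anchor-before-submit fix repeats in the esd_usb, gs_usb, kvaser_usb, mcba_usb and usb_8dev hunks below. The pattern in isolation: anchoring before usb_submit_urb() ensures a concurrent disconnect calling usb_kill_anchored_urbs() can see and cancel the URB; a failed submit must unanchor again so the anchor does not keep a dead URB.]

	usb_anchor_urb(urb, &priv->rx_submitted);

	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (!retval)
		return;			/* URB in flight, stays anchored */

	usb_unanchor_urb(urb);		/* submit failed: take it back out */
	/* then: netif_device_detach() on -ENODEV, log other errors */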
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 08da507faef4..8cc924c47042 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -541,13 +541,20 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ return;
+
+ usb_unanchor_urb(urb);
+
if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
- } else if (err) {
+ } else {
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index d093babbc320..192338b481f2 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -754,6 +754,10 @@ resubmit_urb:
usb_anchor_urb(urb, &parent->rx_submitted);
rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!rc)
+ return;
+
+ usb_unanchor_urb(urb);
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
@@ -762,6 +766,9 @@ device_detach:
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
+ } else if (rc != -ESHUTDOWN && net_ratelimit()) {
+ netdev_info(netdev, "failed to re-submit IN URB: %pe\n",
+ ERR_PTR(urb->status));
}
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 62701ec34272..d0a2a2a33c1c 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -361,7 +361,14 @@ resubmit_urb:
urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
kvaser_usb_read_bulk_callback, dev);
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ return;
+
+ usb_unanchor_urb(urb);
+
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
struct kvaser_usb_net_priv *priv;
@@ -372,7 +379,7 @@ resubmit_urb:
netif_device_detach(priv->netdev);
}
- } else if (err) {
+ } else {
dev_err(&dev->intf->dev,
"Failed resubmitting read bulk urb: %d\n", err);
}
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 41c0a1c399bf..04170326dc7e 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -608,11 +608,17 @@ resubmit_urb:
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!retval)
+ return;
+
+ usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
- else if (retval)
+ else
netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
retval);
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 7449328f7cd7..3125cf59d002 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -541,11 +541,17 @@ resubmit_urb:
urb->transfer_buffer, RX_BUFFER_SIZE,
usb_8dev_read_bulk_callback, priv);
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!retval)
+ return;
+
+ usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
- else if (retval)
+ else
netdev_err(netdev,
"failed resubmitting read bulk urb: %d\n", retval);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3ddd896d6987..b5a60a048896 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1837,7 +1837,7 @@ static void xgbe_get_stats64(struct net_device *netdev,
s->multicast = pstats->rxmulticastframes_g;
s->rx_length_errors = pstats->rxlengtherror;
s->rx_crc_errors = pstats->rxcrcerror;
- s->rx_fifo_errors = pstats->rxfifooverflow;
+ s->rx_over_errors = pstats->rxfifooverflow;
s->tx_packets = pstats->txframecount_gb;
s->tx_bytes = pstats->txoctetcount_gb;
@@ -2292,9 +2292,6 @@ read_again:
goto read_again;
if (error || packet->errors) {
- if (packet->errors)
- netif_err(pdata, rx_err, netdev,
- "error in received packet\n");
dev_kfree_skb(skb);
goto next_packet;
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index fd35f4b4dc50..014340f33345 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -156,7 +156,7 @@ static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
ASP_RX_FILTER_NET_OFFSET_L4(32),
ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
- rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->ch) |
ASP_RX_FILTER_NET_CFG_EN |
ASP_RX_FILTER_NET_CFG_L2_EN |
ASP_RX_FILTER_NET_CFG_L3_EN |
@@ -166,7 +166,7 @@ static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
- rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->ch) |
ASP_RX_FILTER_NET_CFG_EN |
ASP_RX_FILTER_NET_CFG_L2_EN |
ASP_RX_FILTER_NET_CFG_L3_EN |
@@ -714,6 +714,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
nfilter = &priv->net_filters[open_index];
nfilter->claimed = true;
nfilter->port = intf->port;
+ nfilter->ch = intf->channel + priv->tx_chan_offset;
nfilter->hw_index = open_index;
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 74adfdb50e11..e238507be40a 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -348,6 +348,7 @@ struct bcmasp_net_filter {
bool wake_filter;
int port;
+ int ch;
unsigned int hw_index;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 3a032d4ac598..eab81e073e1e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3801,6 +3801,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
{
int status;
bool pmac_valid = false;
+ u32 pmac_id;
eth_zero_addr(mac);
@@ -3813,7 +3814,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
adapter->if_handle, 0);
} else {
status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
- NULL, adapter->if_handle, 0);
+ &pmac_id, adapter->if_handle, 0);
}
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b633c6e2bab7..52e10467b3e4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2141,7 +2141,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
struct be_aic_obj *aic;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- u64 rx_pkts = 0, tx_pkts = 0;
+ u64 rx_pkts = 0, tx_pkts = 0, pkts;
ulong now;
u32 pps, delta;
int i;
@@ -2157,15 +2157,17 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
start = u64_stats_fetch_begin(&rxo->stats.sync);
- rx_pkts += rxo->stats.rx_pkts;
+ pkts = rxo->stats.rx_pkts;
} while (u64_stats_fetch_retry(&rxo->stats.sync, start));
+ rx_pkts += pkts;
}
for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
start = u64_stats_fetch_begin(&txo->stats.sync);
- tx_pkts += txo->stats.tx_reqs;
+ pkts = txo->stats.tx_reqs;
} while (u64_stats_fetch_retry(&txo->stats.sync, start));
+ tx_pkts += pkts;
}
/* Skip, if wrapped around or first calculation */
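
This be2net hunk, together with the hns3, idpf and veth changes later in the series, fixes the same misuse of the u64_stats API: accumulating into a running total inside the fetch/retry loop double-counts whenever u64_stats_fetch_retry() forces a retry, since the loop body re-runs without undoing the earlier addition. The correct shape is to snapshot into a local inside the loop and fold it into the total only after the retry check passes. A minimal sketch with a hypothetical stats layout (drivers vary between plain u64 counters, as here, and u64_stats_t read via u64_stats_read()):

#include <linux/u64_stats_sync.h>

struct my_ring_stats {			/* hypothetical per-ring stats */
	struct u64_stats_sync syncp;
	u64 packets;
};

static u64 my_sum_packets(struct my_ring_stats *rings, int n)
{
	u64 total = 0, snap;
	unsigned int start;
	int i;

	for (i = 0; i < n; i++) {
		do {
			start = u64_stats_fetch_begin(&rings[i].syncp);
			snap = rings[i].packets;	/* snapshot only */
		} while (u64_stats_fetch_retry(&rings[i].syncp, start));
		total += snap;		/* accumulate once, outside the loop */
	}
	return total;
}
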
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index cfb56bf0e361..a1405c928525 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1150,7 +1150,7 @@ fec_restart(struct net_device *ndev)
u32 rcntl = FEC_RCR_MII;
if (OPT_ARCH_HAS_MAX_FL)
- rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
+ rcntl |= (fep->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN) << 16;
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
@@ -1285,12 +1285,13 @@ fec_restart(struct net_device *ndev)
/* When Jumbo Frame is enabled, the FIFO may not be large enough
* to hold an entire frame. In such cases, if the MTU exceeds
- * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
- * to operate in cut-through mode, triggered by the FIFO threshold.
+ * (PKT_MAXBUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN), configure
+ * the interface to operate in cut-through mode, triggered by
+ * the FIFO threshold.
* Otherwise, enable the ENET store-and-forward mode.
*/
if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
- (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ (ndev->mtu > (PKT_MAXBUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)))
writel(0xF, fep->hwp + FEC_X_WMRK);
else
writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
@@ -4039,7 +4040,7 @@ static int fec_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ order = get_order(new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN
+ FEC_DRV_RESERVE_SPACE);
fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
fep->pagepool_order = order;
@@ -4590,7 +4591,7 @@ fec_probe(struct platform_device *pdev)
else
fep->max_buf_size = PKT_MAXBUF_SIZE;
- ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
+ ndev->max_mtu = fep->max_buf_size - VLAN_ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
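
The FEC change is a buffer-sizing correction: VLAN_ETH_HLEN (ETH_HLEN's 14 bytes plus a 4-byte 802.1Q tag, 18 in total) is the headroom a tagged frame actually needs. Assuming the driver's classic 1522-byte PKT_MAXBUF_SIZE, the difference is exactly the VLAN tag:

	old: max_mtu = 1522 - ETH_HLEN (14)      - ETH_FCS_LEN (4) = 1504
	new: max_mtu = 1522 - VLAN_ETH_HLEN (18) - ETH_FCS_LEN (4) = 1500

With the old math, a tagged frame at max MTU came to 14 + 4 + 1504 + 4 = 1526 bytes, overrunning the 1522-byte buffer by the size of the tag; the new math leaves room for it.
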
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index affd5a6c44e7..131d1210dc4a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1602,8 +1602,10 @@ static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
+ if (!tbiphy) {
pr_warn("Could not get TBI device\n");
+ return;
+ }
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 7b9269f6fdfc..a47464a22751 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2530,44 +2530,47 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
struct hns3_enet_ring *ring, bool is_tx)
{
+ struct ring_stats ring_stats;
unsigned int start;
do {
start = u64_stats_fetch_begin(&ring->syncp);
- if (is_tx) {
- stats->tx_bytes += ring->stats.tx_bytes;
- stats->tx_packets += ring->stats.tx_pkts;
- stats->tx_dropped += ring->stats.sw_err_cnt;
- stats->tx_dropped += ring->stats.tx_vlan_err;
- stats->tx_dropped += ring->stats.tx_l4_proto_err;
- stats->tx_dropped += ring->stats.tx_l2l3l4_err;
- stats->tx_dropped += ring->stats.tx_tso_err;
- stats->tx_dropped += ring->stats.over_max_recursion;
- stats->tx_dropped += ring->stats.hw_limitation;
- stats->tx_dropped += ring->stats.copy_bits_err;
- stats->tx_dropped += ring->stats.skb2sgl_err;
- stats->tx_dropped += ring->stats.map_sg_err;
- stats->tx_errors += ring->stats.sw_err_cnt;
- stats->tx_errors += ring->stats.tx_vlan_err;
- stats->tx_errors += ring->stats.tx_l4_proto_err;
- stats->tx_errors += ring->stats.tx_l2l3l4_err;
- stats->tx_errors += ring->stats.tx_tso_err;
- stats->tx_errors += ring->stats.over_max_recursion;
- stats->tx_errors += ring->stats.hw_limitation;
- stats->tx_errors += ring->stats.copy_bits_err;
- stats->tx_errors += ring->stats.skb2sgl_err;
- stats->tx_errors += ring->stats.map_sg_err;
- } else {
- stats->rx_bytes += ring->stats.rx_bytes;
- stats->rx_packets += ring->stats.rx_pkts;
- stats->rx_dropped += ring->stats.l2_err;
- stats->rx_errors += ring->stats.l2_err;
- stats->rx_errors += ring->stats.l3l4_csum_err;
- stats->rx_crc_errors += ring->stats.l2_err;
- stats->multicast += ring->stats.rx_multicast;
- stats->rx_length_errors += ring->stats.err_pkt_len;
- }
+ ring_stats = ring->stats;
} while (u64_stats_fetch_retry(&ring->syncp, start));
+
+ if (is_tx) {
+ stats->tx_bytes += ring_stats.tx_bytes;
+ stats->tx_packets += ring_stats.tx_pkts;
+ stats->tx_dropped += ring_stats.sw_err_cnt;
+ stats->tx_dropped += ring_stats.tx_vlan_err;
+ stats->tx_dropped += ring_stats.tx_l4_proto_err;
+ stats->tx_dropped += ring_stats.tx_l2l3l4_err;
+ stats->tx_dropped += ring_stats.tx_tso_err;
+ stats->tx_dropped += ring_stats.over_max_recursion;
+ stats->tx_dropped += ring_stats.hw_limitation;
+ stats->tx_dropped += ring_stats.copy_bits_err;
+ stats->tx_dropped += ring_stats.skb2sgl_err;
+ stats->tx_dropped += ring_stats.map_sg_err;
+ stats->tx_errors += ring_stats.sw_err_cnt;
+ stats->tx_errors += ring_stats.tx_vlan_err;
+ stats->tx_errors += ring_stats.tx_l4_proto_err;
+ stats->tx_errors += ring_stats.tx_l2l3l4_err;
+ stats->tx_errors += ring_stats.tx_tso_err;
+ stats->tx_errors += ring_stats.over_max_recursion;
+ stats->tx_errors += ring_stats.hw_limitation;
+ stats->tx_errors += ring_stats.copy_bits_err;
+ stats->tx_errors += ring_stats.skb2sgl_err;
+ stats->tx_errors += ring_stats.map_sg_err;
+ } else {
+ stats->rx_bytes += ring_stats.rx_bytes;
+ stats->rx_packets += ring_stats.rx_pkts;
+ stats->rx_dropped += ring_stats.l2_err;
+ stats->rx_errors += ring_stats.l2_err;
+ stats->rx_errors += ring_stats.l3l4_csum_err;
+ stats->rx_crc_errors += ring_stats.l2_err;
+ stats->multicast += ring_stats.rx_multicast;
+ stats->rx_length_errors += ring_stats.err_pkt_len;
+ }
}
static void hns3_nic_get_stats64(struct net_device *netdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 9bb708fa42f2..416e02e7b995 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -731,7 +731,7 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
#define HCLGE_FD_AD_COUNTER_NUM_S 13
-#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(19, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
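
The one-bit change fixes an overlap between adjacent fields: GENMASK(20, 13) is eight bits wide and includes bit 20, which is already HCLGE_FD_AD_NXT_STEP_B, so writing the counter number could clobber the next-step flag. GENMASK(19, 13) keeps the counter field at seven bits, ending just below the flag. Spelled out:

/* bits 13..19: COUNTER_NUM  GENMASK(19, 13) = 0x000fe000
 * bit  20    : NXT_STEP     BIT(20)         = 0x00100000
 *
 * old mask: GENMASK(20, 13) = 0x001fe000, and
 * GENMASK(20, 13) & BIT(20) != 0 -- the two fields overlapped.
 */
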
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c589baea7c77..b8e2aa19f9e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5690,7 +5690,7 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
- action->counter_id);
+ action->next_input_key);
req->ad_data = cpu_to_le64(ad_data);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
index 6950ee4d037b..e7d6c2033b45 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -63,21 +63,12 @@ static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
napi_enable(&irq_cfg->napi);
}
static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
napi_disable(&irq_cfg->napi);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_RX, NULL);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_TX, NULL);
- netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
netif_napi_del(&irq_cfg->napi);
}
@@ -240,6 +231,11 @@ int hinic3_qps_irq_init(struct net_device *netdev)
INIT_WORK(&irq_cfg->rxq->dim.work, hinic3_rx_dim_work);
irq_cfg->rxq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC3_SET_MSIX_AUTO_MASK);
@@ -254,6 +250,10 @@ err_release_irqs:
q_id--;
irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
qp_del_napi(irq_cfg);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
@@ -276,6 +276,10 @@ void hinic3_qps_irq_uninit(struct net_device *netdev)
for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
qp_del_napi(irq_cfg);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index d88b7f3fd1f9..2ef39cc70c21 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -460,6 +460,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf)
ice_vsi_decfg(ice_get_main_vsi(pf));
rtnl_unlock();
ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
ice_deinit_dev(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 147aaee192a7..00f75d87c73f 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -979,6 +979,7 @@ void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index c0a19f232538..64e798b8f18f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2214,7 +2214,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
- timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
+ timeout = jiffies + 10 * usecs_to_jiffies(ICE_CTL_Q_SQ_CMD_TIMEOUT);
do {
status = ice_aq_release_res(hw, res, 0, NULL);
if (status != -EIO)
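
The ice timeout fix is a unit correction: judging by the fix, ICE_CTL_Q_SQ_CMD_TIMEOUT is expressed in microseconds, so adding it straight to jiffies made the intended "10x command timeout" deadline wrong by an HZ-dependent factor; usecs_to_jiffies() makes the conversion explicit. A sketch of the idiom, with a hypothetical microsecond budget:

#include <linux/delay.h>
#include <linux/jiffies.h>

#define MY_CMD_TIMEOUT_US	1000000	/* hypothetical: 1 s, in usecs */

static void my_retry_until_deadline(void)
{
	/* Wrong: "jiffies + 10 * MY_CMD_TIMEOUT_US" mixes units.
	 * Right: convert the microsecond budget to jiffies first.
	 */
	unsigned long timeout = jiffies +
				10 * usecs_to_jiffies(MY_CMD_TIMEOUT_US);

	while (!time_after(jiffies, timeout)) {
		/* retry the operation, then back off briefly */
		msleep(10);
	}
}
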
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 969d4f8f9c02..3565a5d96c6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3626,11 +3626,7 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
if (!lut)
return -ENOMEM;
- err = ice_get_rss_key(vsi, rxfh->key);
- if (err)
- goto out;
-
- err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size);
+ err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
if (err)
goto out;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 44f3c2bab308..6dabac51e1f9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -400,6 +400,8 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
+ u64_stats_init(&ring_stats->syncp);
+
WRITE_ONCE(tx_ring_stats[i], ring_stats);
}
@@ -419,6 +421,8 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
+ u64_stats_init(&ring_stats->syncp);
+
WRITE_ONCE(rx_ring_stats[i], ring_stats);
}
@@ -3809,22 +3813,31 @@ int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_pf *pf = vsi->back;
struct ice_vlan vlan;
int err;
- vlan = ICE_VLAN(0, 0, 0);
- err = vlan_ops->del_vlan(vsi, &vlan);
- if (err && err != -EEXIST)
- return err;
+ if (pf->lag && pf->lag->primary) {
+ dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting prune list\n");
+ } else {
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+ }
/* in SVM both VLAN 0 filters are identical */
if (!ice_is_dvm_ena(&vsi->back->hw))
return 0;
- vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
- err = vlan_ops->del_vlan(vsi, &vlan);
- if (err && err != -EEXIST)
- return err;
+ if (pf->lag && pf->lag->primary) {
+ dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting QinQ prune list\n");
+ } else {
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+ }
/* when deleting the last VLAN filter, make sure to disable the VLAN
* promisc mode so the filter isn't left by accident
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4bb68e7a00f5..de488185cd4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4836,6 +4836,7 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_dpll_deinit(pf);
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
xa_destroy(&pf->eswitch.reprs);
+ ice_hwmon_exit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5437,8 +5438,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
- ice_hwmon_exit(pf);
-
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
@@ -7989,6 +7988,34 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
}
/**
+ * ice_get_rss - Get RSS LUT and/or key
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the key in
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return: 0 on success, negative on failure
+ */
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ int err;
+
+ if (seed) {
+ err = ice_get_rss_key(vsi, seed);
+ if (err)
+ return err;
+ }
+
+ if (lut) {
+ err = ice_get_rss_lut(vsi, lut, lut_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_set_rss_hfunc - Set RSS HASH function
* @vsi: Pointer to VSI structure
* @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 3e1052d070cf..0a8b50350b86 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -108,11 +108,11 @@ static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
ptp_read_system_prets(sts);
idpf_ptp_enable_shtime(adapter);
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
/* Read the system timestamp post PHC read */
ptp_read_system_postts(sts);
- lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
spin_unlock(&ptp->read_dev_clk_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 97a5fe766b6b..66ba645e8b90 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3956,7 +3956,7 @@ static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
static void idpf_net_dim(struct idpf_q_vector *q_vector)
{
struct dim_sample dim_sample = { };
- u64 packets, bytes;
+ u64 packets, bytes, pkts, bts;
u32 i;
if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
@@ -3968,9 +3968,12 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- packets += u64_stats_read(&txq->q_stats.packets);
- bytes += u64_stats_read(&txq->q_stats.bytes);
+ pkts = u64_stats_read(&txq->q_stats.packets);
+ bts = u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
+
+ packets += pkts;
+ bytes += bts;
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
@@ -3987,9 +3990,12 @@ check_rx_itr:
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- packets += u64_stats_read(&rxq->q_stats.packets);
- bytes += u64_stats_read(&rxq->q_stats.bytes);
+ pkts = u64_stats_read(&rxq->q_stats.packets);
+ bts = u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+
+ packets += pkts;
+ bytes += bts;
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 498ba1522ca4..9482ab11f050 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -443,9 +443,10 @@
#define IGC_TXPBSIZE_DEFAULT ( \
IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+/* TSN value following I225/I226 SW User Manual Section 7.5.4 */
#define IGC_TXPBSIZE_TSN ( \
- IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
- IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
+ IGC_TXPB0SIZE(5) | IGC_TXPB1SIZE(5) | IGC_TXPB2SIZE(5) | \
+ IGC_TXPB3SIZE(5) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index e94c1922b97a..3172cdbca9cc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1565,8 +1565,8 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
- /* Do not allow channel reconfiguration when mqprio is enabled */
- if (adapter->strict_priority_enable)
+ /* Do not allow channel reconfiguration when any TSN qdisc is enabled */
+ if (adapter->flags & IGC_FLAG_TSN_ANY_ENABLED)
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 7aafa60ba0c8..89a321a344d2 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7759,6 +7759,11 @@ int igc_reinit_queues(struct igc_adapter *adapter)
if (netif_running(netdev))
err = igc_open(netdev);
+ if (!err) {
+ /* Restore default IEEE 802.1Qbv schedule after queue reinit */
+ igc_tsn_clear_schedule(adapter);
+ }
+
return err;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index b7b46d863bee..7aae83c108fd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -774,36 +774,43 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ u32 txstmpl_old;
u64 regval;
u32 mask;
int i;
+ /* Establish baseline of TXSTMPL_0 before checking TXTT_0.
+ * This baseline is used to detect if a new timestamp arrives in
+ * register 0 during the hardware bug workaround below.
+ */
+ txstmpl_old = rd32(IGC_TXSTMPL);
+
mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY;
if (mask & IGC_TSYNCTXCTL_TXTT_0) {
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
} else {
- /* There's a bug in the hardware that could cause
- * missing interrupts for TX timestamping. The issue
- * is that for new interrupts to be triggered, the
- * IGC_TXSTMPH_0 register must be read.
+ /* TXTT_0 not set - register 0 has no new timestamp initially.
+ *
+ * Hardware bug: Future timestamp interrupts won't fire unless
+ * TXSTMPH_0 is read, even if the timestamp was captured in
+ * registers 1-3.
*
- * To avoid discarding a valid timestamp that just
- * happened at the "wrong" time, we need to confirm
- * that there was no timestamp captured, we do that by
- * assuming that no two timestamps in sequence have
- * the same nanosecond value.
+ * Workaround: Read TXSTMPH_0 here to enable future interrupts.
+ * However, this read clears TXTT_0. If a timestamp arrives in
+ * register 0 after checking TXTT_0 but before this read, it
+ * would be lost.
*
- * So, we read the "low" register, read the "high"
- * register (to latch a new timestamp) and read the
- * "low" register again, if "old" and "new" versions
- * of the "low" register are different, a valid
- * timestamp was captured, we can read the "high"
- * register again.
+	 * To detect this race, we saved a baseline read of TXSTMPL_0
+	 * before the TXTT_0 check. After performing the workaround read of
+ * TXSTMPH_0, we read TXSTMPL_0 again. Since consecutive
+ * timestamps never share the same nanosecond value, a change
+ * between the baseline and new TXSTMPL_0 indicates a timestamp
+ * arrived during the race window. If so, read the complete
+ * timestamp.
*/
- u32 txstmpl_old, txstmpl_new;
+ u32 txstmpl_new;
- txstmpl_old = rd32(IGC_TXSTMPL);
rd32(IGC_TXSTMPH);
txstmpl_new = rd32(IGC_TXSTMPL);
@@ -818,7 +825,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
done:
/* Now that the problematic first register was handled, we can
- * use retrieve the timestamps from the other registers
+ * retrieve the timestamps from the other registers
* (starting from '1') with less complications.
*/
for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 2d78e08f985f..747fbdf2a908 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1551,8 +1551,8 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
return -ENODEV;
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
- int num_lfs, struct rsrc_attach *attach)
+static int rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1562,21 +1562,21 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
u64 cfg;
if (!num_lfs)
- return;
+ return -EINVAL;
blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
- return;
+ return -EFAULT;
block = &hw->block[blkaddr];
if (!block->lf.bmap)
- return;
+ return -ESRCH;
for (slot = 0; slot < num_lfs; slot++) {
/* Allocate the resource */
lf = rvu_alloc_rsrc(&block->lf);
if (lf < 0)
- return;
+ return -EFAULT;
cfg = (1ULL << 63) | (pcifunc << 8) | slot;
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
@@ -1587,6 +1587,8 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
/* Set start MSIX vector for this LF within this PF/VF */
rvu_set_msix_offset(rvu, pfvf, block, lf);
}
+
+ return 0;
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
@@ -1724,22 +1726,31 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
int err;
/* If first request, detach all existing attached resources */
- if (!attach->modify)
- rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ if (!attach->modify) {
+ err = rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ if (err)
+ return err;
+ }
mutex_lock(&rvu->rsrc_lock);
/* Check if the request can be accommodated */
err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
if (err)
- goto exit;
+ goto fail1;
/* Now attach the requested resources */
- if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+ if (attach->npalf) {
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+ if (err)
+ goto fail1;
+ }
- if (attach->nixlf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+ if (attach->nixlf) {
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+ if (err)
+ goto fail2;
+ }
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1749,33 +1760,64 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
- attach->sso, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
+ if (err)
+ goto fail3;
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
- attach->ssow, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
+ if (err)
+ goto fail4;
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
- attach->timlfs, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
+ if (err)
+ goto fail5;
}
if (attach->cptlfs) {
if (attach->modify &&
rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
- attach->cptlfs, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
+ if (err)
+ goto fail6;
}
-exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+
+fail6:
+ if (attach->timlfs)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+
+fail5:
+ if (attach->ssow)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+
+fail4:
+ if (attach->sso)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+
+fail3:
+ if (attach->nixlf)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_NIX);
+
+fail2:
+ if (attach->npalf)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_NPA);
+
+fail1:
mutex_unlock(&rvu->rsrc_lock);
return err;
}
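
The reworked mbox handler is a textbook unwind ladder: every attach step that can fail jumps to a label that detaches everything attached so far, in reverse order, so a half-completed attach never leaks LFs. Stripped to a skeleton, with hypothetical names standing in for the rvu_attach_block()/rvu_detach_block() pairs:

struct my_dev;

int my_attach_a(struct my_dev *d);
int my_attach_b(struct my_dev *d);
int my_attach_c(struct my_dev *d);
void my_detach_a(struct my_dev *d);
void my_detach_b(struct my_dev *d);

int my_attach_all(struct my_dev *d)
{
	int err;

	err = my_attach_a(d);
	if (err)
		return err;

	err = my_attach_b(d);
	if (err)
		goto undo_a;

	err = my_attach_c(d);
	if (err)
		goto undo_b;

	return 0;

undo_b:			/* tear down in reverse order of setup */
	my_detach_b(d);
undo_a:
	my_detach_a(d);
	return err;
}
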
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 3abd750a4bd7..3d91a34f8b57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -1222,6 +1222,9 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
u8 cgx_idx, lmac;
void *cgxd;
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index e4a5f9fa6fd4..bbfd8231aed5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -56,7 +56,7 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
- if (rvu->fwdata->channel_data.valid) {
+ if (rvu->fwdata && rvu->fwdata->channel_data.valid) {
sdp_pf_num[0] = 0;
pfvf = &rvu->pf[sdp_pf_num[0]];
pfvf->sdp_info = &rvu->fwdata->channel_data.info;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 4c7e0f345cb5..060c715ebad0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -328,7 +328,7 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
req->mask[0] = ~0ULL;
- req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
+ req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;
req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
req->mask[1] = ~0ULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e616a727a3a9..8cdfc36d79d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -940,13 +940,8 @@ static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
size_t offset, size_t size,
enum dma_data_direction dir)
{
- dma_addr_t iova;
-
- iova = dma_map_page_attrs(pfvf->dev, page,
+ return dma_map_page_attrs(pfvf->dev, page,
offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova)))
- return (dma_addr_t)NULL;
- return iova;
}
static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a7feb4c392b3..6b2d8559f0eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3249,7 +3249,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 59e5fb2e7e05..347a0078f622 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4365,11 +4365,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
+ bool set_ic, is_last_segment;
u32 pay_len, mss, queue;
int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
dma_addr_t des;
- bool set_ic;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4557,10 +4557,16 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_tx_timestamp(priv, first);
}
+ /* If we only have one entry used, then the first entry is the last
+ * segment.
+ */
+ is_last_segment = ((tx_q->cur_tx - first_entry) &
+ (priv->dma_conf.dma_tx_size - 1)) == 1;
+
/* Complete the first descriptor before granting the DMA */
stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
- tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ is_last_segment, hdr / 4,
+ skb->len - proto_hdr_len);
/* If context desc is used to change MSS */
if (mss_desc) {
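
The is_last_segment computation relies on the TX ring size being a power of two, so the distance between two ring indices is just a masked subtraction: (cur_tx - first_entry) & (dma_tx_size - 1). A distance of 1 means only one descriptor was consumed, i.e. the first descriptor is also the last segment of the frame. The mask handles wraparound for free: with a hypothetical ring of 16 entries, first_entry = 15 and cur_tx = 0 give (0 - 15) & 15 = 1, correctly counting one consumed descriptor across the wrap.
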
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
index 62d7f47d4f8d..f0514251d4f3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -70,7 +70,7 @@ int txgbe_test_hostif(struct wx *wx)
buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
- WX_HI_COMMAND_TIMEOUT, true);
+ WX_HI_COMMAND_TIMEOUT, false);
}
int txgbe_read_eeprom_hostif(struct wx *wx,
@@ -148,7 +148,7 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
buffer.duplex = duplex;
return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
- WX_HI_COMMAND_TIMEOUT, true);
+ WX_HI_COMMAND_TIMEOUT, false);
}
static void txgbe_get_link_capabilities(struct wx *wx, int *speed,
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 50de3ee204db..80f84fc87008 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -69,7 +69,6 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
- spinlock_t addrs_lock;
};
struct ipvl_addr {
@@ -90,6 +89,7 @@ struct ipvl_port {
struct net_device *dev;
possible_net_t pnet;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+ spinlock_t addrs_lock; /* guards hash-table and addrs */
struct list_head ipvlans;
u16 mode;
u16 flags;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2efa3ba148aa..bdb3a46b327c 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -107,17 +107,15 @@ void ipvlan_ht_addr_del(struct ipvl_addr *addr)
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6)
{
- struct ipvl_addr *addr, *ret = NULL;
+ struct ipvl_addr *addr;
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
- if (addr_equal(is_v6, addr, iaddr)) {
- ret = addr;
- break;
- }
+ assert_spin_locked(&ipvlan->port->addrs_lock);
+
+ list_for_each_entry(addr, &ipvlan->addrs, anode) {
+ if (addr_equal(is_v6, addr, iaddr))
+ return addr;
}
- rcu_read_unlock();
- return ret;
+ return NULL;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 660f3db11766..baccdad695fd 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,6 +75,7 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+ spin_lock_init(&port->addrs_lock);
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
ida_init(&port->ida);
@@ -181,6 +182,7 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -189,10 +191,10 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+ spin_lock_bh(&port->addrs_lock);
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
- rcu_read_unlock();
+ spin_unlock_bh(&port->addrs_lock);
return 0;
}
@@ -206,10 +208,10 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+ spin_lock_bh(&ipvlan->port->addrs_lock);
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
- rcu_read_unlock();
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return 0;
}
@@ -579,7 +581,6 @@ int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
- spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -657,13 +658,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
ida_free(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
@@ -817,6 +818,8 @@ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
+ assert_spin_locked(&ipvlan->port->addrs_lock);
+
addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
@@ -847,16 +850,16 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr) {
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return;
}
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
kfree_rcu(addr, rcu);
}
@@ -878,14 +881,14 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -924,21 +927,24 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
NL_SET_ERR_MSG(i6vi->extack,
"Address already assigned to an ipvlan device");
- return notifier_from_errno(-EADDRINUSE);
+ ret = notifier_from_errno(-EADDRINUSE);
}
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
- return NOTIFY_OK;
+ return ret;
}
#endif
@@ -946,14 +952,14 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -995,21 +1001,24 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
struct in_validator_info *ivi = (struct in_validator_info *)ptr;
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
NL_SET_ERR_MSG(ivi->extack,
"Address already assigned to an ipvlan device");
- return notifier_from_errno(-EADDRINUSE);
+ ret = notifier_from_errno(-EADDRINUSE);
}
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
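
The ipvlan rework closes a check-then-act race: the busy check and the address insertion used to run under different protection (RCU readers plus a per-slave lock), so two slaves on the same port could both pass ipvlan_addr_busy() and add the same address. Hoisting the lock to the port and holding it across check-plus-add -- including in the validator notifiers above -- makes the sequence atomic. The core shape, with hypothetical helpers standing in for ipvlan_addr_busy()/ipvlan_add_addr():

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_port {			/* hypothetical port state */
	spinlock_t addrs_lock;
	/* hash table and per-slave address lists live here */
};

bool my_addr_busy(struct my_port *port, const void *addr);
int my_add_addr(struct my_port *port, const void *addr); /* GFP_ATOMIC inside */

static int my_port_add_addr(struct my_port *port, const void *addr)
{
	int ret;

	spin_lock_bh(&port->addrs_lock);	/* one lock spans check + add */
	if (my_addr_busy(port, addr))
		ret = -EADDRINUSE;
	else
		ret = my_add_addr(port, addr);
	spin_unlock_bh(&port->addrs_lock);

	return ret;
}
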
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 49537d3c4120..5f17f68f3c08 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -244,7 +244,9 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+ mutex_lock(&nsim_dev->progs_list_lock);
list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
+ mutex_unlock(&nsim_dev->progs_list_lock);
prog->aux->offload->dev_priv = state;
@@ -273,12 +275,16 @@ static int nsim_bpf_translate(struct bpf_prog *prog)
static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
+ struct nsim_dev *nsim_dev;
state = prog->aux->offload->dev_priv;
+ nsim_dev = state->nsim_dev;
WARN(state->is_loaded,
"offload state destroyed while program still bound");
debugfs_remove_recursive(state->ddir);
+ mutex_lock(&nsim_dev->progs_list_lock);
list_del(&state->l);
+ mutex_unlock(&nsim_dev->progs_list_lock);
kfree(state);
}
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 2683a989873e..dfd571b22107 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1647,6 +1647,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
+ mutex_init(&nsim_dev->progs_list_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
@@ -1785,6 +1786,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
devl_unregister(devlink);
kfree(nsim_dev->vfconfigs);
kfree(nsim_dev->fa_cookie);
+ mutex_destroy(&nsim_dev->progs_list_lock);
devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index d1a941e2b18f..46c67983c517 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -324,6 +324,7 @@ struct nsim_dev {
u32 prog_id_gen;
struct list_head bpf_bound_progs;
struct list_head bpf_bound_maps;
+ struct mutex progs_list_lock;
struct netdev_phys_item_id switch_id;
struct list_head port_list;
bool fw_update_status;
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 74dbce205f71..44006bb6ac0b 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -96,12 +96,10 @@ static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
{
switch (interface) {
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
- case PHY_INTERFACE_MODE_2500BASEX:
- return LINK_INBAND_DISABLE;
-
default:
return 0;
}
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 9766dd99afaa..12ff4c1f285d 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -277,7 +277,7 @@ static int xway_gphy_init_leds(struct phy_device *phydev)
static int xway_gphy_config_init(struct phy_device *phydev)
{
- struct device_node *np = phydev->mdio.dev.of_node;
+ struct device_node *np;
int err;
/* Mask all interrupts */
@@ -286,7 +286,10 @@ static int xway_gphy_config_init(struct phy_device *phydev)
return err;
/* Use default LED configuration if 'leds' node isn't defined */
- if (!of_get_child_by_name(np, "leds"))
+ np = of_get_child_by_name(phydev->mdio.dev.of_node, "leds");
+ if (np)
+ of_node_put(np);
+ else
xway_gphy_init_leds(phydev);
/* Clear all pending interrupts */
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 84bef5099dda..47f095bd91ce 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -519,6 +519,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+ SFP_QUIRK_F("H-COM", "SPP425H-GAB4", sfp_fixup_potron),
+
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
// 2600MBd in their EEPROM
SFP_QUIRK_S("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 8b6d6a1b3c2e..2b4716ccf0c5 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -604,10 +604,6 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
- USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
- .driver_info = (unsigned long)&dm9601_info,
- },
- {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 36742e64cff7..9280ef544bbb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1821,9 +1821,12 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_NOARP) != 0)
net->flags |= IFF_NOARP;
- /* maybe the remote can't receive an Ethernet MTU */
- if (net->mtu > (dev->hard_mtu - net->hard_header_len))
- net->mtu = dev->hard_mtu - net->hard_header_len;
+ if (net->max_mtu > (dev->hard_mtu - net->hard_header_len))
+ net->max_mtu = dev->hard_mtu - net->hard_header_len;
+
+ if (net->mtu > net->max_mtu)
+ net->mtu = net->max_mtu;
+
} else if (!info->in || !info->out)
status = usbnet_get_endpoints(dev, udev);
else {
@@ -1984,6 +1987,7 @@ int usbnet_resume(struct usb_interface *intf)
} else {
netif_trans_update(dev->net);
__skb_queue_tail(&dev->txq, skb);
+ netdev_sent_queue(dev->net, skb->len);
}
}
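
In numbers, assuming a hypothetical device with dev->hard_mtu = 1514 and net->hard_header_len = 14: the usbnet change clamps net->max_mtu to 1514 - 14 = 1500 and pulls any larger current MTU down to match, so a later `ip link set ... mtu 9000` is rejected by the core's range check instead of reaching a device that cannot carry the frame. The second hunk looks like byte-queue-limit bookkeeping: a deferred skb re-queued to txq on resume must be counted with netdev_sent_queue() so the completion path's accounting stays balanced.
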
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 14e6f2a2fb77..9982412fd7f2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -228,16 +228,20 @@ static void veth_get_ethtool_stats(struct net_device *dev,
const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
const void *base = (void *)&rq_stats->vs;
unsigned int start, tx_idx = idx;
+ u64 buf[VETH_TQ_STATS_LEN];
size_t offset;
- tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
do {
start = u64_stats_fetch_begin(&rq_stats->syncp);
for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
offset = veth_tq_stats_desc[j].offset;
- data[tx_idx + j] += *(u64 *)(base + offset);
+ buf[j] = *(u64 *)(base + offset);
}
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
+
+ tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++)
+ data[tx_idx + j] += buf[j];
}
pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 7bbda46cfd93..82f120ee1c66 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1727,8 +1727,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ ce_state->src_ring->base_addr_owner_space_unaligned,
+ ce_state->src_ring->base_addr_ce_space_unaligned);
kfree(ce_state->src_ring);
}
@@ -1737,8 +1737,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ ce_state->dest_ring->base_addr_owner_space_unaligned,
+ ce_state->dest_ring->base_addr_ce_space_unaligned);
kfree(ce_state->dest_ring);
}
@@ -1758,8 +1758,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
(ce_state->src_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ ce_state->src_ring->base_addr_owner_space_unaligned,
+ ce_state->src_ring->base_addr_ce_space_unaligned);
kfree(ce_state->src_ring);
}
@@ -1768,8 +1768,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ ce_state->dest_ring->base_addr_owner_space_unaligned,
+ ce_state->dest_ring->base_addr_ce_space_unaligned);
kfree(ce_state->dest_ring);
}
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index 9f9d2f2477c7..f13b260c5c96 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -686,8 +686,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->src_ring->base_addr_owner_space,
- pipe->src_ring->base_addr_ce_space);
+ pipe->src_ring->base_addr_owner_space_unaligned,
+ pipe->src_ring->base_addr_ce_space_unaligned);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
@@ -698,8 +698,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->dest_ring->base_addr_owner_space,
- pipe->dest_ring->base_addr_ce_space);
+ pipe->dest_ring->base_addr_owner_space_unaligned,
+ pipe->dest_ring->base_addr_ce_space_unaligned);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
@@ -711,8 +711,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->status_ring->base_addr_owner_space,
- pipe->status_ring->base_addr_ce_space);
+ pipe->status_ring->base_addr_owner_space_unaligned,
+ pipe->status_ring->base_addr_ce_space_unaligned);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 2f4daee9e2f0..5cd10752b22e 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -5606,7 +5606,8 @@ void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
for_each_set_bit(link_id, &links_map, ATH12K_NUM_MAX_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- if (!arvif || arvif->is_started)
+ if (!arvif || !arvif->is_created ||
+ arvif->ar->scan.arvif != arvif)
continue;
ar = arvif->ar;
@@ -12214,6 +12215,9 @@ void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (drop)
return;
+ for_each_ar(ah, ar, i)
+ wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
+
/* vif can be NULL when flush() is considered for hw */
if (!vif) {
for_each_ar(ah, ar, i)
@@ -12221,9 +12225,6 @@ void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
}
- for_each_ar(ah, ar, i)
- wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
-
ahvif = ath12k_vif_to_ahvif(vif);
links = ahvif->links_map;
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
@@ -13448,7 +13449,7 @@ int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
ath12k_scan_abort(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
- wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+ wiphy_work_flush(hw->wiphy, &ar->scan.vdev_clean_wk);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath12k/wifi7/hw.c b/drivers/net/wireless/ath/ath12k/wifi7/hw.c
index 1f5dda73230d..8ac06b2fc18f 100644
--- a/drivers/net/wireless/ath/ath12k/wifi7/hw.c
+++ b/drivers/net/wireless/ath/ath12k/wifi7/hw.c
@@ -705,7 +705,10 @@ static void ath12k_wifi7_mac_op_tx(struct ieee80211_hw *hw,
return;
}
} else {
- link_id = 0;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ link_id = ATH12K_FIRST_SCAN_LINK;
+ else
+ link_id = 0;
}
arvif = rcu_dereference(ahvif->link[link_id]);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 150b04d0a21c..17ffc4822741 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -6479,16 +6479,9 @@ static int freq_to_idx(struct ath12k *ar, int freq)
if (!sband)
continue;
- for (ch = 0; ch < sband->n_channels; ch++, idx++) {
- if (sband->channels[ch].center_freq <
- KHZ_TO_MHZ(ar->freq_range.start_freq) ||
- sband->channels[ch].center_freq >
- KHZ_TO_MHZ(ar->freq_range.end_freq))
- continue;
-
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
if (sband->channels[ch].center_freq == freq)
goto exit;
- }
}
exit:
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 354c5ce66045..f3397dc6c422 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -825,7 +825,7 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
bool coex_flag)
{
- u8 i;
+ u8 i, j;
u32 rx_win_size;
struct mwifiex_private *priv;
@@ -863,8 +863,8 @@ static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
if (rx_win_size != priv->add_ba_param.rx_win_size) {
if (!priv->media_connected)
continue;
- for (i = 0; i < MAX_NUM_TID; i++)
- mwifiex_11n_delba(priv, i);
+ for (j = 0; j < MAX_NUM_TID; j++)
+ mwifiex_11n_delba(priv, j);
}
}
}
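
The mwifiex fix above is the classic reused-loop-counter bug. Reduced to a standalone toy (NPRIV stands in for adapter->priv_num; nothing here is driver code):

	#include <stdio.h>

	#define NPRIV		3
	#define MAX_NUM_TID	8

	int main(void)
	{
		int visited = 0;

		/* Buggy form: the inner loop reuses 'i', so it finishes with
		 * i == MAX_NUM_TID and the outer loop exits after one pass. */
		for (int i = 0; i < NPRIV; i++) {
			visited++;
			for (i = 0; i < MAX_NUM_TID; i++)
				;	/* per-TID teardown */
		}
		printf("buggy: visited %d of %d interfaces\n", visited, NPRIV);

		visited = 0;
		/* Fixed form: a dedicated inner counter, as in the hunk above. */
		for (int i = 0; i < NPRIV; i++) {
			visited++;
			for (int j = 0; j < MAX_NUM_TID; j++)
				;	/* per-TID teardown */
		}
		printf("fixed: visited %d of %d interfaces\n", visited, NPRIV);
		return 0;
	}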
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index f3a853edfc11..8c8e074a3a70 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -2035,6 +2035,7 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->queues = MAX_HW_QUEUES;
hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
+ hw->vif_data_size = sizeof(struct vif_priv);
hw->max_rates = 1;
hw->max_rate_tries = MAX_RETRIES;
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index cf6d3e2a007b..1d7e3ad900c1 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -78,9 +78,8 @@ struct mhi_mbim_context {
struct mbim_tx_hdr {
struct usb_cdc_ncm_nth16 nth16;
-
- /* Must be last as it ends in a flexible-array member. */
struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
@@ -109,20 +108,20 @@ static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
- DEFINE_RAW_FLEX(struct mbim_tx_hdr, mbim_hdr, ndp16.dpe16, 2);
unsigned int dgram_size = skb->len;
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
+ struct mbim_tx_hdr *mbim_hdr;
/* Only one NDP is sent, containing the IP packet (no aggregation) */
/* Ensure we have enough headroom for crafting MBIM header */
- if (skb_cow_head(skb, __struct_size(mbim_hdr))) {
+ if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
dev_kfree_skb_any(skb);
return NULL;
}
- mbim_hdr = skb_push(skb, __struct_size(mbim_hdr));
+ mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
/* Fill NTB header */
nth16 = &mbim_hdr->nth16;
@@ -135,11 +134,12 @@ static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
/* Fill the unique NDP */
ndp16 = &mbim_hdr->ndp16;
ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
- ndp16->wLength = cpu_to_le16(struct_size(ndp16, dpe16, 2));
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
+ + sizeof(struct usb_cdc_ncm_dpe16) * 2);
ndp16->wNextNdpIndex = 0;
/* Datagram follows the mbim header */
- ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(__struct_size(mbim_hdr));
+ ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
/* null termination */
@@ -585,8 +585,7 @@ static void mhi_mbim_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
ndev->type = ARPHRD_RAWIP;
- ndev->needed_headroom =
- struct_size_t(struct mbim_tx_hdr, ndp16.dpe16, 2);
+ ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 9ef8ef2d4363..b957fce83b7c 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -125,10 +125,6 @@ static ssize_t virtual_ncidev_write(struct file *file,
kfree_skb(skb);
return -EFAULT;
}
- if (strnlen(skb->data, count) != count) {
- kfree_skb(skb);
- return -EINVAL;
- }
nci_recv_frame(vdev->ndev, skb);
return count;
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 15b3d07f8ccd..ed61b97fde59 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1704,6 +1704,7 @@ static const struct apple_nvme_hw apple_nvme_t8103_hw = {
static const struct of_device_id apple_nvme_of_match[] = {
{ .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
+ { .compatible = "apple,t8103-nvme-ans2", .data = &apple_nvme_t8103_hw },
{ .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
{},
};
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index bc455fa98246..6948de3f438a 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3587,6 +3587,8 @@ fail_ctrl:
ctrl->ctrl.opts = NULL;
+ if (ctrl->ctrl.admin_tagset)
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0e4caeab739c..58f3097888a7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1532,7 +1532,10 @@ static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
}
writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
- nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
+ !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+ goto unlock;
/*
* Read controller status to flush the previous write and trigger a
@@ -3999,6 +4002,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1fa0, 0x2283), /* Wodposit WPBSNM8-256GTP */
+ .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
{ PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 96648ec2fadb..67c423a8b052 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -150,7 +150,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
* code path with duplicate ctrl subsysnqn. In order to prevent that we
* mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
*/
- memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/* use fabric id-ctrl values */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 15416ff0eac4..549a4786d1c3 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -982,6 +982,18 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
goto err_proto;
}
+ /*
+ * Ensure command data structures are initialized. We must check both
+ * cmd->req.sg and cmd->iov because they can have different NULL states:
+ * - Uninitialized commands: both NULL
+ * - READ commands: cmd->req.sg allocated, cmd->iov NULL
+ * - WRITE commands: both allocated
+ */
+ if (unlikely(!cmd->req.sg || !cmd->iov)) {
+ pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n",
+ queue->idx, data->ttag);
+ goto err_proto;
+ }
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
@@ -1992,14 +2004,13 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
trace_sk_data_ready(sk);
+ if (sk->sk_state != TCP_LISTEN)
+ return;
+
read_lock_bh(&sk->sk_callback_lock);
port = sk->sk_user_data;
- if (!port)
- goto out;
-
- if (sk->sk_state == TCP_LISTEN)
+ if (port)
queue_work(nvmet_wq, &port->accept_work);
-out:
read_unlock_bh(&sk->sk_callback_lock);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0b65039ece53..57420806c1a2 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1942,13 +1942,17 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
end--;
len = end - start;
- if (kstrtoint(end, 10, &id) < 0)
+ if (kstrtoint(end, 10, &id) < 0) {
+ of_node_put(np);
continue;
+ }
/* Allocate an alias_prop with enough space for the stem */
ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
- if (!ap)
+ if (!ap) {
+ of_node_put(np);
continue;
+ }
memset(ap, 0, sizeof(*ap) + len + 1);
ap->alias = start;
of_alias_add(ap, np, id, start, len);
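
The two of_node_put() additions above follow a general OF refcounting rule: of_find_node_by_path() and friends return the node with an elevated refcount, and every early continue must drop it or the node stays pinned. A condensed sketch of the shape (parse_alias_id() and register_alias() are illustrative, not real helpers):

	struct device_node *np;
	struct property *pp;

	for_each_property_of_node(of_aliases, pp) {
		np = of_find_node_by_path(pp->value);	/* takes a reference */
		if (!np)
			continue;

		if (parse_alias_id(pp) < 0) {
			of_node_put(np);	/* drop the ref on every bail-out */
			continue;
		}

		register_alias(np, pp);		/* success path consumes the ref */
	}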
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f77cb19973a5..a6dca3a005aa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -569,7 +569,7 @@ static int __init of_platform_default_populate_init(void)
node = of_find_node_by_path("/firmware");
if (node) {
- of_platform_populate(node, NULL, NULL, NULL);
+ of_platform_default_populate(node, NULL, NULL);
of_node_put(node);
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 00b0210e1f1d..e3f848ffb52a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -225,12 +225,6 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
- Enabling this option will reduce the entropy of x86 KASLR memory
- regions. For example - on a 46 bit system, the entropy goes down
- from 16 bits to 15 bits. The actual reduction in entropy depends
- on the physical address bits, on processor features, kernel config
- (5 level page table) and physical memory present on the system.
-
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 9f995e156f75..6e56498d0644 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -203,7 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
usb3->dev = dev;
usb3->mdiodev = mdiodev;
- usb3->family = (enum bcm_ns_family)device_get_match_data(dev);
+ usb3->family = (unsigned long)device_get_match_data(dev);
syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index 68fcc8114d75..7f5600103a00 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -89,7 +89,8 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
writel(imx8_phy->tx_deemph_gen2,
imx8_phy->base + PCIE_PHY_TRSV_REG6);
break;
- case IMX8MP: /* Do nothing. */
+ case IMX8MP:
+ reset_control_assert(imx8_phy->reset);
break;
}
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index ad8a55012e42..91b3e62743d3 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -126,8 +126,6 @@ struct imx8mq_usb_phy {
static void tca_blk_orientation_set(struct tca_blk *tca,
enum typec_orientation orientation);
-#ifdef CONFIG_TYPEC
-
static int tca_blk_typec_switch_set(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
@@ -175,18 +173,6 @@ static void tca_blk_put_typec_switch(struct typec_switch_dev *sw)
typec_switch_unregister(sw);
}
-#else
-
-static struct typec_switch_dev *tca_blk_get_typec_switch(struct platform_device *pdev,
- struct imx8mq_usb_phy *imx_phy)
-{
- return NULL;
-}
-
-static void tca_blk_put_typec_switch(struct typec_switch_dev *sw) {}
-
-#endif /* CONFIG_TYPEC */
-
static void tca_blk_orientation_set(struct tca_blk *tca,
enum typec_orientation orientation)
{
@@ -504,6 +490,7 @@ static void imx8m_phy_tune(struct imx8mq_usb_phy *imx_phy)
if (imx_phy->pcs_tx_swing_full != PHY_TUNE_DEFAULT) {
value = readl(imx_phy->base + PHY_CTRL5);
+ value &= ~PHY_CTRL5_PCS_TX_SWING_FULL_MASK;
value |= FIELD_PREP(PHY_CTRL5_PCS_TX_SWING_FULL_MASK,
imx_phy->pcs_tx_swing_full);
writel(value, imx_phy->base + PHY_CTRL5);
diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig
index 2f0045e874ac..2e6d1224711e 100644
--- a/drivers/phy/microchip/Kconfig
+++ b/drivers/phy/microchip/Kconfig
@@ -6,7 +6,7 @@
config PHY_SPARX5_SERDES
tristate "Microchip Sparx5 SerDes PHY driver"
select GENERIC_PHY
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || ARCH_LAN969X || COMPILE_TEST
depends on OF
depends on HAS_IOMEM
help
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index b5514a32ff8f..eb93015be841 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -1093,29 +1093,29 @@ static int qusb2_phy_probe(struct platform_device *pdev)
or->hsdisc_trim.override = true;
}
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ dev_set_drvdata(dev, qphy);
+
/*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
+ * Enable runtime PM support, but forbid it by default.
+ * Users can allow it again via the power/control attribute in sysfs.
*/
+ pm_runtime_set_active(dev);
pm_runtime_forbid(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
generic_phy = devm_phy_create(dev, NULL, &qusb2_phy_gen_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create phy, %d\n", ret);
- pm_runtime_disable(dev);
return ret;
}
qphy->phy = generic_phy;
- dev_set_drvdata(dev, qphy);
phy_set_drvdata(generic_phy, qphy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
}
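
This probe rework and the phy-da8xx-usb change further down converge on the same devm runtime-PM shape, which is what lets the explicit pm_runtime_disable() calls on the error paths disappear. A minimal sketch, assuming a hypothetical my_phy_probe():

	static int my_phy_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		pm_runtime_set_active(dev);
		/* Runtime PM stays forbidden until userspace re-allows it
		 * via the power/control attribute in sysfs. */
		pm_runtime_forbid(dev);
		ret = devm_pm_runtime_enable(dev);	/* paired disable runs on unbind */
		if (ret)
			return ret;

		/* Later failures can simply return: there is no
		 * pm_runtime_disable() to remember on each error path. */
		return 0;
	}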
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index b0f23690ec30..8f4c08e599aa 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -821,17 +821,20 @@ static void rockchip_chg_detect_work(struct work_struct *work)
container_of(work, struct rockchip_usb2phy_port, chg_work.work);
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
struct regmap *base = get_reg_base(rphy);
- bool is_dcd, tmout, vout;
+ bool is_dcd, tmout, vout, vbus_attach;
unsigned long delay;
+ vbus_attach = property_enabled(rphy->grf, &rport->port_cfg->utmi_bvalid);
+
dev_dbg(&rport->phy->dev, "chg detection work state = %d\n",
rphy->chg_state);
switch (rphy->chg_state) {
case USB_CHG_STATE_UNDEFINED:
- if (!rport->suspended)
+ if (!rport->suspended && !vbus_attach)
rockchip_usb2phy_power_off(rport->phy);
/* put the controller in non-driving mode */
- property_enable(base, &rphy->phy_cfg->chg_det.opmode, false);
+ if (!vbus_attach)
+ property_enable(base, &rphy->phy_cfg->chg_det.opmode, false);
/* Start DCD processing stage 1 */
rockchip_chg_enable_dcd(rphy, true);
rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
@@ -894,7 +897,8 @@ static void rockchip_chg_detect_work(struct work_struct *work)
fallthrough;
case USB_CHG_STATE_DETECTED:
/* put the controller in normal mode */
- property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
+ if (!vbus_attach)
+ property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
dev_dbg(&rport->phy->dev, "charger = %s\n",
chg_to_string(rphy->chg_type));
@@ -1491,7 +1495,7 @@ next_child:
rphy);
if (ret) {
dev_err_probe(rphy->dev, ret, "failed to request usb2phy irq handle\n");
- goto put_child;
+ return ret;
}
}
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 27fe92f73f33..b44afbff8616 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -712,7 +712,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(child, "reg", &index);
- if (ret || index > usbphyc->nphys) {
+ if (ret || index >= usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
if (!ret)
ret = -EINVAL;
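
The one-character change above is a fencepost fix; a sketch of the bound it enforces (the array framing is an assumption about how index is used later):

	/* With an nphys-element array, valid subscripts are 0 .. nphys - 1,
	 * so index == nphys must be rejected along with anything larger. */
	if (index >= usbphyc->nphys)	/* '>' let index == nphys slip through */
		return -EINVAL;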
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index e818f6c3980e..bec9616c4a2e 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -84,6 +84,7 @@
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
#define BIAS_PAD_PD BIT(11)
#define HS_SQUELCH_LEVEL(x) (((x) & 0x7) << 0)
+#define HS_DISCON_LEVEL(x) (((x) & 0x7) << 3)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x288
#define USB2_TRK_START_TIMER(x) (((x) & 0x7f) << 12)
@@ -623,6 +624,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
value &= ~BIAS_PAD_PD;
value &= ~HS_SQUELCH_LEVEL(~0);
value |= HS_SQUELCH_LEVEL(priv->calib.hs_squelch);
+ value &= ~HS_DISCON_LEVEL(~0);
+ value |= HS_DISCON_LEVEL(0x7);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
udelay(1);
diff --git a/drivers/phy/ti/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c
index 1d81a1e6ec6b..62fa6f89c0e6 100644
--- a/drivers/phy/ti/phy-da8xx-usb.c
+++ b/drivers/phy/ti/phy-da8xx-usb.c
@@ -180,6 +180,7 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
struct da8xx_usb_phy_platform_data *pdata = dev->platform_data;
struct device_node *node = dev->of_node;
struct da8xx_usb_phy *d_phy;
+ int ret;
d_phy = devm_kzalloc(dev, sizeof(*d_phy), GFP_KERNEL);
if (!d_phy)
@@ -233,8 +234,6 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(d_phy->phy_provider);
}
} else {
- int ret;
-
ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy",
"ohci-da8xx");
if (ret)
@@ -249,7 +248,9 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
PHY_INIT_BITS, PHY_INIT_BITS);
pm_runtime_set_active(dev);
- devm_pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* Prevent runtime pm from being ON by default. Users can enable
* it using power/control in sysfs.
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 6cfe2538d15b..6213c2b6005a 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -512,7 +512,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(base),
"failed to get base memory resource\n");
- priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg);
+ priv->regmap = devm_regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg);
if (IS_ERR(priv->regmap))
return dev_err_probe(dev, PTR_ERR(priv->regmap),
"Failed to get syscon\n");
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index a8b37037c6fe..19849703be4a 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -246,6 +246,8 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
[SC8280XP_MMCX_AO] = &mmcx_ao,
[SC8280XP_MX] = &mx,
[SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_MXC] = &mxc,
+ [SC8280XP_MXC_AO] = &mxc_ao,
[SC8280XP_NSP] = &nsp,
};
@@ -700,6 +702,8 @@ static struct rpmhpd *sc8280xp_rpmhpds[] = {
[SC8280XP_MMCX_AO] = &mmcx_ao,
[SC8280XP_MX] = &mx,
[SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_MXC] = &mxc,
+ [SC8280XP_MXC_AO] = &mxc_ao,
[SC8280XP_NSP] = &nsp,
[SC8280XP_QPHY] = &qphy,
};
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index cd06229db394..ec8731515333 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -2295,8 +2295,9 @@ static long pwm_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long ar
.duty_offset_ns = wf.duty_offset_ns,
};
- return copy_to_user((struct pwmchip_waveform __user *)arg,
- &cwf, sizeof(cwf));
+ ret = copy_to_user((struct pwmchip_waveform __user *)arg,
+ &cwf, sizeof(cwf));
+ return ret ? -EFAULT : 0;
}
case PWM_IOCTL_GETWF:
@@ -2329,8 +2330,9 @@ static long pwm_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long ar
.duty_offset_ns = wf.duty_offset_ns,
};
- return copy_to_user((struct pwmchip_waveform __user *)arg,
- &cwf, sizeof(cwf));
+ ret = copy_to_user((struct pwmchip_waveform __user *)arg,
+ &cwf, sizeof(cwf));
+ return ret ? -EFAULT : 0;
}
case PWM_IOCTL_SETROUNDEDWF:
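
Both pwm hunks above correct the same misuse: copy_to_user() returns the number of bytes it failed to copy (0 on success), never a negative errno, so its raw return value must not propagate out of an ioctl handler. The corrected idiom, as a standalone sketch (wf_copy_out() is hypothetical):

	static long wf_copy_out(struct pwmchip_waveform __user *uptr,
				const struct pwmchip_waveform *cwf)
	{
		/* Nonzero means bytes were left uncopied; report -EFAULT
		 * rather than leaking the byte count to userspace. */
		if (copy_to_user(uptr, cwf, sizeof(*cwf)))
			return -EFAULT;
		return 0;
	}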
diff --git a/drivers/pwm/pwm-max7360.c b/drivers/pwm/pwm-max7360.c
index 16261958ce7f..732969303dd7 100644
--- a/drivers/pwm/pwm-max7360.c
+++ b/drivers/pwm/pwm-max7360.c
@@ -153,6 +153,7 @@ static int max7360_pwm_read_waveform(struct pwm_chip *chip,
}
static const struct pwm_ops max7360_pwm_ops = {
+ .sizeof_wfhw = sizeof(struct max7360_pwm_waveform),
.request = max7360_pwm_request,
.round_waveform_tohw = max7360_pwm_round_waveform_tohw,
.round_waveform_fromhw = max7360_pwm_round_waveform_fromhw,
diff --git a/drivers/resctrl/mpam_internal.h b/drivers/resctrl/mpam_internal.h
index e79c3c47259c..e8971842b124 100644
--- a/drivers/resctrl/mpam_internal.h
+++ b/drivers/resctrl/mpam_internal.h
@@ -12,7 +12,6 @@
#include <linux/jump_label.h>
#include <linux/llist.h>
#include <linux/mutex.h>
-#include <linux/srcu.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
@@ -201,8 +200,12 @@ struct mpam_props {
} PACKED_FOR_KUNIT;
#define mpam_has_feature(_feat, x) test_bit(_feat, (x)->features)
-#define mpam_set_feature(_feat, x) set_bit(_feat, (x)->features)
-#define mpam_clear_feature(_feat, x) clear_bit(_feat, (x)->features)
+/*
+ * The non-atomic set/clear operations are used because, if struct mpam_props is
+ * packed, the alignment requirements for atomics aren't met.
+ */
+#define mpam_set_feature(_feat, x) __set_bit(_feat, (x)->features)
+#define mpam_clear_feature(_feat, x) __clear_bit(_feat, (x)->features)
/* The values for MSMON_CFG_MBWU_FLT.RWBW */
enum mon_filter_options {
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 91e70cb46fb5..5c67c13e5735 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -105,7 +105,7 @@ static int sdw_drv_probe(struct device *dev)
if (ret)
return ret;
- ret = ida_alloc_max(&slave->bus->slave_ida, SDW_FW_MAX_DEVICES, GFP_KERNEL);
+ ret = ida_alloc_max(&slave->bus->slave_ida, SDW_FW_MAX_DEVICES - 1, GFP_KERNEL);
if (ret < 0) {
dev_err(dev, "Failed to allocated ID: %d\n", ret);
return ret;
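
For context: ida_alloc_max() treats its max argument as inclusive, so the old call could hand out one ID too many:

	/* ida_alloc_max(ida, max, gfp) allocates from [0, max] inclusive:
	 *
	 *	max = SDW_FW_MAX_DEVICES	-> 0 .. MAX	(MAX + 1 IDs)
	 *	max = SDW_FW_MAX_DEVICES - 1	-> 0 .. MAX - 1	(MAX IDs)
	 */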
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 3d4d00188c26..d933cebad52b 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -23,6 +23,7 @@ const struct device_type sdw_slave_type = {
.release = sdw_slave_release,
.uevent = sdw_slave_uevent,
};
+EXPORT_SYMBOL_GPL(sdw_slave_type);
int sdw_slave_add(struct sdw_bus *bus,
struct sdw_slave_id *id, struct fwnode_handle *fwnode)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index baf5bc844b6f..2bb1ceb9d621 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1040,6 +1040,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
__u8 cap_type;
int ret;
+ if (dev->quirks & USB_QUIRK_NO_BOS) {
+ dev_dbg(ddev, "skipping BOS descriptor\n");
+ return -ENOMSG;
+ }
+
bos = kzalloc(sizeof(*bos), GFP_KERNEL);
if (!bos)
return -ENOMEM;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 47f589c4104a..c4d85089d19b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -450,6 +450,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Elgato 4K X - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x0fd9, 0x009b), .driver_info = USB_QUIRK_NO_BOS },
+
/* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
{ USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ec8407972b9d..93fd5fdf95cb 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -993,6 +993,8 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
dwc->ip = DWC3_GSNPS_ID(reg);
+ if (dwc->ip == DWC4_IP)
+ dwc->ip = DWC32_IP;
/* This should read as U3 followed by revision number */
if (DWC3_IP_IS(DWC3)) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index a5fc92c4ffa3..45757169b672 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1265,6 +1265,7 @@ struct dwc3 {
#define DWC3_IP 0x5533
#define DWC31_IP 0x3331
#define DWC32_IP 0x3332
+#define DWC4_IP 0x3430
u32 revision;
diff --git a/drivers/usb/dwc3/dwc3-apple.c b/drivers/usb/dwc3/dwc3-apple.c
index cc47cad232e3..40c3ccfddb67 100644
--- a/drivers/usb/dwc3/dwc3-apple.c
+++ b/drivers/usb/dwc3/dwc3-apple.c
@@ -218,25 +218,31 @@ static int dwc3_apple_core_init(struct dwc3_apple *appledwc)
return ret;
}
-static void dwc3_apple_phy_set_mode(struct dwc3_apple *appledwc, enum phy_mode mode)
-{
- lockdep_assert_held(&appledwc->lock);
-
- /*
- * This platform requires SUSPHY to be enabled here already in order to properly configure
- * the PHY and switch dwc3's PIPE interface to USB3 PHY.
- */
- dwc3_enable_susphy(&appledwc->dwc, true);
- phy_set_mode(appledwc->dwc.usb2_generic_phy[0], mode);
- phy_set_mode(appledwc->dwc.usb3_generic_phy[0], mode);
-}
-
static int dwc3_apple_init(struct dwc3_apple *appledwc, enum dwc3_apple_state state)
{
int ret, ret_reset;
lockdep_assert_held(&appledwc->lock);
+ /*
+ * The USB2 PHY on this platform must be configured for host or device mode while it is
+ * still powered off and before dwc3 tries to access it. Otherwise, the new configuration
+ * will sometimes only take effect after the *next* time dwc3 is brought up which causes
+ * the connected device to just not work.
+ * The USB3 PHY must be configured later after dwc3 has already been initialized.
+ */
+ switch (state) {
+ case DWC3_APPLE_HOST:
+ phy_set_mode(appledwc->dwc.usb2_generic_phy[0], PHY_MODE_USB_HOST);
+ break;
+ case DWC3_APPLE_DEVICE:
+ phy_set_mode(appledwc->dwc.usb2_generic_phy[0], PHY_MODE_USB_DEVICE);
+ break;
+ default:
+ /* Unreachable unless there's a bug in this driver */
+ return -EINVAL;
+ }
+
ret = reset_control_deassert(appledwc->reset);
if (ret) {
dev_err(appledwc->dev, "Failed to deassert reset, err=%d\n", ret);
@@ -257,7 +263,13 @@ static int dwc3_apple_init(struct dwc3_apple *appledwc, enum dwc3_apple_state st
case DWC3_APPLE_HOST:
appledwc->dwc.dr_mode = USB_DR_MODE_HOST;
dwc3_apple_set_ptrcap(appledwc, DWC3_GCTL_PRTCAP_HOST);
- dwc3_apple_phy_set_mode(appledwc, PHY_MODE_USB_HOST);
+ /*
+ * This platform requires SUSPHY to be enabled here already in order to properly
+ * configure the PHY and switch dwc3's PIPE interface to USB3 PHY. The USB2 PHY
+ * has already been configured to the correct mode earlier.
+ */
+ dwc3_enable_susphy(&appledwc->dwc, true);
+ phy_set_mode(appledwc->dwc.usb3_generic_phy[0], PHY_MODE_USB_HOST);
ret = dwc3_host_init(&appledwc->dwc);
if (ret) {
dev_err(appledwc->dev, "Failed to initialize host, ret=%d\n", ret);
@@ -268,7 +280,13 @@ static int dwc3_apple_init(struct dwc3_apple *appledwc, enum dwc3_apple_state st
case DWC3_APPLE_DEVICE:
appledwc->dwc.dr_mode = USB_DR_MODE_PERIPHERAL;
dwc3_apple_set_ptrcap(appledwc, DWC3_GCTL_PRTCAP_DEVICE);
- dwc3_apple_phy_set_mode(appledwc, PHY_MODE_USB_DEVICE);
+ /*
+ * This platform requires SUSPHY to be enabled here already in order to properly
+ * configure the PHY and switch dwc3's PIPE interface to USB3 PHY. The USB2 PHY
+ * has already been configured to the correct mode earlier.
+ */
+ dwc3_enable_susphy(&appledwc->dwc, true);
+ phy_set_mode(appledwc->dwc.usb3_generic_phy[0], PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(&appledwc->dwc);
if (ret) {
dev_err(appledwc->dev, "Failed to initialize gadget, ret=%d\n", ret);
@@ -340,6 +358,22 @@ static int dwc3_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role ro
guard(mutex)(&appledwc->lock);
/*
+ * Skip role switches if appledwc is already in the desired state. The
+ * USB-C port controller on M2 and M1/M2 Pro/Max/Ultra devices issues
+ * additional interrupts which result in usb_role_switch_set_role()
+ * calls with the current role.
+ * Ignore those calls here to ensure the USB-C port controller and
+ * appledwc are in a consistent state.
+ * This matches the behaviour in __dwc3_set_mode().
+ * Do not handle USB_ROLE_NONE for DWC3_APPLE_NO_CABLE and
+ * DWC3_APPLE_PROBE_PENDING since that is a no-op anyway.
+ */
+ if (appledwc->state == DWC3_APPLE_HOST && role == USB_ROLE_HOST)
+ return 0;
+ if (appledwc->state == DWC3_APPLE_DEVICE && role == USB_ROLE_DEVICE)
+ return 0;
+
+ /*
* We need to tear all of dwc3 down and re-initialize it every time a cable is
* connected or disconnected or when the mode changes. See the documentation for enum
* dwc3_apple_state for details.
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index aa6ab666741a..a96476507d2f 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -362,6 +362,10 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
return ret;
usb_ep_enable(uvc->video.ep);
+ uvc->video.max_req_size = uvc->video.ep->maxpacket
+ * max_t(unsigned int, uvc->video.ep->maxburst, 1)
+ * (uvc->video.ep->mult);
+
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMON;
v4l2_event_queue(&uvc->vdev, &v4l2_event);
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 9e79cbe50715..676419a04976 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -107,7 +107,7 @@ struct uvc_video {
unsigned int width;
unsigned int height;
unsigned int imagesize;
- unsigned int interval;
+ unsigned int interval; /* in 100ns units */
struct mutex mutex; /* protects frame parameters */
unsigned int uvc_num_requests;
@@ -117,6 +117,7 @@ struct uvc_video {
/* Requests */
bool is_enabled; /* tracks whether video stream is enabled */
unsigned int req_size;
+ unsigned int max_req_size;
struct list_head ureqs; /* all uvc_requests allocated by uvc_video */
/* USB requests that the video pump thread can encode into */
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 9a1bbd79ff5a..586e5524c171 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -86,10 +86,17 @@ static int uvc_buffer_prepare(struct vb2_buffer *vb)
buf->bytesused = 0;
} else {
buf->bytesused = vb2_get_plane_payload(vb, 0);
- buf->req_payload_size =
- DIV_ROUND_UP(buf->bytesused +
- (video->reqs_per_frame * UVCG_REQUEST_HEADER_LEN),
- video->reqs_per_frame);
+
+ if (video->reqs_per_frame != 0) {
+ buf->req_payload_size =
+ DIV_ROUND_UP(buf->bytesused +
+ (video->reqs_per_frame * UVCG_REQUEST_HEADER_LEN),
+ video->reqs_per_frame);
+ if (buf->req_payload_size > video->req_size)
+ buf->req_payload_size = video->req_size;
+ } else {
+ buf->req_payload_size = video->max_req_size;
+ }
}
return 0;
@@ -175,7 +182,15 @@ int uvcg_alloc_buffers(struct uvc_video_queue *queue,
{
int ret;
+retry:
ret = vb2_reqbufs(&queue->queue, rb);
+ if (ret < 0 && queue->use_sg) {
+ uvc_trace(UVC_TRACE_IOCTL,
+ "failed to alloc buffer with sg enabled, try non-sg mode\n");
+ queue->use_sg = 0;
+ queue->queue.mem_ops = &vb2_vmalloc_memops;
+ goto retry;
+ }
return ret ? ret : rb->count;
}
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index fb77b0b21790..f568dee08b3b 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -499,13 +499,11 @@ uvc_video_prep_requests(struct uvc_video *video)
{
struct uvc_device *uvc = container_of(video, struct uvc_device, video);
struct usb_composite_dev *cdev = uvc->func.config->cdev;
- unsigned int interval_duration = video->ep->desc->bInterval * 1250;
+ unsigned int interval_duration;
unsigned int max_req_size, req_size, header_size;
unsigned int nreq;
- max_req_size = video->ep->maxpacket
- * max_t(unsigned int, video->ep->maxburst, 1)
- * (video->ep->mult);
+ max_req_size = video->max_req_size;
if (!usb_endpoint_xfer_isoc(video->ep->desc)) {
video->req_size = max_req_size;
@@ -515,8 +513,11 @@ uvc_video_prep_requests(struct uvc_video *video)
return;
}
+ interval_duration = 2 << (video->ep->desc->bInterval - 1);
if (cdev->gadget->speed < USB_SPEED_HIGH)
- interval_duration = video->ep->desc->bInterval * 10000;
+ interval_duration *= 10000;
+ else
+ interval_duration *= 1250;
nreq = DIV_ROUND_UP(video->interval, interval_duration);
@@ -837,7 +838,6 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
video->interval = 666666;
/* Initialize the video buffers queue. */
- uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
+ return uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
- return 0;
}
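
The interval arithmetic above is easier to check with the units spelled out; a worked example under the assumptions the code already makes (video->interval is in 100 ns units, set to 666666 for roughly 30 fps in uvcg_video_init() above):

	/*
	 * High-speed isoc endpoint, bInterval = 1:
	 *	interval_duration = (2 << (1 - 1)) * 1250  = 2500	(100 ns units)
	 *	nreq = DIV_ROUND_UP(666666, 2500)          = 267 requests per frame
	 *
	 * Full speed, same bInterval:
	 *	interval_duration = (2 << (1 - 1)) * 10000 = 20000
	 *	nreq = DIV_ROUND_UP(666666, 20000)         = 34 requests per frame
	 */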
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 2e4bb5cc2165..c801527d5bd2 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -392,3 +392,4 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: ehci_platform");
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 5e02f2ceafb6..f4419d4526c4 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -211,3 +211,4 @@ static struct platform_driver uhci_platform_driver = {
.of_match_table = platform_uhci_ids,
},
};
+MODULE_SOFTDEP("pre: ehci_platform");
diff --git a/drivers/usb/host/xhci-sideband.c b/drivers/usb/host/xhci-sideband.c
index a85f62a73313..2bd77255032b 100644
--- a/drivers/usb/host/xhci-sideband.c
+++ b/drivers/usb/host/xhci-sideband.c
@@ -210,7 +210,6 @@ xhci_sideband_remove_endpoint(struct xhci_sideband *sb,
return -ENODEV;
__xhci_sideband_remove_endpoint(sb, ep);
- xhci_initialize_ring_info(ep->ring);
return 0;
}
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 31ccced5125e..8b492871d21d 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1563,7 +1563,7 @@ static int tegra_xusb_setup_wakeup(struct platform_device *pdev, struct tegra_xu
for (i = 0; i < tegra->soc->max_num_wakes; i++) {
struct irq_data *data;
- tegra->wake_irqs[i] = platform_get_irq(pdev, i + WAKE_IRQ_START_INDEX);
+ tegra->wake_irqs[i] = platform_get_irq_optional(pdev, i + WAKE_IRQ_START_INDEX);
if (tegra->wake_irqs[i] < 0)
break;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 02c9bfe21ae2..b3ba16b9718c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2898,16 +2898,25 @@ int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int
gfp_t gfp_flags)
{
struct xhci_command *command;
+ struct xhci_ep_ctx *ep_ctx;
unsigned long flags;
- int ret;
+ int ret = -ENODEV;
command = xhci_alloc_command(xhci, true, gfp_flags);
if (!command)
return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
- ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
- ep->ep_index, suspend);
+
+ /* make sure endpoint exists and is running before stopping it */
+ if (ep->ring) {
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING)
+ ret = xhci_queue_stop_endpoint(xhci, command,
+ ep->vdev->slot_id,
+ ep->ep_index, suspend);
+ }
+
if (ret < 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
goto out;
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 530b77fc2f78..9262a2ac97f5 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -70,7 +70,6 @@ MODULE_DEVICE_TABLE(usb, combined_id_table);
#define F81232_REGISTER_REQUEST 0xa0
#define F81232_GET_REGISTER 0xc0
#define F81232_SET_REGISTER 0x40
-#define F81534A_ACCESS_REG_RETRY 2
#define SERIAL_BASE_ADDRESS 0x0120
#define RECEIVE_BUFFER_REGISTER (0x00 + SERIAL_BASE_ADDRESS)
@@ -824,36 +823,31 @@ static void f81232_lsr_worker(struct work_struct *work)
static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
u16 size, void *val)
{
- struct usb_device *dev = interface_to_usbdev(intf);
- int retry = F81534A_ACCESS_REG_RETRY;
- int status;
-
- while (retry--) {
- status = usb_control_msg_send(dev,
- 0,
- F81232_REGISTER_REQUEST,
- F81232_SET_REGISTER,
- reg,
- 0,
- val,
- size,
- USB_CTRL_SET_TIMEOUT,
- GFP_KERNEL);
- if (status) {
- status = usb_translate_errors(status);
- if (status == -EIO)
- continue;
- }
-
- break;
- }
-
- if (status) {
- dev_err(&intf->dev, "failed to set register 0x%x: %d\n",
- reg, status);
- }
+ return usb_control_msg_send(interface_to_usbdev(intf),
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_SET_REGISTER,
+ reg,
+ 0,
+ val,
+ size,
+ USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
+}
- return status;
+static int f81534a_ctrl_get_register(struct usb_interface *intf, u16 reg,
+ u16 size, void *val)
+{
+ return usb_control_msg_recv(interface_to_usbdev(intf),
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_GET_REGISTER,
+ reg,
+ 0,
+ val,
+ size,
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
}
static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en)
@@ -869,6 +863,29 @@ static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en)
* bit 0~11 : Serial port enable bit.
*/
if (en) {
+ /*
+ * The Fintek F81532A/534A/535/536 family relies on the
+ * F81534A_CTRL_CMD_ENABLE_PORT (116h) register during
+ * initialization to both determine serial port status and
+ * control port creation.
+ *
+ * If the driver experiences fast load/unload cycles, the
+ * device state may become unstable, resulting in the
+ * incomplete generation of serial ports.
+ *
+ * Performing a dummy read operation on the register prior
+ * to the initial write command resolves the issue.
+ *
+ * This clears the device's stale internal state. Subsequent
+ * write operations will correctly generate all serial ports.
+ */
+ status = f81534a_ctrl_get_register(intf,
+ F81534A_CTRL_CMD_ENABLE_PORT,
+ sizeof(enable),
+ enable);
+ if (status)
+ return status;
+
enable[0] = 0xff;
enable[1] = 0x8f;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index fe2f21d85737..acb48b1c83f7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -848,6 +848,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID, 1) },
+ { USB_DEVICE(FTDI_VID, FTDI_AXE027_PID) },
{ USB_DEVICE_INTERFACE_NUMBER(FTDI_VID, FTDI_TURTELIZER_PID, 1) },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 2539b9e2f712..6c76cfebfd0e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -96,6 +96,8 @@
#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
#define LMI_LM3S_ICDI_BOARD_PID 0xbcda
+#define FTDI_AXE027_PID 0xBD90 /* PICAXE AXE027 USB download cable */
+
#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
/* OpenDCC (www.opendcc.de) product id */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 4c0e5a3ab557..9f2cc5fb9f45 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1505,6 +1505,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1252, 0xff) }, /* Telit LE910Cx (MBIM) */
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4ca2746ce16b..be49a976428f 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7890,7 +7890,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
- if (!port->role_sw)
+ if (IS_ERR_OR_NULL(port->role_sw))
port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
err = PTR_ERR(port->role_sw);
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 4438637c8900..6d6fc85835d4 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -115,6 +115,10 @@ config BTRFS_EXPERIMENTAL
- extent tree v2 - complex rework of extent tracking
- - large folio support
+ - large folio and block size (> page size) support
+
+ - shutdown ioctl and auto-degradation support
+
+ - asynchronous checksum generation for data writes
If unsure, say N.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d8ca5b6e88e0..89022e9f393b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1661,7 +1661,7 @@ static void backup_super_roots(struct btrfs_fs_info *info)
btrfs_set_backup_chunk_root_level(root_backup,
btrfs_header_level(info->chunk_root->node));
- if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
+ if (!btrfs_fs_incompat(info, EXTENT_TREE_V2)) {
struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
@@ -3255,6 +3255,15 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
return 0;
}
+static bool fs_is_full_ro(const struct btrfs_fs_info *fs_info)
+{
+ if (!sb_rdonly(fs_info->sb))
+ return false;
+ if (unlikely(fs_info->mount_opt & BTRFS_MOUNT_FULL_RO_MASK))
+ return true;
+ return false;
+}
+
int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
{
u32 sectorsize;
@@ -3363,6 +3372,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
+ /* If the fs has any rescue options, no transaction is allowed. */
+ if (fs_is_full_ro(fs_info))
+ WRITE_ONCE(fs_info->fs_error, -EROFS);
+
/* Set up fs_info before parsing mount options */
nodesize = btrfs_super_nodesize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
@@ -3489,6 +3502,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
+ if (unlikely(btrfs_verify_dev_items(fs_info))) {
+ ret = -EUCLEAN;
+ goto fail_block_groups;
+ }
ret = btrfs_verify_dev_extents(fs_info);
if (ret) {
btrfs_err(fs_info,
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 0f7e1ef27891..8ffbc40ebe45 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -264,6 +264,14 @@ enum {
BTRFS_MOUNT_REF_TRACKER = (1ULL << 33),
};
+/* These mount options require a full read-only fs; no new transaction is allowed. */
+#define BTRFS_MOUNT_FULL_RO_MASK \
+ (BTRFS_MOUNT_NOLOGREPLAY | \
+ BTRFS_MOUNT_IGNOREBADROOTS | \
+ BTRFS_MOUNT_IGNOREDATACSUMS | \
+ BTRFS_MOUNT_IGNOREMETACSUMS | \
+ BTRFS_MOUNT_IGNORESUPERFLAGS)
+
/*
* Compat flags that we support. If any incompat flags are set other than the
* ones specified below then we will fail to mount
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d2b302ac6af9..a2b5b440637e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4180,6 +4180,15 @@ cache_acl:
return 0;
out:
+ /*
+ * We may have a read locked leaf and iget_failed() triggers inode
+ * eviction which needs to release the delayed inode and that needs
+ * to lock the delayed inode's mutex. This can cause an ABBA deadlock
+ * with a task running delayed items, as that requires first locking
+ * the delayed inode's mutex and then modifying its subvolume btree.
+ * So release the path before iget_failed().
+ */
+ btrfs_release_path(path);
iget_failed(vfs_inode);
return ret;
}
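
The deadlock the new comment describes, drawn out as a lock-order schematic (not code):

	/*
	 *	task A (this error path)	task B (runs delayed items)
	 *	------------------------	---------------------------
	 *	holds btree leaf read lock	mutex_lock(delayed inode mutex)
	 *	iget_failed() -> evict()	modifies the subvolume btree,
	 *	 -> mutex_lock(delayed		 blocks on the leaf lock
	 *	    inode mutex): blocked
	 *
	 * Releasing the path (and with it the leaf lock) before calling
	 * iget_failed() removes task A's half of the cycle.
	 */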
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index b5fe95baf92e..58dc3e5057ce 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -705,7 +705,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
struct inode *src = file_inode(file_src);
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
int ret;
- int wb_ret;
u64 len = olen;
u64 bs = fs_info->sectorsize;
u64 end;
@@ -750,25 +749,29 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+ if (ret < 0)
+ return ret;
/*
* We may have copied an inline extent into a page of the destination
- * range, so wait for writeback to complete before truncating pages
+ * range, so wait for writeback to complete before invalidating pages
* from the page cache. This is a rare case.
*/
- wb_ret = btrfs_wait_ordered_range(BTRFS_I(inode), destoff, len);
- ret = ret ? ret : wb_ret;
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), destoff, len);
+ if (ret < 0)
+ return ret;
+
/*
- * Truncate page cache pages so that future reads will see the cloned
- * data immediately and not the previous data.
+ * Invalidate page cache so that future reads will see the cloned data
+ * immediately and not the previous data.
*/
- truncate_inode_pages_range(&inode->i_data,
- round_down(destoff, PAGE_SIZE),
- round_up(destoff + len, PAGE_SIZE) - 1);
+ ret = filemap_invalidate_inode(inode, false, destoff, end);
+ if (ret < 0)
+ return ret;
btrfs_btree_balance_dirty(fs_info);
- return ret;
+ return 0;
}
static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 2522faa97478..d8127a7120c2 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6383,6 +6383,8 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
extent_end = btrfs_file_extent_end(path);
if (extent_end <= start)
goto next;
+ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE)
+ return 0;
if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
search_start = extent_end;
goto next;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 6babbe333741..3f08e450f796 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -306,18 +306,22 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
0);
if (ret)
- return ret;
+ goto out_free;
}
ret = btrfs_sysfs_add_space_info_type(space_info);
if (ret)
- return ret;
+ goto out_free;
list_add(&space_info->list, &info->space_info);
if (flags & BTRFS_BLOCK_GROUP_DATA)
info->data_sinfo = space_info;
return ret;
+
+out_free:
+ kfree(space_info);
+ return ret;
}
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 1f64c132b387..4b3c2acac51a 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -26,7 +26,6 @@
#include "misc.h"
#include "fs.h"
#include "accessors.h"
-#include "zoned.h"
/*
* Structure name Path
@@ -1189,56 +1188,6 @@ static ssize_t btrfs_commit_stats_store(struct kobject *kobj,
}
BTRFS_ATTR_RW(, commit_stats, btrfs_commit_stats_show, btrfs_commit_stats_store);
-static ssize_t btrfs_zoned_stats_show(struct kobject *kobj,
- struct kobj_attribute *a, char *buf)
-{
- struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- struct btrfs_block_group *bg;
- size_t ret = 0;
-
-
- if (!btrfs_is_zoned(fs_info))
- return ret;
-
- spin_lock(&fs_info->zone_active_bgs_lock);
- ret += sysfs_emit_at(buf, ret, "active block-groups: %zu\n",
- list_count_nodes(&fs_info->zone_active_bgs));
- spin_unlock(&fs_info->zone_active_bgs_lock);
-
- mutex_lock(&fs_info->reclaim_bgs_lock);
- spin_lock(&fs_info->unused_bgs_lock);
- ret += sysfs_emit_at(buf, ret, "\treclaimable: %zu\n",
- list_count_nodes(&fs_info->reclaim_bgs));
- ret += sysfs_emit_at(buf, ret, "\tunused: %zu\n",
- list_count_nodes(&fs_info->unused_bgs));
- spin_unlock(&fs_info->unused_bgs_lock);
- mutex_unlock(&fs_info->reclaim_bgs_lock);
-
- ret += sysfs_emit_at(buf, ret, "\tneed reclaim: %s\n",
- str_true_false(btrfs_zoned_should_reclaim(fs_info)));
-
- if (fs_info->data_reloc_bg)
- ret += sysfs_emit_at(buf, ret,
- "data relocation block-group: %llu\n",
- fs_info->data_reloc_bg);
- if (fs_info->treelog_bg)
- ret += sysfs_emit_at(buf, ret,
- "tree-log block-group: %llu\n",
- fs_info->treelog_bg);
-
- spin_lock(&fs_info->zone_active_bgs_lock);
- ret += sysfs_emit_at(buf, ret, "active zones:\n");
- list_for_each_entry(bg, &fs_info->zone_active_bgs, active_bg_list) {
- ret += sysfs_emit_at(buf, ret,
- "\tstart: %llu, wp: %llu used: %llu, reserved: %llu, unusable: %llu\n",
- bg->start, bg->alloc_offset, bg->used,
- bg->reserved, bg->zone_unusable);
- }
- spin_unlock(&fs_info->zone_active_bgs_lock);
- return ret;
-}
-BTRFS_ATTR(, zoned_stats, btrfs_zoned_stats_show);
-
static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
@@ -1651,7 +1600,6 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
BTRFS_ATTR_PTR(, commit_stats),
BTRFS_ATTR_PTR(, temp_fsid),
- BTRFS_ATTR_PTR(, zoned_stats),
#ifdef CONFIG_BTRFS_EXPERIMENTAL
BTRFS_ATTR_PTR(, offload_csum),
#endif
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 0b9f25dd1a68..aabf825e8d7b 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -1059,6 +1059,7 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
if (out_stripe_len != BTRFS_STRIPE_LEN) {
test_err("calculated stripe length doesn't match");
+ ret = -EINVAL;
goto out;
}
@@ -1066,12 +1067,14 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
for (i = 0; i < out_ndaddrs; i++)
test_msg("mapped %llu", logical[i]);
test_err("unexpected number of mapped addresses: %d", out_ndaddrs);
+ ret = -EINVAL;
goto out;
}
for (i = 0; i < out_ndaddrs; i++) {
if (logical[i] != test->mapped_logical[i]) {
test_err("unexpected logical address mapped");
+ ret = -EINVAL;
goto out;
}
}
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index e9124605974b..0fcc31beeffe 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -517,11 +517,11 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
root->fs_info->fs_root = tmp_root;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
+ btrfs_put_root(tmp_root);
if (ret) {
test_err("couldn't insert fs root %d", ret);
goto out;
}
- btrfs_put_root(tmp_root);
tmp_root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(tmp_root)) {
@@ -532,11 +532,11 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
+ btrfs_put_root(tmp_root);
if (ret) {
- test_err("couldn't insert fs root %d", ret);
+ test_err("couldn't insert subvolume root %d", ret);
goto out;
}
- btrfs_put_root(tmp_root);
test_msg("running qgroup tests");
ret = test_no_shared_qgroup(root, sectorsize, nodesize);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2d9d38b82daa..6cffcf0c3e7a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2798,7 +2798,7 @@ static int replay_one_buffer(struct extent_buffer *eb,
nritems = btrfs_header_nritems(eb);
for (wc->log_slot = 0; wc->log_slot < nritems; wc->log_slot++) {
- struct btrfs_inode_item *inode_item;
+ struct btrfs_inode_item *inode_item = NULL;
btrfs_item_key_to_cpu(eb, &wc->log_key, wc->log_slot);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 13c514684cfb..8a08412f3529 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1364,7 +1364,9 @@ struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
}
+ filemap_invalidate_lock(mapping);
page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
+ filemap_invalidate_unlock(mapping);
if (IS_ERR(page))
return ERR_CAST(page);
@@ -7257,6 +7259,7 @@ static int read_one_dev(struct extent_buffer *leaf,
return -EINVAL;
}
}
+ set_bit(BTRFS_DEV_STATE_ITEM_FOUND, &device->dev_state);
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
@@ -8083,6 +8086,45 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
}
/*
+ * Ensure that all devices registered in the fs have their device items in the
+ * chunk tree.
+ *
+ * Return true if an unexpected device is found.
+ * Return false otherwise.
+ */
+bool btrfs_verify_dev_items(const struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *seed_devs;
+ struct btrfs_device *dev;
+ bool ret = false;
+
+ mutex_lock(&uuid_mutex);
+ list_for_each_entry(dev, &fs_info->fs_devices->devices, dev_list) {
+ if (!test_bit(BTRFS_DEV_STATE_ITEM_FOUND, &dev->dev_state)) {
+ btrfs_err(fs_info,
+ "devid %llu path %s is registered but not found in chunk tree",
+ dev->devid, btrfs_dev_name(dev));
+ ret = true;
+ }
+ }
+ list_for_each_entry(seed_devs, &fs_info->fs_devices->seed_list, seed_list) {
+ list_for_each_entry(dev, &seed_devs->devices, dev_list) {
+ if (!test_bit(BTRFS_DEV_STATE_ITEM_FOUND, &dev->dev_state)) {
+ btrfs_err(fs_info,
+ "devid %llu path %s is registered but not found in chunk tree",
+ dev->devid, btrfs_dev_name(dev));
+ ret = true;
+ }
+ }
+ }
+ mutex_unlock(&uuid_mutex);
+ if (ret)
+ btrfs_err(fs_info,
+"remove the above devices or use 'btrfs device scan --forget <dev>' to unregister them before mount");
+ return ret;
+}
+
+/*
* Check whether the given block group or device is pinned by any inode being
* used as a swapfile.
*/
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 34b854c1a303..f20abeb16bce 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,9 @@ enum btrfs_raid_types {
#define BTRFS_DEV_STATE_FLUSH_SENT (4)
#define BTRFS_DEV_STATE_NO_READA (5)
+/* Set when the device item is found in the chunk tree, used to catch unexpectedly registered devices. */
+#define BTRFS_DEV_STATE_ITEM_FOUND (7)
+
/* Special value encoding failure to write primary super block. */
#define BTRFS_SUPER_PRIMARY_WRITE_ERROR (INT_MAX / 2)
@@ -893,6 +896,7 @@ enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+bool btrfs_verify_dev_items(const struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 0550fd30fd10..635fb8a52e0c 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -393,9 +393,11 @@ out:
repair_branches:
ret2 = 0;
+ ext4_double_down_write_data_sem(orig_inode, donor_inode);
r_len = ext4_swap_extents(handle, donor_inode, orig_inode,
mext->donor_lblk, orig_map->m_lblk,
*m_len, 0, &ret2);
+ ext4_double_up_write_data_sem(orig_inode, donor_inode);
if (ret2 || r_len != *m_len) {
ext4_error_inode_block(orig_inode, (sector_t)(orig_map->m_lblk),
EIO, "Unable to copy data block, data will be lost!");
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 2e02efbddaac..4ed8ddf2a60b 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1037,6 +1037,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
ext4_error_inode(ea_inode, __func__, __LINE__, 0,
"EA inode %lu ref wraparound: ref_count=%lld ref_change=%d",
ea_inode->i_ino, ref_count, ref_change);
+ brelse(iloc.bh);
ret = -EFSCORRUPTED;
goto out;
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6800886c4d10..baa2f2141146 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2750,8 +2750,13 @@ static void wait_sb_inodes(struct super_block *sb)
* The mapping can appear untagged while still on-list since we
* do not have the mapping lock. Skip it here, wb completion
* will remove it.
+ *
+ * If the mapping does not have data integrity semantics,
+ * there's no need to wait for the writeout to complete, as the
+ * mapping cannot guarantee that data is persistently stored.
*/
- if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK) ||
+ mapping_no_data_integrity(mapping))
continue;
spin_unlock_irq(&sb->s_inode_wblist_lock);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 01bc894e9c2b..3b2a171e652f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -3200,8 +3200,10 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags)
inode->i_fop = &fuse_file_operations;
inode->i_data.a_ops = &fuse_file_aops;
- if (fc->writeback_cache)
+ if (fc->writeback_cache) {
mapping_set_writeback_may_deadlock_on_reclaim(&inode->i_data);
+ mapping_set_no_data_integrity(&inode->i_data);
+ }
INIT_LIST_HEAD(&fi->write_files);
INIT_LIST_HEAD(&fi->queued_writes);
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index ab76120705e2..134d7f760a33 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -417,8 +417,10 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
d->map = bl_map_simple;
d->pr_key = v->scsi.pr_key;
- if (d->len == 0)
- return -ENODEV;
+ if (d->len == 0) {
+ error = -ENODEV;
+ goto out_blkdev_put;
+ }
ops = bdev->bd_disk->fops->pr_ops;
if (!ops) {
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 2248e3ad089a..8a3857a49d84 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -149,7 +149,7 @@ static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags)
{
if (S_ISDIR(inode->i_mode) && !directory_delegations)
- nfs_inode_evict_delegation(inode);
+ nfs4_inode_set_return_delegation_on_close(inode);
return nfs4_do_check_delegation(inode, type, flags, true);
}
@@ -581,6 +581,10 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
if (delegation == NULL)
return 0;
+ /* Directory delegations don't require any state recovery */
+ if (!S_ISREG(inode->i_mode))
+ goto out_return;
+
if (!issync)
mode |= O_NONBLOCK;
/* Recall of any remaining application leases */
@@ -604,6 +608,7 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
goto out;
}
+out_return:
err = nfs_do_return_delegation(inode, delegation, issync);
out:
/* Refcount matched in nfs_start_delegation_return_locked() */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 23a78a742b61..8f9ea79b7882 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1440,7 +1440,8 @@ static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
if (!dir || !nfs_verify_change_attribute(dir, verf))
return;
- if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0))
+ if (NFS_PROTO(dir)->have_delegation(dir, FMODE_READ, 0) ||
+ (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0)))
nfs_set_verifier_delegated(&verf);
dentry->d_time = verf;
}
@@ -1465,6 +1466,49 @@ void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
EXPORT_SYMBOL_GPL(nfs_set_verifier);
#if IS_ENABLED(CONFIG_NFS_V4)
+static void nfs_clear_verifier_file(struct inode *inode)
+{
+ struct dentry *alias;
+ struct inode *dir;
+
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&alias->d_lock);
+ dir = d_inode_rcu(alias->d_parent);
+ if (!dir ||
+ !NFS_PROTO(dir)->have_delegation(dir, FMODE_READ, 0))
+ nfs_unset_verifier_delegated(&alias->d_time);
+ spin_unlock(&alias->d_lock);
+ }
+}
+
+static void nfs_clear_verifier_directory(struct inode *dir)
+{
+ struct dentry *this_parent;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ if (hlist_empty(&dir->i_dentry))
+ return;
+ this_parent =
+ hlist_entry(dir->i_dentry.first, struct dentry, d_u.d_alias);
+
+ spin_lock(&this_parent->d_lock);
+ nfs_unset_verifier_delegated(&this_parent->d_time);
+ dentry = d_first_child(this_parent);
+ hlist_for_each_entry_from(dentry, d_sib) {
+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+ continue;
+ inode = d_inode_rcu(dentry);
+ if (inode &&
+ NFS_PROTO(inode)->have_delegation(inode, FMODE_READ, 0))
+ continue;
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ nfs_unset_verifier_delegated(&dentry->d_time);
+ spin_unlock(&dentry->d_lock);
+ }
+ spin_unlock(&this_parent->d_lock);
+}
+
/**
* nfs_clear_verifier_delegated - clear the dir verifier delegation tag
* @inode: pointer to inode
@@ -1477,16 +1521,13 @@ EXPORT_SYMBOL_GPL(nfs_set_verifier);
*/
void nfs_clear_verifier_delegated(struct inode *inode)
{
- struct dentry *alias;
-
if (!inode)
return;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&alias->d_lock);
- nfs_unset_verifier_delegated(&alias->d_time);
- spin_unlock(&alias->d_lock);
- }
+ if (S_ISREG(inode->i_mode))
+ nfs_clear_verifier_file(inode);
+ else if (S_ISDIR(inode->i_mode))
+ nfs_clear_verifier_directory(inode);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
@@ -1516,14 +1557,6 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
if (!nfs_dentry_verify_change(dir, dentry))
return 0;
- /*
- * If we have a directory delegation then we don't need to revalidate
- * the directory. The delegation will either get recalled or we will
- * receive a notification when it changes.
- */
- if (nfs_have_directory_delegation(dir))
- return 0;
-
/* Revalidate nfsi->cache_change_attribute before we declare a match */
if (nfs_mapping_need_revalidate_inode(dir)) {
if (rcu_walk)
@@ -2217,13 +2250,6 @@ no_open:
EXPORT_SYMBOL_GPL(nfs_atomic_open);
static int
-nfs_lookup_revalidate_delegated_parent(struct inode *dir, struct dentry *dentry,
- struct inode *inode)
-{
- return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
-}
-
-static int
nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *dentry, unsigned int flags)
{
@@ -2247,12 +2273,10 @@ nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name,
if (inode == NULL)
goto full_reval;
- if (nfs_verifier_is_delegated(dentry))
+ if (nfs_verifier_is_delegated(dentry) ||
+ nfs_have_directory_delegation(inode))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
- if (nfs_have_directory_delegation(dir))
- return nfs_lookup_revalidate_delegated_parent(dir, dentry, inode);
-
/* NFS only supports OPEN on regular files */
if (!S_ISREG(inode->i_mode))
goto full_reval;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index d020aab40c64..d1c138a416cf 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -511,7 +511,8 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
current_is_kswapd() || current_is_kcompactd())
return false;
- if (nfs_wb_folio(folio->mapping->host, folio) < 0)
+ if (nfs_wb_folio_reclaim(folio->mapping->host, folio) < 0 ||
+ folio_test_private(folio))
return false;
}
return nfs_fscache_release_folio(folio, gfp);
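
A short note on the contract this hunk relies on (my reading, hedged):

	/* In reclaim context we may start writeback but must never wait on
	 * it, so nfs_wb_folio_reclaim() fails fast with -EBUSY for a folio
	 * already under writeback; folio_test_private() then refuses
	 * release while requests are still attached after the flush. */
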
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index c55ea8fa3bfa..c2d8a13a9dbd 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -103,7 +103,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
sizeof(struct nfs4_ff_ds_version),
gfp_flags);
if (!ds_versions)
- goto out_scratch;
+ goto out_err_drain_dsaddrs;
for (i = 0; i < version_count; i++) {
/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 84049f3cd340..de2cce1d08f4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -716,7 +716,7 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
- loff_t oldsize = i_size_read(inode);
+ loff_t oldsize;
int error = 0;
kuid_t task_uid = current_fsuid();
kuid_t owner_uid = inode->i_uid;
@@ -727,6 +727,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
attr->ia_valid &= ~ATTR_MODE;
+ if (S_ISREG(inode->i_mode))
+ nfs_file_block_o_direct(NFS_I(inode));
+
+ oldsize = i_size_read(inode);
if (attr->ia_valid & ATTR_SIZE) {
BUG_ON(!S_ISREG(inode->i_mode));
@@ -774,10 +778,8 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode)) {
- nfs_file_block_o_direct(NFS_I(inode));
+ if (S_ISREG(inode->i_mode))
nfs_sync_inode(inode);
- }
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
if (fattr == NULL) {
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index d275b0a250bf..8337f0ae852d 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -84,6 +84,7 @@ nfs_start_io_write(struct inode *inode)
nfs_file_block_o_direct(NFS_I(inode));
return err;
}
+EXPORT_SYMBOL_GPL(nfs_start_io_write);
/**
* nfs_end_io_write - declare that the buffered write operation is done
@@ -97,6 +98,7 @@ nfs_end_io_write(struct inode *inode)
{
up_write(&inode->i_rwsem);
}
+EXPORT_SYMBOL_GPL(nfs_end_io_write);
/* Call with exclusively locked inode->i_rwsem */
static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index a113bfdacfd6..41fbcb3f9167 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -461,6 +461,8 @@ nfs_local_iters_init(struct nfs_local_kiocb *iocb, int rw)
v = 0;
total = hdr->args.count;
base = hdr->args.pgbase;
+ pagevec += base >> PAGE_SHIFT;
+ base &= ~PAGE_MASK;
while (total && v < hdr->page_array.npages) {
len = min_t(size_t, total, PAGE_SIZE - base);
bvec_set_page(&iocb->bvec[v], *pagevec, len, base);
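	/* Worked example of the normalization above, assuming PAGE_SIZE is
	 * 4096: with hdr->args.pgbase == 5000 the I/O starts 904 bytes into
	 * the second page of the array:
	 *
	 *	pagevec += 5000 >> PAGE_SHIFT;	advances by one page
	 *	base = 5000 & ~PAGE_MASK;	base == 904
	 *
	 * Without it, the first bvec_set_page() below would be handed an
	 * offset larger than PAGE_SIZE.
	 */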
@@ -618,7 +620,6 @@ static void nfs_local_call_read(struct work_struct *work)
struct nfs_local_kiocb *iocb =
container_of(work, struct nfs_local_kiocb, work);
struct file *filp = iocb->kiocb.ki_filp;
- bool force_done = false;
ssize_t status;
int n_iters;
@@ -637,13 +638,13 @@ static void nfs_local_call_read(struct work_struct *work)
scoped_with_creds(filp->f_cred)
status = filp->f_op->read_iter(&iocb->kiocb, &iocb->iters[i]);
- if (status != -EIOCBQUEUED) {
- if (unlikely(status >= 0 && status < iocb->iters[i].count))
- force_done = true; /* Partial read */
- if (nfs_local_pgio_done(iocb, status, force_done)) {
- nfs_local_read_iocb_done(iocb);
- break;
- }
+ if (status == -EIOCBQUEUED)
+ continue;
+ /* Break on completion, errors, or short reads */
+ if (nfs_local_pgio_done(iocb, status, false) || status < 0 ||
+ (size_t)status < iov_iter_count(&iocb->iters[i])) {
+ nfs_local_read_iocb_done(iocb);
+ break;
}
}
}
@@ -821,7 +822,6 @@ static void nfs_local_call_write(struct work_struct *work)
container_of(work, struct nfs_local_kiocb, work);
struct file *filp = iocb->kiocb.ki_filp;
unsigned long old_flags = current->flags;
- bool force_done = false;
ssize_t status;
int n_iters;
@@ -843,13 +843,13 @@ static void nfs_local_call_write(struct work_struct *work)
scoped_with_creds(filp->f_cred)
status = filp->f_op->write_iter(&iocb->kiocb, &iocb->iters[i]);
- if (status != -EIOCBQUEUED) {
- if (unlikely(status >= 0 && status < iocb->iters[i].count))
- force_done = true; /* Partial write */
- if (nfs_local_pgio_done(iocb, status, force_done)) {
- nfs_local_write_iocb_done(iocb);
- break;
- }
+ if (status == -EIOCBQUEUED)
+ continue;
+ /* Break on completion, errors, or short writes */
+ if (nfs_local_pgio_done(iocb, status, false) || status < 0 ||
+ (size_t)status < iov_iter_count(&iocb->iters[i])) {
+ nfs_local_write_iocb_done(iocb);
+ break;
}
}
file_end_write(filp);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index d537fb0c230e..c08520828708 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -114,7 +114,6 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
exception.inode = inode;
exception.state = lock->open_context->state;
- nfs_file_block_o_direct(NFS_I(inode));
err = nfs_sync_inode(inode);
if (err)
goto out;
@@ -138,13 +137,17 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
};
struct inode *inode = file_inode(filep);
- loff_t oldsize = i_size_read(inode);
+ loff_t oldsize;
int err;
if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
return -EOPNOTSUPP;
- inode_lock(inode);
+ err = nfs_start_io_write(inode);
+ if (err)
+ return err;
+
+ oldsize = i_size_read(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
@@ -155,7 +158,7 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
NFS_CAP_ZERO_RANGE);
- inode_unlock(inode);
+ nfs_end_io_write(inode);
return err;
}
@@ -170,7 +173,9 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
return -EOPNOTSUPP;
- inode_lock(inode);
+ err = nfs_start_io_write(inode);
+ if (err)
+ return err;
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == 0)
@@ -179,7 +184,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
NFS_SERVER(inode)->caps &= ~(NFS_CAP_DEALLOCATE |
NFS_CAP_ZERO_RANGE);
- inode_unlock(inode);
+ nfs_end_io_write(inode);
return err;
}
@@ -189,14 +194,17 @@ int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
};
struct inode *inode = file_inode(filep);
- loff_t oldsize = i_size_read(inode);
+ loff_t oldsize;
int err;
if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
return -EOPNOTSUPP;
- inode_lock(inode);
+ err = nfs_start_io_write(inode);
+ if (err)
+ return err;
+ oldsize = i_size_read(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == 0) {
nfs_truncate_last_folio(inode->i_mapping, oldsize,
@@ -205,7 +213,7 @@ int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
} else if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;
- inode_unlock(inode);
+ nfs_end_io_write(inode);
return err;
}
@@ -416,7 +424,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
struct nfs_server *src_server = NFS_SERVER(src_inode);
loff_t pos_src = args->src_pos;
loff_t pos_dst = args->dst_pos;
- loff_t oldsize_dst = i_size_read(dst_inode);
+ loff_t oldsize_dst;
size_t count = args->count;
ssize_t status;
@@ -461,6 +469,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
&src_lock->open_context->state->flags);
set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
&dst_lock->open_context->state->flags);
+ oldsize_dst = i_size_read(dst_inode);
status = nfs4_call_sync(dst_server->client, dst_server, &msg,
&args->seq_args, &res->seq_res, 0);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ec1ce593dea2..a0885ae55abc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3894,8 +3894,8 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
- calldata->lr.roc = pnfs_roc(state->inode,
- &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
+ calldata->lr.roc = pnfs_roc(state->inode, &calldata->lr.arg,
+ &calldata->lr.res, msg.rpc_cred, wait);
if (calldata->lr.roc) {
calldata->arg.lr_args = &calldata->lr.arg;
calldata->res.lr_res = &calldata->lr.res;
@@ -4494,6 +4494,25 @@ static bool should_request_dir_deleg(struct inode *inode)
}
#endif /* CONFIG_NFS_V4_1 */
+static void nfs4_call_getattr_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_call_sync_data *data = calldata;
+ nfs4_setup_sequence(data->seq_server->nfs_client, data->seq_args,
+ data->seq_res, task);
+}
+
+static void nfs4_call_getattr_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_call_sync_data *data = calldata;
+
+ nfs4_sequence_process(task, data->seq_res);
+}
+
+static const struct rpc_call_ops nfs4_call_getattr_ops = {
+ .rpc_call_prepare = nfs4_call_getattr_prepare,
+ .rpc_call_done = nfs4_call_getattr_done,
+};
+
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct inode *inode)
{
@@ -4511,16 +4530,26 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
+ struct nfs4_call_sync_data data = {
+ .seq_server = server,
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_call_getattr_ops,
+ .callback_data = &data,
+ };
struct nfs4_gdd_res gdd_res;
- unsigned short task_flags = 0;
int status;
if (nfs4_has_session(server->nfs_client))
- task_flags = RPC_TASK_MOVEABLE;
+ task_setup.flags = RPC_TASK_MOVEABLE;
/* Is this is an attribute revalidation, subject to softreval? */
if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
- task_flags |= RPC_TASK_TIMEOUT;
+ task_setup.flags |= RPC_TASK_TIMEOUT;
args.get_dir_deleg = should_request_dir_deleg(inode);
if (args.get_dir_deleg)
@@ -4530,22 +4559,24 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
nfs_fattr_init(fattr);
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
- status = nfs4_do_call_sync(server->client, server, &msg,
- &args.seq_args, &res.seq_res, task_flags);
+ status = nfs4_call_sync_custom(&task_setup);
+
if (args.get_dir_deleg) {
switch (status) {
case 0:
if (gdd_res.status != GDD4_OK)
break;
- status = nfs_inode_set_delegation(
- inode, current_cred(), FMODE_READ,
- &gdd_res.deleg, 0, NFS4_OPEN_DELEGATE_READ);
+ nfs_inode_set_delegation(inode, current_cred(),
+ FMODE_READ, &gdd_res.deleg, 0,
+ NFS4_OPEN_DELEGATE_READ);
break;
case -ENOTSUPP:
case -EOPNOTSUPP:
server->caps &= ~NFS_CAP_DIR_DELEG;
}
}
+
+ nfs4_sequence_free_slot(&res.seq_res);
return status;
}
@@ -7005,7 +7036,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data->inode = nfs_igrab_and_active(inode);
if (data->inode || issync) {
data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
- cred);
+ cred, issync);
if (data->lr.roc) {
data->args.lr_args = &data->lr.arg;
data->res.lr_res = &data->lr.res;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 01179f7de322..dba51c622cf3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1445,6 +1445,8 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
struct nfs4_state *state;
bool found = false;
+ if (!S_ISREG(inode->i_mode))
+ goto out;
rcu_read_lock();
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
state = ctx->state;
@@ -1466,7 +1468,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
found = true;
}
rcu_read_unlock();
-
+out:
nfs_inode_find_delegation_state_and_recover(inode, stateid);
if (found)
nfs4_schedule_state_manager(clp);
@@ -1478,6 +1480,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *ctx;
+ if (!S_ISREG(inode->i_mode))
+ return;
rcu_read_lock();
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 6ce55e8e6b67..9f9ce4a565ea 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1062,6 +1062,9 @@ DECLARE_EVENT_CLASS(nfs_folio_event_done,
DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done);
+DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio_reclaim);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_reclaim_done);
+
DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b72d7cc36766..cff225721d1c 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1533,10 +1533,9 @@ static int pnfs_layout_return_on_reboot(struct pnfs_layout_hdr *lo)
PNFS_FL_LAYOUTRETURN_PRIVILEGED);
}
-bool pnfs_roc(struct inode *ino,
- struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- const struct cred *cred)
+bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res, const struct cred *cred,
+ bool sync)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_open_context *ctx;
@@ -1547,7 +1546,7 @@ bool pnfs_roc(struct inode *ino,
nfs4_stateid stateid;
enum pnfs_iomode iomode = 0;
bool layoutreturn = false, roc = false;
- bool skip_read = false;
+ bool skip_read;
if (!nfs_have_layout(ino))
return false;
@@ -1560,20 +1559,14 @@ retry:
lo = NULL;
goto out_noroc;
}
- pnfs_get_layout_hdr(lo);
- if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
- spin_unlock(&ino->i_lock);
- rcu_read_unlock();
- wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
- TASK_UNINTERRUPTIBLE);
- pnfs_put_layout_hdr(lo);
- goto retry;
- }
/* no roc if we hold a delegation */
+ skip_read = false;
if (nfs4_check_delegation(ino, FMODE_READ)) {
- if (nfs4_check_delegation(ino, FMODE_WRITE))
+ if (nfs4_check_delegation(ino, FMODE_WRITE)) {
+ lo = NULL;
goto out_noroc;
+ }
skip_read = true;
}
@@ -1582,12 +1575,43 @@ retry:
if (state == NULL)
continue;
/* Don't return layout if there is open file state */
- if (state->state & FMODE_WRITE)
+ if (state->state & FMODE_WRITE) {
+ lo = NULL;
goto out_noroc;
+ }
if (state->state & FMODE_READ)
skip_read = true;
}
+ if (skip_read) {
+ bool writes = false;
+
+ list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode != IOMODE_READ) {
+ writes = true;
+ break;
+ }
+ }
+ if (!writes) {
+ lo = NULL;
+ goto out_noroc;
+ }
+ }
+
+ pnfs_get_layout_hdr(lo);
+ if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+ if (!sync) {
+ pnfs_set_plh_return_info(
+ lo, skip_read ? IOMODE_RW : IOMODE_ANY, 0);
+ goto out_noroc;
+ }
+ spin_unlock(&ino->i_lock);
+ rcu_read_unlock();
+ wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+ TASK_UNINTERRUPTIBLE);
+ pnfs_put_layout_hdr(lo);
+ goto retry;
+ }
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
@@ -1627,7 +1651,7 @@ retry:
out_noroc:
spin_unlock(&ino->i_lock);
rcu_read_unlock();
- pnfs_layoutcommit_inode(ino, true);
+ pnfs_layoutcommit_inode(ino, sync);
if (roc) {
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
if (ld->prepare_layoutreturn)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 91ff877185c8..3db8f13d8fe4 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -303,10 +303,9 @@ int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
u32 seq);
int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
struct list_head *lseg_list);
-bool pnfs_roc(struct inode *ino,
- struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- const struct cred *cred);
+bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res, const struct cred *cred,
+ bool sync);
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
struct nfs4_layoutreturn_res **respp, int *ret);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
@@ -773,12 +772,10 @@ pnfs_layoutcommit_outstanding(struct inode *inode)
return false;
}
-
-static inline bool
-pnfs_roc(struct inode *ino,
- struct nfs4_layoutreturn_args *args,
- struct nfs4_layoutreturn_res *res,
- const struct cred *cred)
+static inline bool pnfs_roc(struct inode *ino,
+ struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res,
+ const struct cred *cred, bool sync)
{
return false;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 336c510f3750..bf412455e8ed 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2025,6 +2025,39 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
}
/**
+ * nfs_wb_folio_reclaim - Write back all requests on one folio
+ * @inode: pointer to inode
+ * @folio: pointer to folio
+ *
+ * Assumes that the folio has been locked by the caller
+ */
+int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio)
+{
+ loff_t range_start = folio_pos(folio);
+ size_t len = folio_size(folio);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0,
+ .range_start = range_start,
+ .range_end = range_start + len - 1,
+ .for_sync = 1,
+ };
+ int ret;
+
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+ if (folio_clear_dirty_for_io(folio)) {
+ trace_nfs_writeback_folio_reclaim(inode, range_start, len);
+ ret = nfs_writepage_locked(folio, &wbc);
+ trace_nfs_writeback_folio_reclaim_done(inode, range_start, len,
+ ret);
+ return ret;
+ }
+ nfs_commit_inode(inode, 0);
+ return 0;
+}
+
+/**
* nfs_wb_folio - Write back all requests on one page
 * @inode: pointer to inode
* @folio: pointer to folio
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index d97295eaebe6..c19d6d713780 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -848,15 +848,16 @@ sparse_alloc:
* invalid inode records, such as records that start at agbno 0
* or extend beyond the AG.
*
- * Set min agbno to the first aligned, non-zero agbno and max to
- * the last aligned agbno that is at least one full chunk from
- * the end of the AG.
+ * Set min agbno to the first chunk-aligned, non-zero agbno and
+ * max to one less than the last chunk-aligned agbno from the
+ * end of the AG. We subtract 1 from max so that the cluster
+ * allocation alignment takes over and allows allocation within
+ * the last full inode chunk in the AG.
*/
args.min_agbno = args.mp->m_sb.sb_inoalignmt;
args.max_agbno = round_down(xfs_ag_block_count(args.mp,
pag_agno(pag)),
- args.mp->m_sb.sb_inoalignmt) -
- igeo->ialloc_blks;
+ args.mp->m_sb.sb_inoalignmt) - 1;
error = xfs_alloc_vextent_near_bno(&args,
xfs_agbno_to_fsb(pag,
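
A worked example of the new bounds (geometry assumed): with sb_inoalignmt == 4 and an AG of 1003 blocks,

	args.min_agbno = 4;				/* first aligned, non-zero agbno */
	args.max_agbno = round_down(1003, 4) - 1;	/* 1000 - 1 == 999 */

so the last full inode chunk in the AG stays allocatable via cluster alignment.
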
diff --git a/fs/xfs/libxfs/xfs_rtgroup.c b/fs/xfs/libxfs/xfs_rtgroup.c
index 9186c58e83d5..be16efaa6925 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.c
+++ b/fs/xfs/libxfs/xfs_rtgroup.c
@@ -48,6 +48,31 @@ xfs_rtgroup_min_block(
return 0;
}
+/* Compute the number of rt extents in this realtime group. */
+static xfs_rtxnum_t
+__xfs_rtgroup_extents(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno,
+ xfs_rgnumber_t rgcount,
+ xfs_rtbxlen_t rextents)
+{
+ ASSERT(rgno < rgcount);
+ if (rgno == rgcount - 1)
+ return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
+
+ ASSERT(xfs_has_rtgroups(mp));
+ return mp->m_sb.sb_rgextents;
+}
+
+xfs_rtxnum_t
+xfs_rtgroup_extents(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
+ mp->m_sb.sb_rextents);
+}
+
/* Precompute this group's geometry */
void
xfs_rtgroup_calc_geometry(
@@ -58,7 +83,8 @@ xfs_rtgroup_calc_geometry(
xfs_rtbxlen_t rextents)
{
rtg->rtg_extents = __xfs_rtgroup_extents(mp, rgno, rgcount, rextents);
- rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
+ rtg_group(rtg)->xg_block_count =
+ rtg->rtg_extents * mp->m_sb.sb_rextsize;
rtg_group(rtg)->xg_min_gbno = xfs_rtgroup_min_block(mp, rgno);
}
@@ -136,31 +162,6 @@ out_unwind_new_rtgs:
return error;
}
-/* Compute the number of rt extents in this realtime group. */
-xfs_rtxnum_t
-__xfs_rtgroup_extents(
- struct xfs_mount *mp,
- xfs_rgnumber_t rgno,
- xfs_rgnumber_t rgcount,
- xfs_rtbxlen_t rextents)
-{
- ASSERT(rgno < rgcount);
- if (rgno == rgcount - 1)
- return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
-
- ASSERT(xfs_has_rtgroups(mp));
- return mp->m_sb.sb_rgextents;
-}
-
-xfs_rtxnum_t
-xfs_rtgroup_extents(
- struct xfs_mount *mp,
- xfs_rgnumber_t rgno)
-{
- return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
- mp->m_sb.sb_rextents);
-}
-
/*
* Update the rt extent count of the previous tail rtgroup if it changed during
* recovery (i.e. recovery of a growfs).
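
A worked example for the (relocated) extent computation, with assumed geometry:

	/* sb_rextents == 100, sb_rgextents == 32, sb_rgcount == 4:
	 * groups 0..2 hold 32 rt extents each, and the tail group holds
	 * 100 - 3 * 32 == 4. */
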
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index 03f1e2493334..73cace4d25c7 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -285,8 +285,6 @@ void xfs_free_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t first_rgno,
int xfs_initialize_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t first_rgno,
xfs_rgnumber_t end_rgno, xfs_rtbxlen_t rextents);
-xfs_rtxnum_t __xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno,
- xfs_rgnumber_t rgcount, xfs_rtbxlen_t rextents);
xfs_rtxnum_t xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno);
void xfs_rtgroup_calc_geometry(struct xfs_mount *mp, struct xfs_rtgroup *rtg,
xfs_rgnumber_t rgno, xfs_rgnumber_t rgcount,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a311385b23d8..d4544ccafea5 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1180,9 +1180,11 @@ xfs_log_cover(
int error = 0;
bool need_covered;
- ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
- !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
- xlog_is_shutdown(mp->m_log));
+ if (!xlog_is_shutdown(mp->m_log)) {
+ ASSERT(xlog_cil_empty(mp->m_log));
+ ASSERT(xlog_iclogs_empty(mp->m_log));
+ ASSERT(!xfs_ail_min_lsn(mp->m_log->l_ailp));
+ }
if (!xfs_log_writable(mp))
return 0;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index e063f4f2f2e6..a12ffed12391 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -126,7 +126,7 @@ xfs_rtcopy_summary(
error = 0;
out:
xfs_rtbuf_cache_relse(oargs);
- return 0;
+ return error;
}
/*
* Mark an extent specified by start and len allocated.
@@ -1265,7 +1265,7 @@ xfs_growfs_check_rtgeom(
uint32_t rem;
if (rextsize != 1)
- return -EINVAL;
+ goto out_inval;
div_u64_rem(nmp->m_sb.sb_rblocks, gblocks, &rem);
if (rem) {
xfs_warn(mp,
@@ -1326,7 +1326,7 @@ xfs_grow_last_rtg(
return true;
if (mp->m_sb.sb_rgcount == 0)
return false;
- return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <=
+ return xfs_rtgroup_extents(mp, mp->m_sb.sb_rgcount - 1) <
mp->m_sb.sb_rgextents;
}
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 1fff717cae51..4d679d2a206b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -46,7 +46,8 @@
*
* The mmu_gather API consists of:
*
- * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_gather_mmu_vma() /
+ * tlb_finish_mmu()
*
* start and finish a mmu_gather
*
@@ -364,6 +365,20 @@ struct mmu_gather {
unsigned int vma_huge : 1;
unsigned int vma_pfn : 1;
+ /*
+ * Did we unshare (unmap) any shared page tables? For now only
+ * used for hugetlb PMD table sharing.
+ */
+ unsigned int unshared_tables : 1;
+
+ /*
+ * Did we unshare any page tables such that they are now exclusive
+ * and could get reused+modified by the new owner? When setting this
+ * flag, "unshared_tables" will be set as well. For now only used
+ * for hugetlb PMD table sharing.
+ */
+ unsigned int fully_unshared_tables : 1;
+
unsigned int batch_count;
#ifndef CONFIG_MMU_GATHER_NO_GATHER
@@ -400,6 +415,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->cleared_pmds = 0;
tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0;
+ tlb->unshared_tables = 0;
/*
* Do not reset mmu_gather::vma_* fields here, we do not
* call into tlb_start_vma() again to set them if there is an
@@ -484,7 +500,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
* these bits.
*/
if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
- tlb->cleared_puds || tlb->cleared_p4ds))
+ tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables))
return;
tlb_flush(tlb);
@@ -773,6 +789,63 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
}
#endif
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt,
+ unsigned long addr)
+{
+ /*
+ * The caller must make sure that concurrent unsharing + exclusive
+ * reuse is impossible until tlb_flush_unshared_tables() has been called.
+ */
+ VM_WARN_ON_ONCE(!ptdesc_pmd_is_shared(pt));
+ ptdesc_pmd_pts_dec(pt);
+
+ /* Clearing a PUD pointing at a PMD table with PMD leaves. */
+ tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE);
+
+ /*
+ * If the page table is now exclusively owned, we fully unshared
+ * a page table.
+ */
+ if (!ptdesc_pmd_is_shared(pt))
+ tlb->fully_unshared_tables = true;
+ tlb->unshared_tables = true;
+}
+
+static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
+{
+ /*
+ * As soon as the caller drops locks to allow for reuse of
+ * previously-shared tables, these tables could get modified and
+ * even reused outside of hugetlb context, so we have to make sure that
+ * any page table walkers (incl. TLB, GUP-fast) are aware of that
+ * change.
+ *
+ * Even if we are not fully unsharing a PMD table, we must
+ * flush the TLB for the unsharer now.
+ */
+ if (tlb->unshared_tables)
+ tlb_flush_mmu_tlbonly(tlb);
+
+ /*
+ * Similarly, we must make sure that concurrent GUP-fast will not
+ * walk previously-shared page tables that are getting modified+reused
+ * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
+ *
+ * We only perform this when we are the last sharer of a page table,
+ * as the IPI will reach all CPUs and wait out any concurrent
+ * GUP-fast.
+ *
+ * Note that on configs where tlb_remove_table_sync_one() is a NOP,
+ * the expectation is that tlb_flush_mmu_tlbonly() has already issued
+ * the required IPIs for us.
+ */
+ if (tlb->fully_unshared_tables) {
+ tlb_remove_table_sync_one();
+ tlb->fully_unshared_tables = false;
+ }
+}
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
+
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */
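
A hedged usage sketch of the unshare API; the caller, its locking, and the walk are assumptions, only the mmu_gather calls come from this patch:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	/* while holding the locks that forbid concurrent unshare + reuse,
	 * call tlb_unshare_pmd_ptdesc(&tlb, pt, addr) for each shared PMD
	 * table in the range, then, before dropping those locks: */
	tlb_flush_unshared_tables(&tlb);
	tlb_finish_mmu(&tlb);
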
diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h
index 3f461f6b9bbf..3af12f82da2c 100644
--- a/include/drm/bridge/dw_hdmi_qp.h
+++ b/include/drm/bridge/dw_hdmi_qp.h
@@ -34,5 +34,6 @@ struct dw_hdmi_qp_plat_data {
struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
struct drm_encoder *encoder,
const struct dw_hdmi_qp_plat_data *plat_data);
+void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi);
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi);
#endif /* __DW_HDMI_QP__ */
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index df2f24b950e4..14d2859f0bda 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -552,6 +552,22 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
+ * drm_dp_dpcd_readb() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure. In most cases you should be using
+ * drm_dp_dpcd_read_byte() instead.
+ */
+static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read(aux, offset, valuep, 1);
+}
+
+/**
* drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -570,12 +586,29 @@ static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
void *buffer, size_t size)
{
int ret;
+ size_t i;
+ u8 *buf = buffer;
ret = drm_dp_dpcd_read(aux, offset, buffer, size);
- if (ret < 0)
- return ret;
- if (ret < size)
- return -EPROTO;
+ if (ret >= 0) {
+ if (ret < size)
+ return -EPROTO;
+ return 0;
+ }
+
+ /*
+ * Workaround for USB-C hubs/adapters with buggy firmware that fail
+ * multi-byte AUX reads but work with single-byte reads.
+ * Known affected devices:
+ * - Lenovo USB-C to VGA adapter (VIA VL817, idVendor=17ef, idProduct=7217)
+ * - Dell DA310 USB-C hub (idVendor=413c, idProduct=c010)
+ * Attempt byte-by-byte reading as a fallback.
+ */
+ for (i = 0; i < size; i++) {
+ ret = drm_dp_dpcd_readb(aux, offset + i, &buf[i]);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -610,22 +643,6 @@ static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
}
/**
- * drm_dp_dpcd_readb() - read a single byte from the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to read
- * @valuep: location where the value of the register will be stored
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure. In most of the cases you should be using
- * drm_dp_dpcd_read_byte() instead.
- */
-static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
- unsigned int offset, u8 *valuep)
-{
- return drm_dp_dpcd_read(aux, offset, valuep, 1);
-}
-
-/**
* drm_dp_dpcd_writeb() - write a single byte to the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to write
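
A small caller sketch (buffer and error handling assumed) showing that the fallback is transparent to drm_dp_dpcd_read_data() users:

	u8 buf[4];
	int err;

	/* On an affected hub the 4-byte AUX read fails, but the helper
	 * retries byte-by-byte and still returns 0 with buf populated. */
	err = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, buf, sizeof(buf));
	if (err)
		return err;
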
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
index 50e7c886709d..06851363ae0e 100644
--- a/include/dt-bindings/power/qcom,rpmhpd.h
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -264,5 +264,6 @@
#define SC8280XP_NSP 13
#define SC8280XP_QPHY 14
#define SC8280XP_XO 15
+#define SC8280XP_MXC_AO 16
#endif
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
index 469186df7826..08965970c17d 100644
--- a/include/hyperv/hvhdk.h
+++ b/include/hyperv/hvhdk.h
@@ -800,6 +800,53 @@ struct hv_x64_memory_intercept_message {
u8 instruction_bytes[16];
} __packed;
+#if IS_ENABLED(CONFIG_ARM64)
+union hv_arm64_vp_execution_state {
+ u16 as_uint16;
+ struct {
+ u16 cpl:2; /* Exception Level (EL) */
+ u16 debug_active:1;
+ u16 interruption_pending:1;
+ u16 vtl:4;
+ u16 virtualization_fault_active:1;
+ u16 reserved:7;
+ } __packed;
+};
+
+struct hv_arm64_intercept_message_header {
+ u32 vp_index;
+ u8 instruction_length;
+ u8 intercept_access_type;
+ union hv_arm64_vp_execution_state execution_state;
+ u64 pc;
+ u64 cpsr;
+} __packed;
+
+union hv_arm64_memory_access_info {
+ u8 as_uint8;
+ struct {
+ u8 gva_valid:1;
+ u8 gva_gpa_valid:1;
+ u8 hypercall_output_pending:1;
+ u8 reserved:5;
+ } __packed;
+};
+
+struct hv_arm64_memory_intercept_message {
+ struct hv_arm64_intercept_message_header header;
+ u32 cache_type; /* enum hv_cache_type */
+ u8 instruction_byte_count;
+ union hv_arm64_memory_access_info memory_access_info;
+ u16 reserved1;
+ u8 instruction_bytes[4];
+ u32 reserved2;
+ u64 guest_virtual_address;
+ u64 guest_physical_address;
+ u64 syndrome;
+} __packed;
+
+#endif /* CONFIG_ARM64 */
+
/*
* Dispatch state for the VP communicated by the hypervisor to the
* VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP.
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 43aa6153dc57..e7497f804644 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -18,7 +18,7 @@
* @power: The power consumed at this level (by 1 CPU or by a registered
* device). It can be a total power: static and dynamic.
* @cost: The cost coefficient associated with this level, used during
- * energy calculation. Equal to: power * max_frequency / frequency
+ * energy calculation. Equal to: 10 * power * max_frequency / frequency
* @flags: see "em_perf_state flags" description below.
*/
struct em_perf_state {
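
A worked example of the documented coefficient (values assumed):

	/* power == 300, max_frequency == 2000000, frequency == 1000000:
	 * cost == 10 * 300 * 2000000 / 1000000 == 6000 */
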
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 019a1c5281e4..e51b8ef0cebd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -240,8 +240,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
@@ -300,13 +301,17 @@ static inline struct address_space *hugetlb_folio_mapping_lock_write(
return NULL;
}
-static inline int huge_pmd_unshare(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline int huge_pmd_unshare(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
return 0;
}
+static inline void huge_pmd_unshare_flush(struct mmu_gather *tlb,
+ struct vm_area_struct *vma)
+{
+}
+
static inline void adjust_range_if_pmd_sharing_possible(
struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
@@ -1326,7 +1331,7 @@ static inline __init void hugetlb_cma_reserve(int order)
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
- return page_count(virt_to_page(pte)) > 1;
+ return ptdesc_pmd_is_shared(virt_to_ptdesc(pte));
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 0ad1ddbb8b99..e5822f6e7f27 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -211,6 +211,7 @@ struct kmem_obj_info;
* __kfence_obj_info() - fill kmem_obj_info struct
* @kpp: kmem_obj_info to be filled
* @object: the object
+ * @slab: the slab
*
* Return:
* * false - not a KFENCE object
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6f959d8ca4b4..f0d5be9dc736 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -608,7 +608,11 @@ enum {
/*
* Flags which should result in page tables being copied on fork. These are
* flags which indicate that the VMA maps page tables which cannot be
- * reconsistuted upon page fault, so necessitate page table copying upon
+ * reconstituted upon page fault, and so necessitate page table copying upon fork.
+ *
+ * Note that these flags should be compared with the DESTINATION VMA not the
+ * source, as VM_UFFD_WP may not be propagated to destination, while all other
+ * flags will be.
*
* VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
* reasonably reconstructed on page fault.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 42af2292951d..78950eb8926d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1329,7 +1329,7 @@ struct mm_struct {
* The mm_cpumask needs to be at the end of mm_struct, because it
* is dynamically sized based on nr_cpu_ids.
*/
- unsigned long cpu_bitmap[];
+ char flexible_array[] __aligned(__alignof__(unsigned long));
};
/* Copy value to the first system word of mm flags, non-atomically. */
@@ -1366,19 +1366,24 @@ static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
+#define MM_STRUCT_FLEXIBLE_ARRAY_INIT \
+{ \
+ [0 ... sizeof(cpumask_t) + MM_CID_STATIC_SIZE - 1] = 0 \
+}
+
/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
unsigned long cpu_bitmap = (unsigned long)mm;
- cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpu_bitmap += offsetof(struct mm_struct, flexible_array);
cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return (struct cpumask *)&mm->cpu_bitmap;
+ return (struct cpumask *)&mm->flexible_array;
}
#ifdef CONFIG_LRU_GEN
@@ -1469,7 +1474,7 @@ static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
{
unsigned long bitmap = (unsigned long)mm;
- bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ bitmap += offsetof(struct mm_struct, flexible_array);
/* Skip cpu_bitmap */
bitmap += cpumask_size();
return (struct cpumask *)bitmap;
@@ -1495,7 +1500,7 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
mm_init_cid(mm, p);
return 0;
}
-#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
+# define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
static inline void mm_destroy_cid(struct mm_struct *mm)
{
@@ -1509,6 +1514,8 @@ static inline unsigned int mm_cid_size(void)
return cpumask_size() + bitmap_size(num_possible_cpus());
}
+/* Use 2 * NR_CPUS as the worst case for static allocation. */
+# define MM_CID_STATIC_SIZE (2 * sizeof(cpumask_t))
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
@@ -1517,11 +1524,13 @@ static inline unsigned int mm_cid_size(void)
{
return 0;
}
+# define MM_CID_STATIC_SIZE 0
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
struct vm_fault;
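
The layout implied by the accessors above, sketched for the CONFIG_SCHED_MM_CID case (my reading of the offsets, not spelled out in the patch):

	/*
	 * struct mm_struct
	 * +------------------+ <- offsetof(struct mm_struct, flexible_array)
	 * | cpu bitmap       |	cpumask_size() bytes, mm_cpumask(mm)
	 * +------------------+
	 * | mm_cid cpumask   |	mm_cpus_allowed(mm)
	 * +------------------+
	 * | mm_cid bitmap    |	bitmap_size(num_possible_cpus()) bytes
	 * +------------------+
	 */
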
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75ef7c9f9307..fc5d6c88d2f0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1648,14 +1648,15 @@ static inline int is_highmem(const struct zone *zone)
return is_highmem_idx(zone_idx(zone));
}
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void);
-#else
+bool has_managed_zone(enum zone_type zone);
static inline bool has_managed_dma(void)
{
+#ifdef CONFIG_ZONE_DMA
+ return has_managed_zone(ZONE_DMA);
+#else
return false;
-}
#endif
+}
#ifndef CONFIG_NUMA
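
With the generic helper, any zone can be queried the same way (caller assumed):

	bool movable = has_managed_zone(ZONE_MOVABLE);
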
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a6624edb7226..8dd79a3f3d66 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -637,6 +637,7 @@ extern int nfs_update_folio(struct file *file, struct folio *folio,
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+extern int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio);
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index cf3c6ab408aa..207156f2143c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -83,6 +83,7 @@ static inline void reset_hung_task_detector(void) { }
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
+extern unsigned long hardlockup_si_mask;
#else
static inline void hardlockup_detector_disable(void) {}
#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 31a848485ad9..ec442af3f886 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -210,6 +210,7 @@ enum mapping_flags {
AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
account usage to user cgroups */
+ AS_NO_DATA_INTEGRITY = 11, /* no data integrity guarantees */
/* Bits 16-25 are used for FOLIO_ORDER */
AS_FOLIO_ORDER_BITS = 5,
AS_FOLIO_ORDER_MIN = 16,
@@ -345,6 +346,16 @@ static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct addres
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
+static inline void mapping_set_no_data_integrity(struct address_space *mapping)
+{
+ set_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
+}
+
+static inline bool mapping_no_data_integrity(const struct address_space *mapping)
+{
+ return test_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
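
An opt-in sketch mirroring the fuse change earlier in this diff (the inode context is assumed):

	/* At inode init time, a filesystem whose writeback cannot provide
	 * data-integrity guarantees opts out of sync-style waiting: */
	mapping_set_no_data_integrity(inode->i_mapping);
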
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 864775651c6f..b5cc0c2b9906 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2210,6 +2210,10 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
{
return -ENOSPC;
}
+
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+}
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d395f2810fac..da0133524d08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1874,7 +1874,6 @@ static inline int task_nice(const struct task_struct *p)
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
-extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0e1d73955fa5..95d0040df584 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -325,6 +325,7 @@ static inline void might_alloc(gfp_t gfp_mask)
/**
* memalloc_flags_save - Add a PF_* flag to current->flags, save old value
+ * @flags: Flags to add.
*
* This allows PF_* flags to be conveniently added, irrespective of current
* value, and then the old version restored with memalloc_flags_restore().
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 6673e4d4ac2e..4933777404d6 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -35,6 +35,7 @@ struct ts_state
* @get_pattern: return head of pattern
* @get_pattern_len: return length of pattern
* @owner: module reference to algorithm
+ * @list: entry in the list of registered search algorithms
*/
struct ts_ops
{
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 59409c1fc3de..2f7bd2fdc616 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -75,4 +75,7 @@
/* short SET_ADDRESS request timeout */
#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
+/* skip BOS descriptor request */
+#define USB_QUIRK_NO_BOS BIT(17)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cbccedf32228..6c11aa0e103b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3221,8 +3221,6 @@ struct cfg80211_auth_request {
* if this is %NULL for a link, that link is not requested
* @elems: extra elements for the per-STA profile for this link
* @elems_len: length of the elements
- * @disabled: If set this link should be included during association etc. but it
- * should not be used until enabled by the AP MLD.
* @error: per-link error code, must be <= 0. If there is an error, then the
* operation as a whole must fail.
*/
@@ -3230,7 +3228,6 @@ struct cfg80211_assoc_link {
struct cfg80211_bss *bss;
const u8 *elems;
size_t elems_len;
- bool disabled;
int error;
};
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 58fd6e84f961..a7860c047503 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1402,7 +1402,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
-void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
+int snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index de6f6d25767c..869f97c9bf73 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -322,6 +322,7 @@
EM(rxrpc_call_put_kernel, "PUT kernel ") \
EM(rxrpc_call_put_poke, "PUT poke ") \
EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_recvmsg_peek_nowait, "PUT peek-nwt") \
EM(rxrpc_call_put_release_recvmsg_q, "PUT rls-rcmq") \
EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
@@ -340,6 +341,9 @@
EM(rxrpc_call_see_input, "SEE input ") \
EM(rxrpc_call_see_notify_released, "SEE nfy-rlsd") \
EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_call_see_recvmsg_requeue, "SEE recv-rqu") \
+ EM(rxrpc_call_see_recvmsg_requeue_first, "SEE recv-rqF") \
+ EM(rxrpc_call_see_recvmsg_requeue_move, "SEE recv-rqM") \
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
diff --git a/include/uapi/linux/dev_energymodel.h b/include/uapi/linux/dev_energymodel.h
new file mode 100644
index 000000000000..355d8885c9a0
--- /dev/null
+++ b/include/uapi/linux/dev_energymodel.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dev-energymodel.yaml */
+/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _UAPI_LINUX_DEV_ENERGYMODEL_H
+#define _UAPI_LINUX_DEV_ENERGYMODEL_H
+
+#define DEV_ENERGYMODEL_FAMILY_NAME "dev-energymodel"
+#define DEV_ENERGYMODEL_FAMILY_VERSION 1
+
+/**
+ * enum dev_energymodel_perf_state_flags
+ * @DEV_ENERGYMODEL_PERF_STATE_FLAGS_PERF_STATE_INEFFICIENT: The performance
+ * state is inefficient. There is in this perf-domain, another performance
+ * state with a higher frequency but a lower or equal power cost.
+ */
+enum dev_energymodel_perf_state_flags {
+ DEV_ENERGYMODEL_PERF_STATE_FLAGS_PERF_STATE_INEFFICIENT = 1,
+};
+
+/**
+ * enum dev_energymodel_perf_domain_flags
+ * @DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_MICROWATTS: The power values
+ * are in micro-Watts or some other scale.
+ * @DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip
+ * inefficient states when estimating energy consumption.
+ * @DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_ARTIFICIAL: The power values
+ * are artificial and might be created by platform missing real power
+ * information.
+ */
+enum dev_energymodel_perf_domain_flags {
+ DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_MICROWATTS = 1,
+ DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_SKIP_INEFFICIENCIES = 2,
+ DEV_ENERGYMODEL_PERF_DOMAIN_FLAGS_PERF_DOMAIN_ARTIFICIAL = 4,
+};
+
+enum {
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_PAD = 1,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_FLAGS,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_CPUS,
+
+ __DEV_ENERGYMODEL_A_PERF_DOMAIN_MAX,
+ DEV_ENERGYMODEL_A_PERF_DOMAIN_MAX = (__DEV_ENERGYMODEL_A_PERF_DOMAIN_MAX - 1)
+};
+
+enum {
+ DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID = 1,
+ DEV_ENERGYMODEL_A_PERF_TABLE_PERF_STATE,
+
+ __DEV_ENERGYMODEL_A_PERF_TABLE_MAX,
+ DEV_ENERGYMODEL_A_PERF_TABLE_MAX = (__DEV_ENERGYMODEL_A_PERF_TABLE_MAX - 1)
+};
+
+enum {
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD = 1,
+ DEV_ENERGYMODEL_A_PERF_STATE_PERFORMANCE,
+ DEV_ENERGYMODEL_A_PERF_STATE_FREQUENCY,
+ DEV_ENERGYMODEL_A_PERF_STATE_POWER,
+ DEV_ENERGYMODEL_A_PERF_STATE_COST,
+ DEV_ENERGYMODEL_A_PERF_STATE_FLAGS,
+
+ __DEV_ENERGYMODEL_A_PERF_STATE_MAX,
+ DEV_ENERGYMODEL_A_PERF_STATE_MAX = (__DEV_ENERGYMODEL_A_PERF_STATE_MAX - 1)
+};
+
+enum {
+ DEV_ENERGYMODEL_CMD_GET_PERF_DOMAINS = 1,
+ DEV_ENERGYMODEL_CMD_GET_PERF_TABLE,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_CREATED,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_UPDATED,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_DELETED,
+
+ __DEV_ENERGYMODEL_CMD_MAX,
+ DEV_ENERGYMODEL_CMD_MAX = (__DEV_ENERGYMODEL_CMD_MAX - 1)
+};
+
+#define DEV_ENERGYMODEL_MCGRP_EVENT "event"
+
+#endif /* _UAPI_LINUX_DEV_ENERGYMODEL_H */
diff --git a/include/uapi/linux/energy_model.h b/include/uapi/linux/energy_model.h
deleted file mode 100644
index 0bcad967854f..000000000000
--- a/include/uapi/linux/energy_model.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
-/* Do not edit directly, auto-generated from: */
-/* Documentation/netlink/specs/em.yaml */
-/* YNL-GEN uapi header */
-/* To regenerate run: tools/net/ynl/ynl-regen.sh */
-
-#ifndef _UAPI_LINUX_ENERGY_MODEL_H
-#define _UAPI_LINUX_ENERGY_MODEL_H
-
-#define EM_FAMILY_NAME "em"
-#define EM_FAMILY_VERSION 1
-
-enum {
- EM_A_PDS_PD = 1,
-
- __EM_A_PDS_MAX,
- EM_A_PDS_MAX = (__EM_A_PDS_MAX - 1)
-};
-
-enum {
- EM_A_PD_PAD = 1,
- EM_A_PD_PD_ID,
- EM_A_PD_FLAGS,
- EM_A_PD_CPUS,
-
- __EM_A_PD_MAX,
- EM_A_PD_MAX = (__EM_A_PD_MAX - 1)
-};
-
-enum {
- EM_A_PD_TABLE_PD_ID = 1,
- EM_A_PD_TABLE_PS,
-
- __EM_A_PD_TABLE_MAX,
- EM_A_PD_TABLE_MAX = (__EM_A_PD_TABLE_MAX - 1)
-};
-
-enum {
- EM_A_PS_PAD = 1,
- EM_A_PS_PERFORMANCE,
- EM_A_PS_FREQUENCY,
- EM_A_PS_POWER,
- EM_A_PS_COST,
- EM_A_PS_FLAGS,
-
- __EM_A_PS_MAX,
- EM_A_PS_MAX = (__EM_A_PS_MAX - 1)
-};
-
-enum {
- EM_CMD_GET_PDS = 1,
- EM_CMD_GET_PD_TABLE,
- EM_CMD_PD_CREATED,
- EM_CMD_PD_UPDATED,
- EM_CMD_PD_DELETED,
-
- __EM_CMD_MAX,
- EM_CMD_MAX = (__EM_CMD_MAX - 1)
-};
-
-#define EM_MCGRP_EVENT "event"
-
-#endif /* _UAPI_LINUX_ENERGY_MODEL_H */
diff --git a/include/uapi/linux/ext4.h b/include/uapi/linux/ext4.h
index 411dcc1e4a35..9c683991c32f 100644
--- a/include/uapi/linux/ext4.h
+++ b/include/uapi/linux/ext4.h
@@ -139,7 +139,7 @@ struct ext4_tune_sb_params {
__u32 clear_feature_incompat_mask;
__u32 clear_feature_ro_compat_mask;
__u8 mount_opts[64];
- __u8 pad[64];
+ __u8 pad[68];
};
#define EXT4_TUNE_FL_ERRORS_BEHAVIOR 0x00000001
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index f030adc462ee..75fd7f5e6cc3 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -216,6 +216,23 @@ struct landlock_net_port_attr {
* :manpage:`ftruncate(2)`, :manpage:`creat(2)`, or :manpage:`open(2)` with
* ``O_TRUNC``. This access right is available since the third version of the
* Landlock ABI.
+ * - %LANDLOCK_ACCESS_FS_IOCTL_DEV: Invoke :manpage:`ioctl(2)` commands on an opened
+ * character or block device.
+ *
+ * This access right applies to all `ioctl(2)` commands implemented by device
+ * drivers. However, the following common IOCTL commands continue to be
+ * invokable independent of the %LANDLOCK_ACCESS_FS_IOCTL_DEV right:
+ *
+ * * IOCTL commands targeting file descriptors (``FIOCLEX``, ``FIONCLEX``),
+ * * IOCTL commands targeting file descriptions (``FIONBIO``, ``FIOASYNC``),
+ * * IOCTL commands targeting file systems (``FIFREEZE``, ``FITHAW``,
+ * ``FIGETBSZ``, ``FS_IOC_GETFSUUID``, ``FS_IOC_GETFSSYSFSPATH``)
+ * * Some IOCTL commands which do not make sense when used with devices, but
+ * whose implementations are safe and return the right error codes
+ * (``FS_IOC_FIEMAP``, ``FICLONE``, ``FICLONERANGE``, ``FIDEDUPERANGE``)
+ *
+ * This access right is available since the fifth version of the Landlock
+ * ABI.
*
* Whether an opened file can be truncated with :manpage:`ftruncate(2)` or used
* with `ioctl(2)` is determined during :manpage:`open(2)`, in the same way as
@@ -275,26 +292,6 @@ struct landlock_net_port_attr {
* If multiple requirements are not met, the ``EACCES`` error code takes
* precedence over ``EXDEV``.
*
- * The following access right applies both to files and directories:
- *
- * - %LANDLOCK_ACCESS_FS_IOCTL_DEV: Invoke :manpage:`ioctl(2)` commands on an opened
- * character or block device.
- *
- * This access right applies to all `ioctl(2)` commands implemented by device
- * drivers. However, the following common IOCTL commands continue to be
- * invokable independent of the %LANDLOCK_ACCESS_FS_IOCTL_DEV right:
- *
- * * IOCTL commands targeting file descriptors (``FIOCLEX``, ``FIONCLEX``),
- * * IOCTL commands targeting file descriptions (``FIONBIO``, ``FIOASYNC``),
- * * IOCTL commands targeting file systems (``FIFREEZE``, ``FITHAW``,
- * ``FIGETBSZ``, ``FS_IOC_GETFSUUID``, ``FS_IOC_GETFSSYSFSPATH``)
- * * Some IOCTL commands which do not make sense when used with devices, but
- * whose implementations are safe and return the right error codes
- * (``FS_IOC_FIEMAP``, ``FICLONE``, ``FICLONERANGE``, ``FIDEDUPERANGE``)
- *
- * This access right is available since the fifth version of the Landlock
- * ABI.
- *
* .. warning::
*
* It is currently not possible to restrict some file-related actions
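
For illustration, a minimal userspace sketch of handling the access right documented above (not part of this patch). It assumes Landlock ABI version 5 or later; the function name is invented and error handling is condensed:

#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Deny ioctl(2) on devices everywhere: handle the right, grant it nowhere. */
static int deny_device_ioctls(void)
{
	struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_IOCTL_DEV,
	};
	int fd = syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);

	if (fd < 0)
		return -1;
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return syscall(SYS_landlock_restrict_self, fd, 0);
}
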
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 964e1c779cdd..7c12badf85dc 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2880,8 +2880,9 @@ enum nl80211_commands {
* index. If the userspace includes more RNR elements than number of
* MBSSID elements then these will be added in every EMA beacon.
*
- * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
- * disabled.
+ * @NL80211_ATTR_MLO_LINK_DISABLED: Unused. It was used to indicate that a link
+ * is disabled during association. However, the AP conveys this
+ * information by including a TTLM in the association response.
*
* @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e.
* include BSSes that can only be used in restricted scenarios and/or
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 87a87396e940..b7a077c11c21 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3003,12 +3003,12 @@ static __cold void io_ring_exit_work(struct work_struct *work)
mutex_unlock(&ctx->uring_lock);
}
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- io_move_task_work_from_local(ctx);
-
/* The SQPOLL thread never reaches this path */
- while (io_uring_try_cancel_requests(ctx, NULL, true, false))
+ do {
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ io_move_task_work_from_local(ctx);
cond_resched();
+ } while (io_uring_try_cancel_requests(ctx, NULL, true, false));
if (ctx->sq_data) {
struct io_sq_data *sqd = ctx->sq_data;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 554a02ee298b..5f0d33b04910 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic process-grouping system.
*
@@ -20,10 +21,6 @@
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* ---------------------------------------------------
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 3e8cc34d8d50..c06e2e96f79d 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kernel/cpuset.c
*
@@ -16,10 +17,6 @@
* 2006 Rework by Paul Menage to use generic cgroups
* 2008 Rework of the scheduler domains and CPU hotplug handling
* by Max Krasnyansky
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
*/
#include "cpuset-internal.h"
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 915b02f65980..817c33450fee 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
/*
* cgroup_freezer.c - control group freezer subsystem
*
* Copyright IBM Corporation, 2007
*
* Author : Cedric Le Goater <clg@fr.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/export.h>
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 26392badc36b..c5da29ad010c 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
return pool;
}
+#ifdef CONFIG_ZONE_DMA32
+#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
+#else
+#define has_managed_dma32 false
+#endif
+
static int __init dma_atomic_pool_init(void)
{
int ret = 0;
@@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
}
INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
- atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+ /* All memory might be in the DMA zone(s) to begin with */
+ if (has_managed_zone(ZONE_NORMAL)) {
+ atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL);
- if (!atomic_pool_kernel)
- ret = -ENOMEM;
+ if (!atomic_pool_kernel)
+ ret = -ENOMEM;
+ }
if (has_managed_dma()) {
atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL | GFP_DMA);
if (!atomic_pool_dma)
ret = -ENOMEM;
}
- if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+ if (has_managed_dma32) {
atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL | GFP_DMA32);
if (!atomic_pool_dma32)
@@ -224,11 +233,11 @@ postcore_initcall(dma_atomic_pool_init);
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
if (prev == NULL) {
- if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
- return atomic_pool_dma32;
- if (atomic_pool_dma && (gfp & GFP_DMA))
- return atomic_pool_dma;
- return atomic_pool_kernel;
+ if (gfp & GFP_DMA)
+ return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
+ if (gfp & GFP_DMA32)
+ return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
+ return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
}
if (prev == atomic_pool_kernel)
return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
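
The fallback chains above use the GNU `a ?: b` conditional, which yields `a` when it is non-NULL and `b` otherwise, without evaluating `a` twice. A standalone sketch of the same idea (names invented):

#include <stdio.h>

/* Prefer the first non-NULL pool, mirroring dma_guess_pool()'s order. */
static const char *pick_pool(const char *dma, const char *dma32,
			     const char *kernel)
{
	return dma ?: dma32 ?: kernel;
}

int main(void)
{
	printf("%s\n", pick_pool(NULL, "dma32", "kernel")); /* prints "dma32" */
	return 0;
}
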
diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
index 9dc51fab604f..d4482b6e3cae 100644
--- a/kernel/liveupdate/kexec_handover.c
+++ b/kernel/liveupdate/kexec_handover.c
@@ -460,27 +460,23 @@ static void __init deserialize_bitmap(unsigned int order,
}
}
-/* Return true if memory was deserialized */
-static bool __init kho_mem_deserialize(const void *fdt)
+/* Returns physical address of the preserved memory map from FDT */
+static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
{
- struct khoser_mem_chunk *chunk;
const void *mem_ptr;
- u64 mem;
int len;
mem_ptr = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len);
if (!mem_ptr || len != sizeof(u64)) {
pr_err("failed to get preserved memory bitmaps\n");
- return false;
+ return 0;
}
- mem = get_unaligned((const u64 *)mem_ptr);
- chunk = mem ? phys_to_virt(mem) : NULL;
-
- /* No preserved physical pages were passed, no deserialization */
- if (!chunk)
- return false;
+ return get_unaligned((const u64 *)mem_ptr);
+}
+static void __init kho_mem_deserialize(struct khoser_mem_chunk *chunk)
+{
while (chunk) {
unsigned int i;
@@ -489,8 +485,6 @@ static bool __init kho_mem_deserialize(const void *fdt)
&chunk->bitmaps[i]);
chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
}
-
- return true;
}
/*
@@ -1253,6 +1247,7 @@ bool kho_finalized(void)
struct kho_in {
phys_addr_t fdt_phys;
phys_addr_t scratch_phys;
+ phys_addr_t mem_map_phys;
struct kho_debugfs dbg;
};
@@ -1434,12 +1429,10 @@ static void __init kho_release_scratch(void)
void __init kho_memory_init(void)
{
- if (kho_in.scratch_phys) {
+ if (kho_in.mem_map_phys) {
kho_scratch = phys_to_virt(kho_in.scratch_phys);
kho_release_scratch();
-
- if (!kho_mem_deserialize(kho_get_fdt()))
- kho_in.fdt_phys = 0;
+ kho_mem_deserialize(phys_to_virt(kho_in.mem_map_phys));
} else {
kho_reserve_scratch();
}
@@ -1448,8 +1441,9 @@ void __init kho_memory_init(void)
void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
phys_addr_t scratch_phys, u64 scratch_len)
{
- void *fdt = NULL;
struct kho_scratch *scratch = NULL;
+ phys_addr_t mem_map_phys;
+ void *fdt = NULL;
int err = 0;
unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
@@ -1475,6 +1469,12 @@ void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
goto out;
}
+ mem_map_phys = kho_get_mem_map_phys(fdt);
+ if (!mem_map_phys) {
+ err = -ENOENT;
+ goto out;
+ }
+
scratch = early_memremap(scratch_phys, scratch_len);
if (!scratch) {
pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
@@ -1515,6 +1515,7 @@ void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
kho_in.fdt_phys = fdt_phys;
kho_in.scratch_phys = scratch_phys;
+ kho_in.mem_map_phys = mem_map_phys;
kho_scratch_cnt = scratch_cnt;
pr_info("found kexec handover data.\n");
diff --git a/kernel/module/kmod.c b/kernel/module/kmod.c
index 25f253812512..a25dccdf7aa7 100644
--- a/kernel/module/kmod.c
+++ b/kernel/module/kmod.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kmod - the kernel module loader
*
diff --git a/kernel/panic.c b/kernel/panic.c
index 0d52210a9e2b..0c20fcaae98a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -131,7 +131,8 @@ static int proc_taint(const struct ctl_table *table, int write,
static int sysctl_panic_print_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- panic_print_deprecated();
+ if (write)
+ panic_print_deprecated();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
@@ -1014,7 +1015,6 @@ static int panic_print_set(const char *val, const struct kernel_param *kp)
static int panic_print_get(char *val, const struct kernel_param *kp)
{
- panic_print_deprecated();
return param_get_ulong(val, kp);
}
diff --git a/kernel/power/em_netlink.c b/kernel/power/em_netlink.c
index 4b85da138a06..5a611d3950fd 100644
--- a/kernel/power/em_netlink.c
+++ b/kernel/power/em_netlink.c
@@ -12,27 +12,35 @@
#include <linux/energy_model.h>
#include <net/sock.h>
#include <net/genetlink.h>
-#include <uapi/linux/energy_model.h>
+#include <uapi/linux/dev_energymodel.h>
#include "em_netlink.h"
#include "em_netlink_autogen.h"
-#define EM_A_PD_CPUS_LEN 256
-
/*************************** Command encoding ********************************/
+struct dump_ctx {
+ int idx;
+ int start;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+};
+
static int __em_nl_get_pd_size(struct em_perf_domain *pd, void *data)
{
- char cpus_buf[EM_A_PD_CPUS_LEN];
+ int nr_cpus, msg_sz, cpus_sz;
int *tot_msg_sz = data;
- int msg_sz, cpus_sz;
- cpus_sz = snprintf(cpus_buf, sizeof(cpus_buf), "%*pb",
- cpumask_pr_args(to_cpumask(pd->cpus)));
+ nr_cpus = cpumask_weight(to_cpumask(pd->cpus));
+ cpus_sz = nla_total_size_64bit(sizeof(u64)) * nr_cpus;
- msg_sz = nla_total_size(0) + /* EM_A_PDS_PD */
- nla_total_size(sizeof(u32)) + /* EM_A_PD_PD_ID */
- nla_total_size_64bit(sizeof(u64)) + /* EM_A_PD_FLAGS */
- nla_total_size(cpus_sz); /* EM_A_PD_CPUS */
+ msg_sz = nla_total_size(0) +
+ /* DEV_ENERGYMODEL_A_PERF_DOMAINS_PERF_DOMAIN */
+ nla_total_size(sizeof(u32)) +
+ /* DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID */
+ nla_total_size_64bit(sizeof(u64)) +
+ /* DEV_ENERGYMODEL_A_PERF_DOMAIN_FLAGS */
+ nla_total_size(cpus_sz);
+ /* DEV_ENERGYMODEL_A_PERF_DOMAIN_CPUS */
*tot_msg_sz += nlmsg_total_size(genlmsg_msg_size(msg_sz));
return 0;
@@ -40,56 +48,80 @@ static int __em_nl_get_pd_size(struct em_perf_domain *pd, void *data)
static int __em_nl_get_pd(struct em_perf_domain *pd, void *data)
{
- char cpus_buf[EM_A_PD_CPUS_LEN];
struct sk_buff *msg = data;
- struct nlattr *entry;
-
- entry = nla_nest_start(msg, EM_A_PDS_PD);
- if (!entry)
- goto out_cancel_nest;
+ struct cpumask *cpumask;
+ int cpu;
- if (nla_put_u32(msg, EM_A_PD_PD_ID, pd->id))
+ if (nla_put_u32(msg, DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID,
+ pd->id))
goto out_cancel_nest;
- if (nla_put_u64_64bit(msg, EM_A_PD_FLAGS, pd->flags, EM_A_PD_PAD))
+ if (nla_put_u64_64bit(msg, DEV_ENERGYMODEL_A_PERF_DOMAIN_FLAGS,
+ pd->flags, DEV_ENERGYMODEL_A_PERF_DOMAIN_PAD))
goto out_cancel_nest;
- snprintf(cpus_buf, sizeof(cpus_buf), "%*pb",
- cpumask_pr_args(to_cpumask(pd->cpus)));
- if (nla_put_string(msg, EM_A_PD_CPUS, cpus_buf))
- goto out_cancel_nest;
-
- nla_nest_end(msg, entry);
+ cpumask = to_cpumask(pd->cpus);
+ for_each_cpu(cpu, cpumask) {
+ if (nla_put_u64_64bit(msg, DEV_ENERGYMODEL_A_PERF_DOMAIN_CPUS,
+ cpu, DEV_ENERGYMODEL_A_PERF_DOMAIN_PAD))
+ goto out_cancel_nest;
+ }
return 0;
out_cancel_nest:
- nla_nest_cancel(msg, entry);
-
return -EMSGSIZE;
}
-int em_nl_get_pds_doit(struct sk_buff *skb, struct genl_info *info)
+static int __em_nl_get_pd_for_dump(struct em_perf_domain *pd, void *data)
{
- struct sk_buff *msg;
+ const struct genl_info *info;
+ struct dump_ctx *ctx = data;
void *hdr;
+ int ret;
+
+ if (ctx->idx++ < ctx->start)
+ return 0;
+
+ info = genl_info_dump(ctx->cb);
+ hdr = genlmsg_iput(ctx->skb, info);
+ if (!hdr) {
+ genlmsg_cancel(ctx->skb, hdr);
+ return -EMSGSIZE;
+ }
+
+ ret = __em_nl_get_pd(pd, ctx->skb);
+ genlmsg_end(ctx->skb, hdr);
+ return ret;
+}
+
+int dev_energymodel_nl_get_perf_domains_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ int id, ret = -EMSGSIZE, msg_sz = 0;
int cmd = info->genlhdr->cmd;
- int ret = -EMSGSIZE, msg_sz = 0;
+ struct em_perf_domain *pd;
+ struct sk_buff *msg;
+ void *hdr;
+
+ if (!info->attrs[DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID])
+ return -EINVAL;
- for_each_em_perf_domain(__em_nl_get_pd_size, &msg_sz);
+ id = nla_get_u32(info->attrs[DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID]);
+ pd = em_perf_domain_get_by_id(id);
+ __em_nl_get_pd_size(pd, &msg_sz);
msg = genlmsg_new(msg_sz, GFP_KERNEL);
if (!msg)
return -ENOMEM;
- hdr = genlmsg_put_reply(msg, info, &em_nl_family, 0, cmd);
+ hdr = genlmsg_put_reply(msg, info, &dev_energymodel_nl_family, 0, cmd);
if (!hdr)
goto out_free_msg;
- ret = for_each_em_perf_domain(__em_nl_get_pd, msg);
+ ret = __em_nl_get_pd(pd, msg);
if (ret)
goto out_cancel_msg;
-
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -98,19 +130,31 @@ out_cancel_msg:
genlmsg_cancel(msg, hdr);
out_free_msg:
nlmsg_free(msg);
-
return ret;
}
+int dev_energymodel_nl_get_perf_domains_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct dump_ctx ctx = {
+ .idx = 0,
+ .start = cb->args[0],
+ .skb = skb,
+ .cb = cb,
+ };
+
+ return for_each_em_perf_domain(__em_nl_get_pd_for_dump, &ctx);
+}
+
static struct em_perf_domain *__em_nl_get_pd_table_id(struct nlattr **attrs)
{
struct em_perf_domain *pd;
int id;
- if (!attrs[EM_A_PD_TABLE_PD_ID])
+ if (!attrs[DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID])
return NULL;
- id = nla_get_u32(attrs[EM_A_PD_TABLE_PD_ID]);
+ id = nla_get_u32(attrs[DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID]);
pd = em_perf_domain_get_by_id(id);
return pd;
}
@@ -119,25 +163,34 @@ static int __em_nl_get_pd_table_size(const struct em_perf_domain *pd)
{
int id_sz, ps_sz;
- id_sz = nla_total_size(sizeof(u32)); /* EM_A_PD_TABLE_PD_ID */
- ps_sz = nla_total_size(0) + /* EM_A_PD_TABLE_PS */
- nla_total_size_64bit(sizeof(u64)) + /* EM_A_PS_PERFORMANCE */
- nla_total_size_64bit(sizeof(u64)) + /* EM_A_PS_FREQUENCY */
- nla_total_size_64bit(sizeof(u64)) + /* EM_A_PS_POWER */
- nla_total_size_64bit(sizeof(u64)) + /* EM_A_PS_COST */
- nla_total_size_64bit(sizeof(u64)); /* EM_A_PS_FLAGS */
+ id_sz = nla_total_size(sizeof(u32));
+ /* DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID */
+ ps_sz = nla_total_size(0) +
+ /* DEV_ENERGYMODEL_A_PERF_TABLE_PERF_STATE */
+ nla_total_size_64bit(sizeof(u64)) +
+ /* DEV_ENERGYMODEL_A_PERF_STATE_PERFORMANCE */
+ nla_total_size_64bit(sizeof(u64)) +
+ /* DEV_ENERGYMODEL_A_PERF_STATE_FREQUENCY */
+ nla_total_size_64bit(sizeof(u64)) +
+ /* DEV_ENERGYMODEL_A_PERF_STATE_POWER */
+ nla_total_size_64bit(sizeof(u64)) +
+ /* DEV_ENERGYMODEL_A_PERF_STATE_COST */
+ nla_total_size_64bit(sizeof(u64));
+ /* DEV_ENERGYMODEL_A_PERF_STATE_FLAGS */
ps_sz *= pd->nr_perf_states;
return nlmsg_total_size(genlmsg_msg_size(id_sz + ps_sz));
}
-static int __em_nl_get_pd_table(struct sk_buff *msg, const struct em_perf_domain *pd)
+static
+int __em_nl_get_pd_table(struct sk_buff *msg, const struct em_perf_domain *pd)
{
struct em_perf_state *table, *ps;
struct nlattr *entry;
int i;
- if (nla_put_u32(msg, EM_A_PD_TABLE_PD_ID, pd->id))
+ if (nla_put_u32(msg, DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID,
+ pd->id))
goto out_err;
rcu_read_lock();
@@ -146,24 +199,35 @@ static int __em_nl_get_pd_table(struct sk_buff *msg, const struct em_perf_domain
for (i = 0; i < pd->nr_perf_states; i++) {
ps = &table[i];
- entry = nla_nest_start(msg, EM_A_PD_TABLE_PS);
+ entry = nla_nest_start(msg,
+ DEV_ENERGYMODEL_A_PERF_TABLE_PERF_STATE);
if (!entry)
goto out_unlock_ps;
- if (nla_put_u64_64bit(msg, EM_A_PS_PERFORMANCE,
- ps->performance, EM_A_PS_PAD))
+ if (nla_put_u64_64bit(msg,
+ DEV_ENERGYMODEL_A_PERF_STATE_PERFORMANCE,
+ ps->performance,
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD))
goto out_cancel_ps_nest;
- if (nla_put_u64_64bit(msg, EM_A_PS_FREQUENCY,
- ps->frequency, EM_A_PS_PAD))
+ if (nla_put_u64_64bit(msg,
+ DEV_ENERGYMODEL_A_PERF_STATE_FREQUENCY,
+ ps->frequency,
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD))
goto out_cancel_ps_nest;
- if (nla_put_u64_64bit(msg, EM_A_PS_POWER,
- ps->power, EM_A_PS_PAD))
+ if (nla_put_u64_64bit(msg,
+ DEV_ENERGYMODEL_A_PERF_STATE_POWER,
+ ps->power,
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD))
goto out_cancel_ps_nest;
- if (nla_put_u64_64bit(msg, EM_A_PS_COST,
- ps->cost, EM_A_PS_PAD))
+ if (nla_put_u64_64bit(msg,
+ DEV_ENERGYMODEL_A_PERF_STATE_COST,
+ ps->cost,
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD))
goto out_cancel_ps_nest;
- if (nla_put_u64_64bit(msg, EM_A_PS_FLAGS,
- ps->flags, EM_A_PS_PAD))
+ if (nla_put_u64_64bit(msg,
+ DEV_ENERGYMODEL_A_PERF_STATE_FLAGS,
+ ps->flags,
+ DEV_ENERGYMODEL_A_PERF_STATE_PAD))
goto out_cancel_ps_nest;
nla_nest_end(msg, entry);
@@ -179,7 +243,8 @@ out_err:
return -EMSGSIZE;
}
-int em_nl_get_pd_table_doit(struct sk_buff *skb, struct genl_info *info)
+int dev_energymodel_nl_get_perf_table_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
int cmd = info->genlhdr->cmd;
int msg_sz, ret = -EMSGSIZE;
@@ -197,7 +262,7 @@ int em_nl_get_pd_table_doit(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- hdr = genlmsg_put_reply(msg, info, &em_nl_family, 0, cmd);
+ hdr = genlmsg_put_reply(msg, info, &dev_energymodel_nl_family, 0, cmd);
if (!hdr)
goto out_free_msg;
@@ -221,7 +286,7 @@ static void __em_notify_pd_table(const struct em_perf_domain *pd, int ntf_type)
int msg_sz, ret = -EMSGSIZE;
void *hdr;
- if (!genl_has_listeners(&em_nl_family, &init_net, EM_NLGRP_EVENT))
+ if (!genl_has_listeners(&dev_energymodel_nl_family, &init_net, DEV_ENERGYMODEL_NLGRP_EVENT))
return;
msg_sz = __em_nl_get_pd_table_size(pd);
@@ -230,7 +295,7 @@ static void __em_notify_pd_table(const struct em_perf_domain *pd, int ntf_type)
if (!msg)
return;
- hdr = genlmsg_put(msg, 0, 0, &em_nl_family, 0, ntf_type);
+ hdr = genlmsg_put(msg, 0, 0, &dev_energymodel_nl_family, 0, ntf_type);
if (!hdr)
goto out_free_msg;
@@ -240,28 +305,28 @@ static void __em_notify_pd_table(const struct em_perf_domain *pd, int ntf_type)
genlmsg_end(msg, hdr);
- genlmsg_multicast(&em_nl_family, msg, 0, EM_NLGRP_EVENT, GFP_KERNEL);
+ genlmsg_multicast(&dev_energymodel_nl_family, msg, 0,
+ DEV_ENERGYMODEL_NLGRP_EVENT, GFP_KERNEL);
return;
out_free_msg:
nlmsg_free(msg);
- return;
}
void em_notify_pd_created(const struct em_perf_domain *pd)
{
- __em_notify_pd_table(pd, EM_CMD_PD_CREATED);
+ __em_notify_pd_table(pd, DEV_ENERGYMODEL_CMD_PERF_DOMAIN_CREATED);
}
void em_notify_pd_updated(const struct em_perf_domain *pd)
{
- __em_notify_pd_table(pd, EM_CMD_PD_UPDATED);
+ __em_notify_pd_table(pd, DEV_ENERGYMODEL_CMD_PERF_DOMAIN_UPDATED);
}
static int __em_notify_pd_deleted_size(const struct em_perf_domain *pd)
{
- int id_sz = nla_total_size(sizeof(u32)); /* EM_A_PD_TABLE_PD_ID */
+ int id_sz = nla_total_size(sizeof(u32)); /* DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID */
return nlmsg_total_size(genlmsg_msg_size(id_sz));
}
@@ -272,7 +337,8 @@ void em_notify_pd_deleted(const struct em_perf_domain *pd)
void *hdr;
int msg_sz;
- if (!genl_has_listeners(&em_nl_family, &init_net, EM_NLGRP_EVENT))
+ if (!genl_has_listeners(&dev_energymodel_nl_family, &init_net,
+ DEV_ENERGYMODEL_NLGRP_EVENT))
return;
msg_sz = __em_notify_pd_deleted_size(pd);
@@ -281,28 +347,29 @@ void em_notify_pd_deleted(const struct em_perf_domain *pd)
if (!msg)
return;
- hdr = genlmsg_put(msg, 0, 0, &em_nl_family, 0, EM_CMD_PD_DELETED);
+ hdr = genlmsg_put(msg, 0, 0, &dev_energymodel_nl_family, 0,
+ DEV_ENERGYMODEL_CMD_PERF_DOMAIN_DELETED);
if (!hdr)
goto out_free_msg;
- if (nla_put_u32(msg, EM_A_PD_TABLE_PD_ID, pd->id)) {
+ if (nla_put_u32(msg, DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID,
+ pd->id))
goto out_free_msg;
- }
genlmsg_end(msg, hdr);
- genlmsg_multicast(&em_nl_family, msg, 0, EM_NLGRP_EVENT, GFP_KERNEL);
+ genlmsg_multicast(&dev_energymodel_nl_family, msg, 0,
+ DEV_ENERGYMODEL_NLGRP_EVENT, GFP_KERNEL);
return;
out_free_msg:
nlmsg_free(msg);
- return;
}
/**************************** Initialization *********************************/
static int __init em_netlink_init(void)
{
- return genl_register_family(&em_nl_family);
+ return genl_register_family(&dev_energymodel_nl_family);
}
postcore_initcall(em_netlink_init);
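
The new dumpit relies on the usual genetlink resume idiom: `cb->args[0]` carries the count of objects already emitted across invocations. A generic, hand-written sketch of that idiom (the `example_obj`, `obj_list` and `emit_one` names are placeholders, not from this patch):

struct example_obj { struct list_head node; };
static LIST_HEAD(obj_list);
static int emit_one(struct sk_buff *skb, struct netlink_callback *cb,
		    struct example_obj *obj);

static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx = 0, start = cb->args[0];
	struct example_obj *obj;

	list_for_each_entry(obj, &obj_list, node) {
		if (idx++ < start)
			continue;	/* already sent in an earlier pass */
		if (emit_one(skb, cb, obj))
			break;		/* skb full; resume from here later */
	}
	cb->args[0] = idx;
	return skb->len;	/* non-empty skb: netlink invokes us again */
}
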
diff --git a/kernel/power/em_netlink_autogen.c b/kernel/power/em_netlink_autogen.c
index ceb3b2bb6ebe..fedd473e4244 100644
--- a/kernel/power/em_netlink_autogen.c
+++ b/kernel/power/em_netlink_autogen.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
-/* Documentation/netlink/specs/em.yaml */
+/* Documentation/netlink/specs/dev-energymodel.yaml */
/* YNL-GEN kernel source */
/* To regenerate run: tools/net/ynl/ynl-regen.sh */
@@ -9,41 +9,53 @@
#include "em_netlink_autogen.h"
-#include <uapi/linux/energy_model.h>
+#include <uapi/linux/dev_energymodel.h>
-/* EM_CMD_GET_PD_TABLE - do */
-static const struct nla_policy em_get_pd_table_nl_policy[EM_A_PD_TABLE_PD_ID + 1] = {
- [EM_A_PD_TABLE_PD_ID] = { .type = NLA_U32, },
+/* DEV_ENERGYMODEL_CMD_GET_PERF_DOMAINS - do */
+static const struct nla_policy dev_energymodel_get_perf_domains_nl_policy[DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID + 1] = {
+ [DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID] = { .type = NLA_U32, },
};
-/* Ops table for em */
-static const struct genl_split_ops em_nl_ops[] = {
+/* DEV_ENERGYMODEL_CMD_GET_PERF_TABLE - do */
+static const struct nla_policy dev_energymodel_get_perf_table_nl_policy[DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID + 1] = {
+ [DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID] = { .type = NLA_U32, },
+};
+
+/* Ops table for dev_energymodel */
+static const struct genl_split_ops dev_energymodel_nl_ops[] = {
+ {
+ .cmd = DEV_ENERGYMODEL_CMD_GET_PERF_DOMAINS,
+ .doit = dev_energymodel_nl_get_perf_domains_doit,
+ .policy = dev_energymodel_get_perf_domains_nl_policy,
+ .maxattr = DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID,
+ .flags = GENL_CMD_CAP_DO,
+ },
{
- .cmd = EM_CMD_GET_PDS,
- .doit = em_nl_get_pds_doit,
- .flags = GENL_CMD_CAP_DO,
+ .cmd = DEV_ENERGYMODEL_CMD_GET_PERF_DOMAINS,
+ .dumpit = dev_energymodel_nl_get_perf_domains_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
},
{
- .cmd = EM_CMD_GET_PD_TABLE,
- .doit = em_nl_get_pd_table_doit,
- .policy = em_get_pd_table_nl_policy,
- .maxattr = EM_A_PD_TABLE_PD_ID,
+ .cmd = DEV_ENERGYMODEL_CMD_GET_PERF_TABLE,
+ .doit = dev_energymodel_nl_get_perf_table_doit,
+ .policy = dev_energymodel_get_perf_table_nl_policy,
+ .maxattr = DEV_ENERGYMODEL_A_PERF_TABLE_PERF_DOMAIN_ID,
.flags = GENL_CMD_CAP_DO,
},
};
-static const struct genl_multicast_group em_nl_mcgrps[] = {
- [EM_NLGRP_EVENT] = { "event", },
+static const struct genl_multicast_group dev_energymodel_nl_mcgrps[] = {
+ [DEV_ENERGYMODEL_NLGRP_EVENT] = { "event", },
};
-struct genl_family em_nl_family __ro_after_init = {
- .name = EM_FAMILY_NAME,
- .version = EM_FAMILY_VERSION,
+struct genl_family dev_energymodel_nl_family __ro_after_init = {
+ .name = DEV_ENERGYMODEL_FAMILY_NAME,
+ .version = DEV_ENERGYMODEL_FAMILY_VERSION,
.netnsok = true,
.parallel_ops = true,
.module = THIS_MODULE,
- .split_ops = em_nl_ops,
- .n_split_ops = ARRAY_SIZE(em_nl_ops),
- .mcgrps = em_nl_mcgrps,
- .n_mcgrps = ARRAY_SIZE(em_nl_mcgrps),
+ .split_ops = dev_energymodel_nl_ops,
+ .n_split_ops = ARRAY_SIZE(dev_energymodel_nl_ops),
+ .mcgrps = dev_energymodel_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(dev_energymodel_nl_mcgrps),
};
diff --git a/kernel/power/em_netlink_autogen.h b/kernel/power/em_netlink_autogen.h
index 140ab548103c..5caf2f7e18a5 100644
--- a/kernel/power/em_netlink_autogen.h
+++ b/kernel/power/em_netlink_autogen.h
@@ -1,24 +1,28 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
-/* Documentation/netlink/specs/em.yaml */
+/* Documentation/netlink/specs/dev-energymodel.yaml */
/* YNL-GEN kernel header */
/* To regenerate run: tools/net/ynl/ynl-regen.sh */
-#ifndef _LINUX_EM_GEN_H
-#define _LINUX_EM_GEN_H
+#ifndef _LINUX_DEV_ENERGYMODEL_GEN_H
+#define _LINUX_DEV_ENERGYMODEL_GEN_H
#include <net/netlink.h>
#include <net/genetlink.h>
-#include <uapi/linux/energy_model.h>
+#include <uapi/linux/dev_energymodel.h>
-int em_nl_get_pds_doit(struct sk_buff *skb, struct genl_info *info);
-int em_nl_get_pd_table_doit(struct sk_buff *skb, struct genl_info *info);
+int dev_energymodel_nl_get_perf_domains_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int dev_energymodel_nl_get_perf_domains_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int dev_energymodel_nl_get_perf_table_doit(struct sk_buff *skb,
+ struct genl_info *info);
enum {
- EM_NLGRP_EVENT,
+ DEV_ENERGYMODEL_NLGRP_EVENT,
};
-extern struct genl_family em_nl_family;
+extern struct genl_family dev_energymodel_nl_family;
-#endif /* _LINUX_EM_GEN_H */
+#endif /* _LINUX_DEV_ENERGYMODEL_GEN_H */
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 11af9f64aa82..5b055cbe5341 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -449,8 +449,10 @@ static int em_create_pd(struct device *dev, int nr_states,
INIT_LIST_HEAD(&pd->node);
id = ida_alloc(&em_pd_ida, GFP_KERNEL);
- if (id < 0)
- return -ENOMEM;
+ if (id < 0) {
+ kfree(pd);
+ return id;
+ }
pd->id = id;
em_table = em_table_alloc(pd);
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 3fa403f9831f..32fc12e53675 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1557,18 +1557,27 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover();
while (nbcon_seq_read(con) < stop_seq) {
- if (!nbcon_context_try_acquire(ctxt, false))
- return -EPERM;
-
/*
- * nbcon_emit_next_record() returns false when the console was
- * handed over or taken over. In both cases the context is no
- * longer valid.
+ * Atomic flushing does not use console driver synchronization
+ * (i.e. it does not hold the port lock for uart consoles).
+ * Therefore IRQs must be disabled to avoid being interrupted
+ * and then calling into a driver that will deadlock trying
+ * to acquire console ownership.
*/
- if (!nbcon_emit_next_record(&wctxt, true))
- return -EAGAIN;
+ scoped_guard(irqsave) {
+ if (!nbcon_context_try_acquire(ctxt, false))
+ return -EPERM;
- nbcon_context_release(ctxt);
+ /*
+ * nbcon_emit_next_record() returns false when
+ * the console was handed over or taken over.
+ * In both cases the context is no longer valid.
+ */
+ if (!nbcon_emit_next_record(&wctxt, true))
+ return -EAGAIN;
+
+ nbcon_context_release(ctxt);
+ }
if (!ctxt->backlog) {
/* Are there reserved but not yet finalized records? */
@@ -1595,22 +1604,11 @@ static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
struct console_flush_type ft;
- unsigned long flags;
int err;
again:
- /*
- * Atomic flushing does not use console driver synchronization (i.e.
- * it does not hold the port lock for uart consoles). Therefore IRQs
- * must be disabled to avoid being interrupted and then calling into
- * a driver that will deadlock trying to acquire console ownership.
- */
- local_irq_save(flags);
-
err = __nbcon_atomic_flush_pending_con(con, stop_seq);
- local_irq_restore(flags);
-
/*
* If there was a new owner (-EPERM, -EAGAIN), that context is
* responsible for completing.
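
scoped_guard(irqsave) comes from <linux/cleanup.h>/<linux/spinlock.h>: it saves and disables interrupts on entry to the block and restores them on every exit path, including early returns. A minimal sketch of the shape used above (the `example_ctx`, `try_acquire`, `do_work` and `release` names are stand-ins):

static int guarded_step(struct example_ctx *c)
{
	scoped_guard(irqsave) {
		if (!try_acquire(c))
			return -EPERM;	/* IRQ state restored here too */
		do_work(c);
		release(c);
	}		/* IRQ state restored when the scope ends */
	return 0;
}
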
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 60afadb6eede..045f83ad261e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4950,9 +4950,13 @@ struct balance_callback *splice_balance_callbacks(struct rq *rq)
return __splice_balance_callbacks(rq, true);
}
-static void __balance_callbacks(struct rq *rq)
+void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
{
+ if (rf)
+ rq_unpin_lock(rq, rf);
do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
+ if (rf)
+ rq_repin_lock(rq, rf);
}
void balance_callbacks(struct rq *rq, struct balance_callback *head)
@@ -4991,7 +4995,7 @@ static inline void finish_lock_switch(struct rq *rq)
* prev into current:
*/
spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
- __balance_callbacks(rq);
+ __balance_callbacks(rq, NULL);
raw_spin_rq_unlock_irq(rq);
}
@@ -6867,7 +6871,7 @@ keep_resched:
proxy_tag_curr(rq, next);
rq_unpin_lock(rq, &rf);
- __balance_callbacks(rq);
+ __balance_callbacks(rq, NULL);
raw_spin_rq_unlock_irq(rq);
}
trace_sched_exit_tp(is_switch);
@@ -7316,7 +7320,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
trace_sched_pi_setprio(p, pi_task);
oldprio = p->prio;
- if (oldprio == prio)
+ if (oldprio == prio && !dl_prio(prio))
queue_flag &= ~DEQUEUE_MOVE;
prev_class = p->sched_class;
@@ -7362,9 +7366,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
out_unlock:
/* Caller holds task_struct::pi_lock, IRQs are still disabled */
- rq_unpin_lock(rq, &rf);
- __balance_callbacks(rq);
- rq_repin_lock(rq, &rf);
+ __balance_callbacks(rq, &rf);
__task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_RT_MUTEXES */
@@ -9124,6 +9126,8 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
if (resched)
resched_curr(rq);
+
+ __balance_callbacks(rq, &rq_guard.rf);
}
static struct cgroup_subsys_state *
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 319439fe1870..c509f2e7d69d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -752,8 +752,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
- update_rq_clock(rq);
-
WARN_ON(is_dl_boosted(dl_se));
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
@@ -1420,7 +1418,7 @@ update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, int
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
- bool idle = rq->curr == rq->idle;
+ bool idle = idle_rq(rq);
s64 scaled_delta_exec;
if (unlikely(delta_exec <= 0)) {
@@ -1603,8 +1601,8 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
* | 8 | B:zero_laxity-wait | | |
* | | | <---+ |
* | +--------------------------------+ |
- * | | ^ ^ 2 |
- * | | 7 | 2 +--------------------+
+ * | | ^ ^ 2 |
+ * | | 7 | 2, 1 +----------------+
* | v |
* | +-------------+ |
* +-- | C:idle-wait | -+
@@ -1649,8 +1647,11 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
* dl_defer_idle = 0
*
*
- * [1] A->B, A->D
+ * [1] A->B, A->D, C->B
* dl_server_start()
+ * dl_defer_idle = 0;
+ * if (dl_server_active)
+ * return; // [B]
* dl_server_active = 1;
* enqueue_dl_entity()
* update_dl_entity(WAKEUP)
@@ -1759,6 +1760,7 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
* "B:zero_laxity-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
* "B:zero_laxity-wait" -> "D:running" [label="3:dl_server_timer"]
* "C:idle-wait" -> "A:init" [label="8:dl_server_timer"]
+ * "C:idle-wait" -> "B:zero_laxity-wait" [label="1:dl_server_start"]
* "C:idle-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
* "C:idle-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
* "D:running" -> "A:init" [label="4:pick_task_dl"]
@@ -1784,6 +1786,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
{
struct rq *rq = dl_se->rq;
+ dl_se->dl_defer_idle = 0;
if (!dl_server(dl_se) || dl_se->dl_server_active)
return;
@@ -1834,6 +1837,7 @@ void sched_init_dl_servers(void)
rq = cpu_rq(cpu);
guard(rq_lock_irq)(rq);
+ update_rq_clock(rq);
dl_se = &rq->fair_server;
@@ -2210,7 +2214,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
update_dl_entity(dl_se);
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se);
- } else if ((flags & ENQUEUE_RESTORE) &&
+ } else if ((flags & ENQUEUE_MOVE) &&
!is_dl_boosted(dl_se) &&
dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
setup_new_dl_entity(dl_se);
@@ -3154,7 +3158,7 @@ void dl_add_task_root_domain(struct task_struct *p)
struct rq *rq;
struct dl_bw *dl_b;
unsigned int cpu;
- struct cpumask *msk = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
+ struct cpumask *msk;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
if (!dl_task(p) || dl_entity_is_special(&p->dl)) {
@@ -3162,20 +3166,12 @@ void dl_add_task_root_domain(struct task_struct *p)
return;
}
- /*
- * Get an active rq, whose rq->rd traces the correct root
- * domain.
- * Ideally this would be under cpuset reader lock until rq->rd is
- * fetched. However, sleepable locks cannot nest inside pi_lock, so we
- * rely on the caller of dl_add_task_root_domain() holds 'cpuset_mutex'
- * to guarantee the CPU stays in the cpuset.
- */
+ msk = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
dl_get_task_effective_cpus(p, msk);
cpu = cpumask_first_and(cpu_active_mask, msk);
BUG_ON(cpu >= nr_cpu_ids);
rq = cpu_rq(cpu);
dl_b = &rq->rd->dl_bw;
- /* End of fetching rd */
raw_spin_lock(&dl_b->lock);
__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
@@ -3299,6 +3295,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
{
+ /*
+ * Make sure to update current so we don't return a stale value.
+ */
+ if (task_current_donor(rq, p))
+ update_curr_dl(rq);
+
return p->dl.deadline;
}
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 8f6d8d7f895c..afe28c04d5aa 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -545,6 +545,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
{
if (iter->locked_task) {
+ __balance_callbacks(iter->rq, &iter->rf);
task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
iter->locked_task = NULL;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d30cca6870f5..93fce4bbff5e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1364,6 +1364,28 @@ static inline u32 sched_rng(void)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)
+static inline bool idle_rq(struct rq *rq)
+{
+ return rq->curr == rq->idle && !rq->nr_running && !rq->ttwu_pending;
+}
+
+/**
+ * available_idle_cpu - is a given CPU idle for enqueuing work.
+ * @cpu: the CPU in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+static inline bool available_idle_cpu(int cpu)
+{
+ if (!idle_rq(cpu_rq(cpu)))
+ return 0;
+
+ if (vcpu_is_preempted(cpu))
+ return 0;
+
+ return 1;
+}
+
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
{
@@ -2366,7 +2388,8 @@ extern const u32 sched_prio_to_wmult[40];
* should preserve as much state as possible.
*
* MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
- * in the runqueue.
+ * in the runqueue. IOW the priority is allowed to change. Callers
+ * must expect to deal with balance callbacks.
*
* NOCLOCK - skip the update_rq_clock() (avoids double updates)
*
@@ -3947,6 +3970,8 @@ extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
+
+extern void __balance_callbacks(struct rq *rq, struct rq_flags *rf);
extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
/*
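
With this consolidation, both idle_cpu() and available_idle_cpu() derive from the same idle_rq() predicate. A hypothetical caller, for illustration only:

/* Pick the first CPU in @mask that is idle and not a preempted vCPU. */
static int find_first_available_cpu(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (available_idle_cpu(cpu))
			return cpu;
	}
	return -1;
}
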
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 0496dc29ed0f..6f10db3646e7 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -180,35 +180,7 @@ int task_prio(const struct task_struct *p)
*/
int idle_cpu(int cpu)
{
- struct rq *rq = cpu_rq(cpu);
-
- if (rq->curr != rq->idle)
- return 0;
-
- if (rq->nr_running)
- return 0;
-
- if (rq->ttwu_pending)
- return 0;
-
- return 1;
-}
-
-/**
- * available_idle_cpu - is a given CPU idle for enqueuing work.
- * @cpu: the CPU in question.
- *
- * Return: 1 if the CPU is currently idle. 0 otherwise.
- */
-int available_idle_cpu(int cpu)
-{
- if (!idle_cpu(cpu))
- return 0;
-
- if (vcpu_is_preempted(cpu))
- return 0;
-
- return 1;
+ return idle_rq(cpu_rq(cpu));
}
/**
@@ -667,7 +639,7 @@ change:
* itself.
*/
newprio = rt_effective_prio(p, newprio);
- if (newprio == oldprio)
+ if (newprio == oldprio && !dl_prio(newprio))
queue_flags &= ~DEQUEUE_MOVE;
}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bdb30cc5e873..0e4bc1ca15ff 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -913,7 +913,7 @@ static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
return true;
/* Extra check for softirq clock bases */
- if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
+ if (base->index < HRTIMER_BASE_MONOTONIC_SOFT)
continue;
if (cpu_base->softirq_activated)
continue;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ef2d5dca6f70..aa758efc3731 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1148,7 +1148,6 @@ struct ftrace_page {
};
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
-#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
@@ -3834,7 +3833,8 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
return 0;
}
-static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+static int ftrace_allocate_records(struct ftrace_page *pg, int count,
+ unsigned long *num_pages)
{
int order;
int pages;
@@ -3844,7 +3844,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
return -EINVAL;
/* We want to fill as much as possible, with no empty pages */
- pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+ pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
order = fls(pages) - 1;
again:
@@ -3859,6 +3859,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
}
ftrace_number_of_pages += 1 << order;
+ *num_pages += 1 << order;
ftrace_number_of_groups++;
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
@@ -3887,12 +3888,14 @@ static void ftrace_free_pages(struct ftrace_page *pages)
}
static struct ftrace_page *
-ftrace_allocate_pages(unsigned long num_to_init)
+ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages)
{
struct ftrace_page *start_pg;
struct ftrace_page *pg;
int cnt;
+ *num_pages = 0;
+
if (!num_to_init)
return NULL;
@@ -3906,7 +3909,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
* waste as little space as possible.
*/
for (;;) {
- cnt = ftrace_allocate_records(pg, num_to_init);
+ cnt = ftrace_allocate_records(pg, num_to_init, num_pages);
if (cnt < 0)
goto free_pages;
@@ -7192,8 +7195,6 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
- pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
-
/*
* Sorting mcount in vmlinux at build time depend on
* CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7206,7 +7207,7 @@ static int ftrace_process_locs(struct module *mod,
test_is_sorted(start, count);
}
- start_pg = ftrace_allocate_pages(count);
+ start_pg = ftrace_allocate_pages(count, &pages);
if (!start_pg)
return -ENOMEM;
@@ -7305,27 +7306,27 @@ static int ftrace_process_locs(struct module *mod,
/* We should have used all pages unless we skipped some */
if (pg_unuse) {
unsigned long pg_remaining, remaining = 0;
- unsigned long skip;
+ long skip;
/* Count the number of entries unused and compare it to skipped. */
- pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+ pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index;
if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
skip = skipped - pg_remaining;
- for (pg = pg_unuse; pg; pg = pg->next)
+ for (pg = pg_unuse; pg && skip > 0; pg = pg->next) {
remaining += 1 << pg->order;
+ skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE;
+ }
pages -= remaining;
- skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
-
/*
* Check to see if the number of pages remaining would
* just fit the number of entries skipped.
*/
- WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+ WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped",
remaining, skipped);
}
/* Need to synchronize with ftrace_location_range() */
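
To see the sizing math concretely, assume PAGE_SIZE = 4096 and ENTRY_SIZE = sizeof(struct dyn_ftrace) = 16 (both values illustrative), with count = 1000 records:

/*
 * pages = DIV_ROUND_UP(1000 * 16, 4096) = 4
 * order = fls(4) - 1 = 2                    -> one order-2 (4-page) block
 * cnt   = (PAGE_SIZE << order) / ENTRY_SIZE = 16384 / 16 = 1024
 *
 * So 1000 entries fit with 24 slots spare; those spare slots are what the
 * new skip/remaining accounting reconciles against @skipped.
 */
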
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0685e3a8aa0a..366122f4a0f8 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -71,7 +71,7 @@ unsigned int __read_mostly hardlockup_panic =
* hard lockup is detected, it could be task, memory, lock etc.
* Refer include/linux/sys_info.h for detailed bit definition.
*/
-static unsigned long hardlockup_si_mask;
+unsigned long hardlockup_si_mask;
#ifdef CONFIG_SYSFS
diff --git a/lib/buildid.c b/lib/buildid.c
index aaf61dfc0919..818331051afe 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -5,6 +5,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
+#include <linux/fs.h>
#include <linux/secretmem.h>
#define BUILD_ID 3
@@ -46,20 +47,9 @@ static int freader_get_folio(struct freader *r, loff_t file_off)
freader_put_folio(r);
- /* reject secretmem folios created with memfd_secret() */
- if (secretmem_mapping(r->file->f_mapping))
- return -EFAULT;
-
+ /* only use page cache lookup - fail if not already cached */
r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
- /* if sleeping is allowed, wait for the page, if necessary */
- if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
- filemap_invalidate_lock_shared(r->file->f_mapping);
- r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
- NULL, r->file);
- filemap_invalidate_unlock_shared(r->file->f_mapping);
- }
-
if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
if (!IS_ERR(r->folio))
folio_put(r->folio);
@@ -97,6 +87,24 @@ const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
return r->data + file_off;
}
+ /* reject secretmem folios created with memfd_secret() */
+ if (secretmem_mapping(r->file->f_mapping)) {
+ r->err = -EFAULT;
+ return NULL;
+ }
+
+ /* use __kernel_read() for sleepable context */
+ if (r->may_fault) {
+ ssize_t ret;
+
+ ret = __kernel_read(r->file, r->buf, sz, &file_off);
+ if (ret != sz) {
+ r->err = (ret < 0) ? ret : -EIO;
+ return NULL;
+ }
+ return r->buf;
+ }
+
/* fetch or reuse folio for given file offset */
r->err = freader_get_folio(r, file_off);
if (r->err)
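
A condensed sketch of the sleepable-path pattern the patch adopts: read exactly sz bytes with __kernel_read() into a preallocated buffer, mapping short reads to -EIO (the helper name is invented):

static ssize_t read_exact(struct file *file, void *buf, size_t sz, loff_t off)
{
	ssize_t ret = __kernel_read(file, buf, sz, &off);

	if (ret < 0)
		return ret;
	return (size_t)ret == sz ? ret : -EIO;
}
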
diff --git a/mm/Kconfig b/mm/Kconfig
index bd0ea5454af8..a992f2203eb9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1220,10 +1220,14 @@ config ZONE_DEVICE
Device memory hotplug support allows for establishing pmem,
or other device driver discovered memory regions, in the
memmap. This allows pfn_to_page() lookups of otherwise
- "device-physical" addresses which is needed for using a DAX
- mapping in an O_DIRECT operation, among other things.
-
- If FS_DAX is enabled, then say Y.
+ "device-physical" addresses which is needed for DAX, PCI_P2PDMA, and
+ DEVICE_PRIVATE features among others.
+
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example, on a 46-bit system, the entropy goes down
+ from 16 bits to 15 bits. The actual reduction in entropy depends
+ on the physical address bits, on processor features, kernel config
+ (5 level page table) and physical memory present on the system.
#
# Helpers to mirror range of the CPU page tables of a process into device page
diff --git a/mm/damon/core.c b/mm/damon/core.c
index f9fc0375890a..84f80a20f233 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1431,6 +1431,35 @@ bool damon_is_running(struct damon_ctx *ctx)
return running;
}
+/*
+ * damon_call_handle_inactive_ctx() - handle a DAMON call request that was
+ * added to an inactive context.
+ * @ctx: The inactive DAMON context.
+ * @control: Control variable of the call request.
+ *
+ * This function is called when @control has been added to @ctx but @ctx is
+ * not running (inactive). It checks whether @ctx handled @control, and
+ * cleans up @control if it was not handled.
+ *
+ * Returns 0 if @control was handled by @ctx, negative error code otherwise.
+ */
+static int damon_call_handle_inactive_ctx(
+ struct damon_ctx *ctx, struct damon_call_control *control)
+{
+ struct damon_call_control *c;
+
+ mutex_lock(&ctx->call_controls_lock);
+ list_for_each_entry(c, &ctx->call_controls, list) {
+ if (c == control) {
+ list_del(&control->list);
+ mutex_unlock(&ctx->call_controls_lock);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&ctx->call_controls_lock);
+ return 0;
+}
+
/**
* damon_call() - Invoke a given function on DAMON worker thread (kdamond).
* @ctx: DAMON context to call the function for.
@@ -1461,7 +1490,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
list_add_tail(&control->list, &ctx->call_controls);
mutex_unlock(&ctx->call_controls_lock);
if (!damon_is_running(ctx))
- return -EINVAL;
+ return damon_call_handle_inactive_ctx(ctx, control);
if (control->repeat)
return 0;
wait_for_completion(&control->completion);
@@ -2051,13 +2080,15 @@ static unsigned long damos_get_node_memcg_used_bp(
rcu_read_lock();
memcg = mem_cgroup_from_id(goal->memcg_id);
- rcu_read_unlock();
- if (!memcg) {
+ if (!memcg || !mem_cgroup_tryget(memcg)) {
+ rcu_read_unlock();
if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
return 0;
else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
return 10000;
}
+ rcu_read_unlock();
+
mem_cgroup_flush_stats(memcg);
lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
@@ -2065,6 +2096,8 @@ static unsigned long damos_get_node_memcg_used_bp(
used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
+ mem_cgroup_put(memcg);
+
si_meminfo_node(&i, goal->nid);
if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
numerator = used_pages;
@@ -2751,13 +2784,13 @@ done:
if (ctx->ops.cleanup)
ctx->ops.cleanup(ctx);
kfree(ctx->regions_score_histogram);
+ kdamond_call(ctx, true);
pr_debug("kdamond (%d) finishes\n", current->pid);
mutex_lock(&ctx->kdamond_lock);
ctx->kdamond = NULL;
mutex_unlock(&ctx->kdamond_lock);
- kdamond_call(ctx, true);
damos_walk_cancel(ctx);
mutex_lock(&damon_lock);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 30d20f5b3192..3a699dcd5a7f 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -2152,13 +2152,13 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
return err;
err = damos_sysfs_set_dests(scheme);
if (err)
- goto put_access_pattern_out;
+ goto rmdir_put_access_pattern_out;
err = damon_sysfs_scheme_set_quotas(scheme);
if (err)
goto put_dests_out;
err = damon_sysfs_scheme_set_watermarks(scheme);
if (err)
- goto put_quotas_access_pattern_out;
+ goto rmdir_put_quotas_access_pattern_out;
err = damos_sysfs_set_filter_dirs(scheme);
if (err)
goto put_watermarks_quotas_access_pattern_out;
@@ -2183,13 +2183,15 @@ put_filters_watermarks_quotas_access_pattern_out:
put_watermarks_quotas_access_pattern_out:
kobject_put(&scheme->watermarks->kobj);
scheme->watermarks = NULL;
-put_quotas_access_pattern_out:
+rmdir_put_quotas_access_pattern_out:
+ damon_sysfs_quotas_rm_dirs(scheme->quotas);
kobject_put(&scheme->quotas->kobj);
scheme->quotas = NULL;
put_dests_out:
kobject_put(&scheme->dests->kobj);
scheme->dests = NULL;
-put_access_pattern_out:
+rmdir_put_access_pattern_out:
+ damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
kobject_put(&scheme->access_pattern->kobj);
scheme->access_pattern = NULL;
return err;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index e2bd2d7becdd..95fd9375a7d8 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -792,7 +792,7 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
if (!nr_regions_range) {
err = -ENOMEM;
- goto put_intervals_out;
+ goto rmdir_put_intervals_out;
}
err = kobject_init_and_add(&nr_regions_range->kobj,
@@ -806,6 +806,8 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
put_nr_regions_intervals_out:
kobject_put(&nr_regions_range->kobj);
attrs->nr_regions_range = NULL;
+rmdir_put_intervals_out:
+ damon_sysfs_intervals_rm_dirs(intervals);
put_intervals_out:
kobject_put(&intervals->kobj);
attrs->intervals = NULL;
@@ -948,7 +950,7 @@ static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
err = damon_sysfs_context_set_targets(context);
if (err)
- goto put_attrs_out;
+ goto rmdir_put_attrs_out;
err = damon_sysfs_context_set_schemes(context);
if (err)
@@ -958,7 +960,8 @@ static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
put_targets_attrs_out:
kobject_put(&context->targets->kobj);
context->targets = NULL;
-put_attrs_out:
+rmdir_put_attrs_out:
+ damon_sysfs_attrs_rm_dirs(context->attrs);
kobject_put(&context->attrs->kobj);
context->attrs = NULL;
return err;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..a1832da0f623 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4286,6 +4286,11 @@ static int __init hugepages_setup(char *s)
unsigned long tmp;
char *p = s;
+ if (!hugepages_supported()) {
+ pr_warn("HugeTLB: hugepages unsupported, ignoring hugepages=%s cmdline\n", s);
+ return 0;
+ }
+
if (!parsed_valid_hugepagesz) {
pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
parsed_valid_hugepagesz = true;
@@ -4366,6 +4371,11 @@ static int __init hugepagesz_setup(char *s)
unsigned long size;
struct hstate *h;
+ if (!hugepages_supported()) {
+ pr_warn("HugeTLB: hugepages unsupported, ignoring hugepagesz=%s cmdline\n", s);
+ return 0;
+ }
+
parsed_valid_hugepagesz = false;
size = (unsigned long)memparse(s, NULL);
@@ -4414,6 +4424,12 @@ static int __init default_hugepagesz_setup(char *s)
unsigned long size;
int i;
+ if (!hugepages_supported()) {
+ pr_warn("HugeTLB: hugepages unsupported, ignoring default_hugepagesz=%s cmdline\n",
+ s);
+ return 0;
+ }
+
parsed_valid_hugepagesz = false;
if (parsed_default_hugepagesz) {
pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
@@ -5096,7 +5112,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
unsigned long last_addr_mask;
pte_t *src_pte, *dst_pte;
struct mmu_notifier_range range;
- bool shared_pmd = false;
+ struct mmu_gather tlb;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
old_end);
@@ -5106,6 +5122,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
* range.
*/
flush_cache_range(vma, range.start, range.end);
+ tlb_gather_mmu_vma(&tlb, vma);
mmu_notifier_invalidate_range_start(&range);
last_addr_mask = hugetlb_mask_last_page(h);
@@ -5122,8 +5139,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
continue;
- if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
- shared_pmd = true;
+ if (huge_pmd_unshare(&tlb, vma, old_addr, src_pte)) {
old_addr |= last_addr_mask;
new_addr |= last_addr_mask;
continue;
@@ -5134,15 +5150,16 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
break;
move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
+ tlb_remove_huge_tlb_entry(h, &tlb, src_pte, old_addr);
}
- if (shared_pmd)
- flush_hugetlb_tlb_range(vma, range.start, range.end);
- else
- flush_hugetlb_tlb_range(vma, old_end - len, old_end);
+ tlb_flush_mmu_tlbonly(&tlb);
+ huge_pmd_unshare_flush(&tlb, vma);
+
mmu_notifier_invalidate_range_end(&range);
i_mmap_unlock_write(mapping);
hugetlb_vma_unlock_write(vma);
+ tlb_finish_mmu(&tlb);
return len + old_addr - old_end;
}
@@ -5161,7 +5178,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long sz = huge_page_size(h);
bool adjust_reservation;
unsigned long last_addr_mask;
- bool force_flush = false;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -5184,10 +5200,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, vma, address, ptep)) {
+ if (huge_pmd_unshare(tlb, vma, address, ptep)) {
spin_unlock(ptl);
- tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
- force_flush = true;
address |= last_addr_mask;
continue;
}
@@ -5303,21 +5317,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
tlb_end_vma(tlb, vma);
- /*
- * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
- * could defer the flush until now, since by holding i_mmap_rwsem we
- * guaranteed that the last reference would not be dropped. But we must
- * do the flushing before we return, as otherwise i_mmap_rwsem will be
- * dropped and the last reference to the shared PMDs page might be
- * dropped as well.
- *
- * In theory we could defer the freeing of the PMD pages as well, but
- * huge_pmd_unshare() relies on the exact page_count for the PMD page to
- * detect sharing, so we cannot defer the release of the page either.
- * Instead, do flush now.
- */
- if (force_flush)
- tlb_flush_mmu_tlbonly(tlb);
+ huge_pmd_unshare_flush(tlb, vma);
}
void __hugetlb_zap_begin(struct vm_area_struct *vma,
@@ -6416,11 +6416,11 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
pte_t pte;
struct hstate *h = hstate_vma(vma);
long pages = 0, psize = huge_page_size(h);
- bool shared_pmd = false;
struct mmu_notifier_range range;
unsigned long last_addr_mask;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+ struct mmu_gather tlb;
/*
* In the case of shared PMDs, the area to flush could be beyond
@@ -6433,6 +6433,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
BUG_ON(address >= end);
flush_cache_range(vma, range.start, range.end);
+ tlb_gather_mmu_vma(&tlb, vma);
mmu_notifier_invalidate_range_start(&range);
hugetlb_vma_lock_write(vma);
@@ -6459,7 +6460,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
}
}
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, vma, address, ptep)) {
+ if (huge_pmd_unshare(&tlb, vma, address, ptep)) {
/*
* When uffd-wp is enabled on the vma, unshare
* shouldn't happen at all. Warn about it if it
@@ -6468,7 +6469,6 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
pages++;
spin_unlock(ptl);
- shared_pmd = true;
address |= last_addr_mask;
continue;
}
@@ -6529,23 +6529,16 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
pte = huge_pte_clear_uffd_wp(pte);
huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
pages++;
+ tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
}
next:
spin_unlock(ptl);
cond_resched();
}
- /*
- * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
- * may have cleared our pud entry and done put_page on the page table:
- * once we release i_mmap_rwsem, another task can do the final put_page
- * and that page table be reused and filled with junk. If we actually
- * did unshare a page of pmds, flush the range corresponding to the pud.
- */
- if (shared_pmd)
- flush_hugetlb_tlb_range(vma, range.start, range.end);
- else
- flush_hugetlb_tlb_range(vma, start, end);
+
+ tlb_flush_mmu_tlbonly(&tlb);
+ huge_pmd_unshare_flush(&tlb, vma);
/*
* No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
* downgrading page table protection not changing it to point to a new
@@ -6556,6 +6549,7 @@ next:
i_mmap_unlock_write(vma->vm_file->f_mapping);
hugetlb_vma_unlock_write(vma);
mmu_notifier_invalidate_range_end(&range);
+ tlb_finish_mmu(&tlb);
return pages > 0 ? (pages << h->order) : pages;
}
@@ -6912,18 +6906,27 @@ out:
return pte;
}
-/*
- * unmap huge page backed by shared pte.
+/**
+ * huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
+ * @tlb: the current mmu_gather.
+ * @vma: the vma covering the pmd table.
+ * @addr: the address we are trying to unshare.
+ * @ptep: pointer into the (pmd) page table.
*
- * Called with page table lock held.
+ * Called with the page table lock held, the i_mmap_rwsem held in write mode
+ * and the hugetlb vma lock held in write mode.
*
- * returns: 1 successfully unmapped a shared pte page
- * 0 the underlying pte page is not shared, or it is the last user
+ * Note: The caller must call huge_pmd_unshare_flush() before dropping the
+ * i_mmap_rwsem.
+ *
+ * Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
+ * was not a shared PMD table.
*/
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
unsigned long sz = huge_page_size(hstate_vma(vma));
+ struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd = pgd_offset(mm, addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
@@ -6935,18 +6938,36 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
hugetlb_vma_assert_locked(vma);
pud_clear(pud);
- /*
- * Once our caller drops the rmap lock, some other process might be
- * using this page table as a normal, non-hugetlb page table.
- * Wait for pending gup_fast() in other threads to finish before letting
- * that happen.
- */
- tlb_remove_table_sync_one();
- ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
+
+ tlb_unshare_pmd_ptdesc(tlb, virt_to_ptdesc(ptep), addr);
+
mm_dec_nr_pmds(mm);
return 1;
}
+/**
+ * huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
+ * @tlb: the current mmu_gather.
+ * @vma: the vma covering the pmd table.
+ *
+ * Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
+ * unsharing with concurrent page table walkers.
+ *
+ * This function must be called after a sequence of huge_pmd_unshare()
+ * calls while still holding the i_mmap_rwsem.
+ */
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ /*
+ * We must synchronize page table unsharing such that nobody will
+ * try reusing a previously-shared page table while it might still
+ * be in use by previous sharers (TLB, GUP_fast).
+ */
+ i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+
+ tlb_flush_unshared_tables(tlb);
+}
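To make the new contract concrete, here is a minimal caller sketch, editorially condensed from the move_hugetlb_page_tables() hunk earlier in this patch (illustration only, not itself part of the diff):

	struct mmu_gather tlb;

	flush_cache_range(vma, start, end);
	tlb_gather_mmu_vma(&tlb, vma);
	mmu_notifier_invalidate_range_start(&range);
	/* i_mmap_rwsem and the hugetlb vma lock are held in write mode. */

	for (; addr < end; addr += sz) {
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(&tlb, vma, addr, ptep)) {
			/* Whole PMD table unmapped; skip its range. */
			spin_unlock(ptl);
			continue;
		}
		/* ... modify or clear individual entries ... */
		tlb_remove_huge_tlb_entry(h, &tlb, ptep, addr);
		spin_unlock(ptl);
	}

	tlb_flush_mmu_tlbonly(&tlb);		/* flush the recorded entries */
	huge_pmd_unshare_flush(&tlb, vma);	/* sync unshared PMD tables */
	/* ... only now drop i_mmap_rwsem and the hugetlb vma lock ... */
	tlb_finish_mmu(&tlb);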
+
#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -6955,12 +6976,16 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
return NULL;
}
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+}
+
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
@@ -7227,6 +7252,7 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long sz = huge_page_size(h);
struct mm_struct *mm = vma->vm_mm;
struct mmu_notifier_range range;
+ struct mmu_gather tlb;
unsigned long address;
spinlock_t *ptl;
pte_t *ptep;
@@ -7238,6 +7264,8 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
return;
flush_cache_range(vma, start, end);
+ tlb_gather_mmu_vma(&tlb, vma);
+
/*
* No need to call adjust_range_if_pmd_sharing_possible(), because
* we have already done the PUD_SIZE alignment.
@@ -7256,10 +7284,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
- huge_pmd_unshare(mm, vma, address, ptep);
+ huge_pmd_unshare(&tlb, vma, address, ptep);
spin_unlock(ptl);
}
- flush_hugetlb_tlb_range(vma, start, end);
+ huge_pmd_unshare_flush(&tlb, vma);
if (take_locks) {
i_mmap_unlock_write(vma->vm_file->f_mapping);
hugetlb_vma_unlock_write(vma);
@@ -7269,6 +7297,7 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
* Documentation/mm/mmu_notifier.rst.
*/
mmu_notifier_invalidate_range_end(&range);
+ tlb_finish_mmu(&tlb);
}
/*
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 4600e7605cab..c5556bb9d5f0 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -44,7 +44,10 @@ struct mm_struct init_mm = {
.mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq),
#endif
.user_ns = &init_user_ns,
- .cpu_bitmap = CPU_BITS_NONE,
+#ifdef CONFIG_SCHED_MM_CID
+ .mm_cid.lock = __RAW_SPIN_LOCK_UNLOCKED(init_mm.mm_cid.lock),
+#endif
+ .flexible_array = MM_STRUCT_FLEXIBLE_ARRAY_INIT,
INIT_MM_CONTEXT(init_mm)
};
diff --git a/mm/internal.h b/mm/internal.h
index e430da900430..f35dbcf99a86 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -538,16 +538,8 @@ extern unsigned long highest_memmap_pfn;
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
-#ifdef CONFIG_NUMA
int user_proactive_reclaim(char *buf,
struct mem_cgroup *memcg, pg_data_t *pgdat);
-#else
-static inline int user_proactive_reclaim(char *buf,
- struct mem_cgroup *memcg, pg_data_t *pgdat)
-{
- return 0;
-}
-#endif
/*
* in mm/rmap.c:
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 577a1699c553..da0f5b6f5744 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -823,6 +823,9 @@ static struct notifier_block kfence_check_canary_notifier = {
static struct delayed_work kfence_timer;
#ifdef CONFIG_KFENCE_STATIC_KEYS
+/* Wait queue to wake up allocation-gate timer task. */
+static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
+
static int kfence_reboot_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -832,7 +835,12 @@ static int kfence_reboot_callback(struct notifier_block *nb,
*/
WRITE_ONCE(kfence_enabled, false);
/* Cancel any pending timer work */
- cancel_delayed_work_sync(&kfence_timer);
+ cancel_delayed_work(&kfence_timer);
+ /*
+ * Wake up any blocked toggle_allocation_gate() so it can complete
+ * early while the system is still able to handle IPIs.
+ */
+ wake_up(&allocation_wait);
return NOTIFY_OK;
}
@@ -842,9 +850,6 @@ static struct notifier_block kfence_reboot_notifier = {
.priority = INT_MAX, /* Run early to stop timers ASAP */
};
-/* Wait queue to wake up allocation-gate timer task. */
-static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
-
static void wake_up_kfence_timer(struct irq_work *work)
{
wake_up(&allocation_wait);
@@ -873,7 +878,9 @@ static void toggle_allocation_gate(struct work_struct *work)
/* Enable static key, and await allocation to happen. */
static_branch_enable(&kfence_allocation_key);
- wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);
+ wait_event_idle(allocation_wait,
+ atomic_read(&kfence_allocation_gate) > 0 ||
+ !READ_ONCE(kfence_enabled));
/* Disable static key and reset timer. */
static_branch_disable(&kfence_allocation_key);
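The reboot-path ordering is load-bearing here; a condensed sketch, derived editorially from the hunks in this file:

	/*
	 * WRITE_ONCE(kfence_enabled, false);   - the waiter's new exit condition
	 * cancel_delayed_work(&kfence_timer);  - non-sync: a sync cancel could
	 *                                        block behind a worker that is
	 *                                        itself parked in wait_event_idle()
	 * wake_up(&allocation_wait);           - kick the waiter so it re-evaluates
	 *                                        the compound condition and exits
	 *                                        while IPIs are still available
	 */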
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index e7f554a31bb4..9e1c5f2b7a41 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -207,7 +207,7 @@ void kmsan_free_page(struct page *page, unsigned int order)
if (!kmsan_enabled || kmsan_in_runtime())
return;
kmsan_enter_runtime();
- kmsan_internal_poison_memory(page_address(page), page_size(page),
+ kmsan_internal_poison_memory(page_address(page), PAGE_SIZE << order,
GFP_KERNEL & ~(__GFP_RECLAIM),
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
kmsan_leave_runtime();
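The fix matters for non-compound higher-order pages: page_size() derives its result from the compound order stored in the head page, which such pages do not carry. A short sketch of the difference, for a hypothetical order-2 allocation:

	/*
	 * For an order-2 allocation that was never marked compound:
	 *
	 *   page_size(page)    == PAGE_SIZE       (no compound metadata)
	 *   PAGE_SIZE << order == 4 * PAGE_SIZE   (the real extent being freed)
	 *
	 * Poisoning only page_size() bytes would leave the three tail pages
	 * unpoisoned; hence the switch to PAGE_SIZE << order.
	 */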
diff --git a/mm/memory.c b/mm/memory.c
index 2a55edc48a65..da360a6eb8a4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1465,7 +1465,11 @@ copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
- if (src_vma->vm_flags & VM_COPY_ON_FORK)
+ /*
+ * We check against dst_vma because, while sane VMA flags will have
+ * been copied, VM_UFFD_WP may be set only on dst_vma.
+ */
+ if (dst_vma->vm_flags & VM_COPY_ON_FORK)
return true;
/*
* The presence of an anon_vma indicates an anonymous VMA has page
@@ -1963,10 +1967,9 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
do {
next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud)) {
- if (next - addr != HPAGE_PUD_SIZE) {
- mmap_assert_locked(tlb->mm);
+ if (next - addr != HPAGE_PUD_SIZE)
split_huge_pud(vma, pud, addr);
- } else if (zap_huge_pud(tlb, vma, pud, addr))
+ else if (zap_huge_pud(tlb, vma, pud, addr))
goto next;
/* fall through */
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 5169f9717f60..4688b9e38cd2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1458,6 +1458,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
+ enum ttu_flags ttu = 0;
if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */
@@ -1498,8 +1499,6 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
goto put_anon;
if (folio_mapped(src)) {
- enum ttu_flags ttu = 0;
-
if (!folio_test_anon(src)) {
/*
* In shared mappings, try_to_unmap could potentially
@@ -1516,16 +1515,17 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
try_to_migrate(src, ttu);
page_was_mapped = 1;
-
- if (ttu & TTU_RMAP_LOCKED)
- i_mmap_unlock_write(mapping);
}
if (!folio_mapped(src))
rc = move_to_new_folio(dst, src, mode);
if (page_was_mapped)
- remove_migration_ptes(src, !rc ? dst : src, 0);
+ remove_migration_ptes(src, !rc ? dst : src,
+ ttu ? RMP_LOCKED : 0);
+
+ if (ttu & TTU_RMAP_LOCKED)
+ i_mmap_unlock_write(mapping);
unlock_put_anon:
folio_unlock(dst);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 247e3f9db6c7..7468ec388455 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -10,6 +10,7 @@
#include <linux/swap.h>
#include <linux/rmap.h>
#include <linux/pgalloc.h>
+#include <linux/hugetlb.h>
#include <asm/tlb.h>
@@ -426,6 +427,7 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
#endif
tlb->vma_pfn = 0;
+ tlb->fully_unshared_tables = 0;
__tlb_reset_range(tlb);
inc_tlb_flush_pending(tlb->mm);
}
@@ -460,6 +462,31 @@ void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
}
/**
+ * tlb_gather_mmu_vma - initialize an mmu_gather structure for operating on a
+ * single VMA
+ * @tlb: the mmu_gather structure to initialize
+ * @vma: the vm_area_struct
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for operating on
+ * a single VMA. In contrast to tlb_gather_mmu(), calling this function will
+ * not require another call to tlb_start_vma(). In contrast to tlb_start_vma(),
+ * this function will *not* call flush_cache_range().
+ *
+ * For hugetlb VMAs, this function will also initialize the mmu_gather
+ * page_size accordingly, not requiring a separate call to
+ * tlb_change_page_size().
+ */
+void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ tlb_gather_mmu(tlb, vma->vm_mm);
+ tlb_update_vma_flags(tlb, vma);
+ if (is_vm_hugetlb_page(vma))
+ /* All entries have the same size. */
+ tlb_change_page_size(tlb, huge_page_size(hstate_vma(vma)));
+}
+
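A hedged usage sketch (assuming a typical single-VMA page-table operation): the one responsibility that stays with the caller, as the kerneldoc above notes, is the cache flush.

	struct mmu_gather tlb;

	flush_cache_range(vma, start, end);	/* still the caller's job */
	tlb_gather_mmu_vma(&tlb, vma);		/* no tlb_start_vma() needed */
	/* ... page table updates, tlb_remove_*() calls ... */
	tlb_finish_mmu(&tlb);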
+/**
* tlb_finish_mmu - finish an mmu_gather structure
* @tlb: the mmu_gather structure to finish
*
@@ -469,6 +496,12 @@ void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
void tlb_finish_mmu(struct mmu_gather *tlb)
{
/*
+ * We expect an earlier huge_pmd_unshare_flush() call to sort this out,
+ * due to complicated locking requirements with page table unsharing.
+ */
+ VM_WARN_ON_ONCE(tlb->fully_unshared_tables);
+
+ /*
* If there are parallel threads doing PTE changes on the same range
* under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB
* flush by batching, one thread may end up seeing inconsistent PTEs
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
index 5b009a9cd8b4..8f5735fda0a2 100644
--- a/mm/numa_memblks.c
+++ b/mm/numa_memblks.c
@@ -7,6 +7,8 @@
#include <linux/numa.h>
#include <linux/numa_memblks.h>
+#include <asm/numa.h>
+
int numa_distance_cnt;
static u8 *numa_distance;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c380f063e8b7..cbf758e27aa2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -167,6 +167,33 @@ static inline void __pcp_trylock_noop(unsigned long *flags) { }
pcp_trylock_finish(UP_flags); \
})
+/*
+ * With the UP spinlock implementation, when we spin_lock(&pcp->lock) (e.g.
+ * for a potentially remote CPU drain) and get interrupted by an operation
+ * that attempts pcp_spin_trylock(), we can't rely on the trylock failing:
+ * the UP spinlock assumptions make the trylock a no-op that always
+ * "succeeds". So we have to turn that spin_lock() into a
+ * spin_lock_irqsave(). This works because on UP there are no remote CPUs,
+ * so we can only ever be locking the one existing local pcp.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+static inline void __flags_noop(unsigned long *flags) { }
+#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
+({ \
+ __flags_noop(&(flags)); \
+ spin_lock(&(ptr)->lock); \
+})
+#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
+({ \
+ spin_unlock(&(ptr)->lock); \
+ __flags_noop(&(flags)); \
+})
+#else
+#define pcp_spin_lock_maybe_irqsave(ptr, flags) \
+ spin_lock_irqsave(&(ptr)->lock, flags)
+#define pcp_spin_unlock_maybe_irqrestore(ptr, flags) \
+ spin_unlock_irqrestore(&(ptr)->lock, flags)
+#endif
+
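The interleaving described above can be spelled out; a sketch of the hazard on UP without irqsave (illustration only):

	/*
	 *   drain path (task context)        interrupt handler
	 *   -------------------------        --------------------------------
	 *   spin_lock(&pcp->lock);
	 *                              <IRQ>
	 *                                    pcp_spin_trylock() "succeeds":
	 *                                    on UP it is a no-op, so the
	 *                                    handler touches pcp lists the
	 *                                    drain is still mutating.
	 *
	 * Disabling interrupts for the critical section closes the window:
	 */
	unsigned long UP_flags;

	pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
	free_pcppages_bulk(zone, to_drain, pcp, 0);
	pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);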
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
@@ -2556,6 +2583,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
{
int high_min, to_drain, to_drain_batched, batch;
+ unsigned long UP_flags;
bool todo = false;
high_min = READ_ONCE(pcp->high_min);
@@ -2575,9 +2603,9 @@ bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
to_drain = pcp->count - pcp->high;
while (to_drain > 0) {
to_drain_batched = min(to_drain, batch);
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
todo = true;
to_drain -= to_drain_batched;
@@ -2594,14 +2622,15 @@ bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
+ unsigned long UP_flags;
int to_drain, batch;
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0) {
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
free_pcppages_bulk(zone, to_drain, pcp, 0);
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
}
}
#endif
@@ -2612,10 +2641,11 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+ unsigned long UP_flags;
int count;
do {
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
count = pcp->count;
if (count) {
int to_drain = min(count,
@@ -2624,7 +2654,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
free_pcppages_bulk(zone, to_drain, pcp, 0);
count -= to_drain;
}
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
} while (count);
}
@@ -6109,6 +6139,7 @@ static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
{
struct per_cpu_pages *pcp;
struct cpu_cacheinfo *cci;
+ unsigned long UP_flags;
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
cci = get_cpu_cacheinfo(cpu);
@@ -6119,12 +6150,12 @@ static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
* This can reduce zone lock contention without hurting
* cache-hot pages sharing.
*/
- spin_lock(&pcp->lock);
+ pcp_spin_lock_maybe_irqsave(pcp, UP_flags);
if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
pcp->flags |= PCPF_FREE_HIGH_BATCH;
else
pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
- spin_unlock(&pcp->lock);
+ pcp_spin_unlock_maybe_irqrestore(pcp, UP_flags);
}
void setup_pcp_cacheinfo(unsigned int cpu)
@@ -6667,11 +6698,19 @@ static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *
int old_percpu_pagelist_high_fraction;
int ret;
+ /*
+ * Avoid using pcp_batch_high_lock for reads as the value is read
+ * atomically and a race with offlining is harmless.
+ */
+
+ if (!write)
+ return proc_dointvec_minmax(table, write, buffer, length, ppos);
+
mutex_lock(&pcp_batch_high_lock);
old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
- if (!write || ret < 0)
+ if (ret < 0)
goto out;
/* Sanity checking to avoid pcp imbalance */
@@ -7418,20 +7457,16 @@ bool put_page_back_buddy(struct page *page)
}
#endif
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void)
+bool has_managed_zone(enum zone_type zone)
{
struct pglist_data *pgdat;
for_each_online_pgdat(pgdat) {
- struct zone *zone = &pgdat->node_zones[ZONE_DMA];
-
- if (managed_zone(zone))
+ if (managed_zone(&pgdat->node_zones[zone]))
return true;
}
return false;
}
-#endif /* CONFIG_ZONE_DMA */
#ifdef CONFIG_UNACCEPTED_MEMORY
diff --git a/mm/rmap.c b/mm/rmap.c
index f955f02d570e..7b9879ef442d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -76,7 +76,7 @@
#include <linux/mm_inline.h>
#include <linux/oom.h>
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>
@@ -2008,26 +2008,25 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* if unsuccessful.
*/
if (!anon) {
+ struct mmu_gather tlb;
+
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
if (!hugetlb_vma_trylock_write(vma))
goto walk_abort;
- if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
+
+ tlb_gather_mmu_vma(&tlb, vma);
+ if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
hugetlb_vma_unlock_write(vma);
- flush_tlb_range(vma,
- range.start, range.end);
+ huge_pmd_unshare_flush(&tlb, vma);
+ tlb_finish_mmu(&tlb);
/*
- * The ref count of the PMD page was
- * dropped which is part of the way map
- * counting is done for shared PMDs.
- * Return 'true' here. When there is
- * no other sharing, huge_pmd_unshare
- * returns false and we will unmap the
- * actual page and drop map count
- * to zero.
+ * The PMD table was unmapped,
+ * consequently unmapping the folio.
*/
goto walk_done;
}
hugetlb_vma_unlock_write(vma);
+ tlb_finish_mmu(&tlb);
}
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
if (pte_dirty(pteval))
@@ -2404,31 +2403,29 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* fail if unsuccessful.
*/
if (!anon) {
+ struct mmu_gather tlb;
+
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
if (!hugetlb_vma_trylock_write(vma)) {
page_vma_mapped_walk_done(&pvmw);
ret = false;
break;
}
- if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
- hugetlb_vma_unlock_write(vma);
- flush_tlb_range(vma,
- range.start, range.end);
+ tlb_gather_mmu_vma(&tlb, vma);
+ if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
+ hugetlb_vma_unlock_write(vma);
+ huge_pmd_unshare_flush(&tlb, vma);
+ tlb_finish_mmu(&tlb);
/*
- * The ref count of the PMD page was
- * dropped which is part of the way map
- * counting is done for shared PMDs.
- * Return 'true' here. When there is
- * no other sharing, huge_pmd_unshare
- * returns false and we will unmap the
- * actual page and drop map count
- * to zero.
+ * The PMD table was unmapped,
+ * consequently unmapping the folio.
*/
page_vma_mapped_walk_done(&pvmw);
break;
}
hugetlb_vma_unlock_write(vma);
+ tlb_finish_mmu(&tlb);
}
/* Nuke the hugetlb page table entry */
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
diff --git a/mm/slub.c b/mm/slub.c
index 861592ac5425..f77b7407c51b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5694,8 +5694,12 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
if (unlikely(!size))
return ZERO_SIZE_PTR;
- if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
- /* kmalloc_nolock() in PREEMPT_RT is not supported from irq */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+ /*
+ * kmalloc_nolock() in PREEMPT_RT is not supported from
+ * non-preemptible context because local_lock becomes a
+ * sleeping lock on RT.
+ */
return NULL;
retry:
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
diff --git a/mm/vma.c b/mm/vma.c
index fc90befd162f..7a908a964d18 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -37,6 +37,8 @@ struct mmap_state {
bool check_ksm_early :1;
/* If we map new, hold the file rmap lock on mapping. */
bool hold_file_rmap_lock :1;
+ /* If .mmap_prepare changed the file, we don't need to pin. */
+ bool file_doesnt_need_get :1;
};
#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
@@ -67,18 +69,13 @@ struct mmap_state {
.state = VMA_MERGE_START, \
}
-/*
- * If, at any point, the VMA had unCoW'd mappings from parents, it will maintain
- * more than one anon_vma_chain connecting it to more than one anon_vma. A merge
- * would mean a wider range of folios sharing the root anon_vma lock, and thus
- * potential lock contention, we do not wish to encourage merging such that this
- * scales to a problem.
- */
-static bool vma_had_uncowed_parents(struct vm_area_struct *vma)
+/* Was this VMA ever forked from a parent, i.e. might it contain CoW mappings? */
+static bool vma_is_fork_child(struct vm_area_struct *vma)
{
/*
* The list_is_singular() test is to avoid merging VMA cloned from
- * parents. This can improve scalability caused by anon_vma lock.
+ * parents. This avoids scalability problems caused by contention on
+ * the anon_vma root lock.
*/
return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
}
@@ -115,11 +112,19 @@ static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
VM_WARN_ON(src && src_anon != src->anon_vma);
/* Case 1 - we will dup_anon_vma() from src into tgt. */
- if (!tgt_anon && src_anon)
- return !vma_had_uncowed_parents(src);
+ if (!tgt_anon && src_anon) {
+ struct vm_area_struct *copied_from = vmg->copied_from;
+
+ if (vma_is_fork_child(src))
+ return false;
+ if (vma_is_fork_child(copied_from))
+ return false;
+
+ return true;
+ }
/* Case 2 - we will simply use tgt's anon_vma. */
if (tgt_anon && !src_anon)
- return !vma_had_uncowed_parents(tgt);
+ return !vma_is_fork_child(tgt);
/* Case 3 - the anon_vma's are already shared. */
return src_anon == tgt_anon;
}
@@ -829,6 +834,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
VM_WARN_ON_VMG(middle &&
!(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
+ /* An existing merge can never be used by the mremap() logic. */
+ VM_WARN_ON_VMG(vmg->copied_from, vmg);
vmg->state = VMA_MERGE_NOMERGE;
@@ -1099,6 +1106,33 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
}
/*
+ * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
+ * mremap()
+ *
+ * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
+ * @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
+ * possible.
+ *
+ * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
+ * range, i.e. the target range for the VMA.
+ *
+ * Returns: NULL if no merge was possible, otherwise a pointer to the
+ * VMA we expanded.
+ *
+ * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
+ * the copied-from VMA.
+ */
+static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
+{
+ /* We must have a copied-from VMA. */
+ VM_WARN_ON_VMG(!vmg->middle, vmg);
+
+ vmg->copied_from = vmg->middle;
+ vmg->middle = NULL;
+ return vma_merge_new_range(vmg);
+}
+
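A hedged caller sketch, condensed from the copy_vma() hunk later in this patch; it assumes vmg was initialized with the source VMA in vmg->middle, per the ASSUMPTIONS above:

	vmg.pgoff = pgoff;			/* relative to the target range */
	vmg.next = vma_iter_next_rewind(&vmi, NULL);
	/* vmg.middle already holds the mremap() source VMA. */
	new_vma = vma_merge_copied_range(&vmg);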
+/*
* vma_expand - Expand an existing VMA
*
* @vmg: Describes a VMA expansion operation.
@@ -1117,46 +1151,52 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
int vma_expand(struct vma_merge_struct *vmg)
{
struct vm_area_struct *anon_dup = NULL;
- bool remove_next = false;
struct vm_area_struct *target = vmg->target;
struct vm_area_struct *next = vmg->next;
+ bool remove_next = false;
vm_flags_t sticky_flags;
-
- sticky_flags = vmg->vm_flags & VM_STICKY;
- sticky_flags |= target->vm_flags & VM_STICKY;
-
- VM_WARN_ON_VMG(!target, vmg);
+ int ret = 0;
mmap_assert_write_locked(vmg->mm);
-
vma_start_write(target);
- if (next && (target != next) && (vmg->end == next->vm_end)) {
- int ret;
- sticky_flags |= next->vm_flags & VM_STICKY;
+ if (next && target != next && vmg->end == next->vm_end)
remove_next = true;
- /* This should already have been checked by this point. */
- VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
- vma_start_write(next);
- /*
- * In this case we don't report OOM, so vmg->give_up_on_mm is
- * safe.
- */
- ret = dup_anon_vma(target, next, &anon_dup);
- if (ret)
- return ret;
- }
+ /* We must have a target. */
+ VM_WARN_ON_VMG(!target, vmg);
+ /* This should have already been checked by this point. */
+ VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
/* Not merging but overwriting any part of next is not handled. */
VM_WARN_ON_VMG(next && !remove_next &&
next != target && vmg->end > next->vm_start, vmg);
- /* Only handles expanding */
+ /* Only handles expanding. */
VM_WARN_ON_VMG(target->vm_start < vmg->start ||
target->vm_end > vmg->end, vmg);
+ sticky_flags = vmg->vm_flags & VM_STICKY;
+ sticky_flags |= target->vm_flags & VM_STICKY;
if (remove_next)
- vmg->__remove_next = true;
+ sticky_flags |= next->vm_flags & VM_STICKY;
+
+ /*
+ * If we are removing the next VMA or copying from a VMA
+ * (e.g. mremap()'ing), we must propagate anon_vma state.
+ *
+ * Note that, by convention, callers ignore OOM for this case, so
+ * we don't need to account for vmg->give_up_on_mm here.
+ */
+ if (remove_next)
+ ret = dup_anon_vma(target, next, &anon_dup);
+ if (!ret && vmg->copied_from)
+ ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
+ if (ret)
+ return ret;
+ if (remove_next) {
+ vma_start_write(next);
+ vmg->__remove_next = true;
+ }
if (commit_merge(vmg))
goto nomem;
@@ -1828,10 +1868,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (new_vma && new_vma->vm_start < addr + len)
return NULL; /* should never get here */
- vmg.middle = NULL; /* New VMA range. */
vmg.pgoff = pgoff;
vmg.next = vma_iter_next_rewind(&vmi, NULL);
- new_vma = vma_merge_new_range(&vmg);
+ new_vma = vma_merge_copied_range(&vmg);
if (new_vma) {
/*
@@ -2413,7 +2452,9 @@ static int __mmap_new_file_vma(struct mmap_state *map,
struct vma_iterator *vmi = map->vmi;
int error;
- vma->vm_file = get_file(map->file);
+ vma->vm_file = map->file;
+ if (!map->file_doesnt_need_get)
+ get_file(map->file);
if (!map->file->f_op->mmap)
return 0;
@@ -2601,7 +2642,10 @@ static int call_mmap_prepare(struct mmap_state *map,
/* Update fields permitted to be changed. */
map->pgoff = desc->pgoff;
- map->file = desc->vm_file;
+ if (desc->vm_file != map->file) {
+ map->file_doesnt_need_get = true;
+ map->file = desc->vm_file;
+ }
map->vm_flags = desc->vm_flags;
map->page_prot = desc->page_prot;
/* User-defined fields. */
diff --git a/mm/vma.h b/mm/vma.h
index abada6a64c4e..9d5ee6ac913a 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -106,6 +106,9 @@ struct vma_merge_struct {
struct anon_vma_name *anon_name;
enum vma_merge_state state;
+ /* If copied from (i.e. mremap()'d) the VMA from which we are copying. */
+ struct vm_area_struct *copied_from;
+
/* Flags which callers can use to modify merge behaviour: */
/*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 41dd01e8430c..628f96e83b11 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4248,7 +4248,7 @@ void *vzalloc_node_noprof(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node_noprof);
/**
- * vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
+ * vrealloc_node_align - reallocate virtually contiguous memory; contents
* remain unchanged
* @p: object to reallocate memory for
* @size: the size to reallocate
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..614ccf39fe3f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7707,6 +7707,17 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
return ret;
}
+#else
+
+static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
+ unsigned long nr_pages,
+ struct scan_control *sc)
+{
+ return 0;
+}
+
+#endif
+
enum {
MEMORY_RECLAIM_SWAPPINESS = 0,
MEMORY_RECLAIM_SWAPPINESS_MAX,
@@ -7814,8 +7825,6 @@ int user_proactive_reclaim(char *buf,
return 0;
}
-#endif
-
/**
* check_move_unevictable_folios - Move evictable folios to appropriate zone
* lru list
diff --git a/mm/zswap.c b/mm/zswap.c
index 5d0f8b13a958..ac9b7a60736b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -787,7 +787,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
return 0;
fail:
- if (acomp)
+ if (!IS_ERR_OR_NULL(acomp))
crypto_free_acomp(acomp);
kfree(buffer);
return ret;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c57c806edba8..1a84c5a3c446 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1332,14 +1332,15 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
has_mac = skb_mac_header_was_set(skb);
has_trans = skb_transport_header_was_set(skb);
- printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
- "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
+ printk("%sskb len=%u data_len=%u headroom=%u headlen=%u tailroom=%u\n"
+ "end-tail=%u mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
"shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
"csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
"hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
"priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
"encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
- level, skb->len, headroom, skb_headlen(skb), tailroom,
+ level, skb->len, skb->data_len, headroom, skb_headlen(skb),
+ tailroom, skb->end - skb->tail,
has_mac ? skb->mac_header : -1,
has_mac ? skb_mac_header_len(skb) : -1,
skb->mac_len,
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 99ede37698ac..35ce3941fae3 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -158,7 +158,7 @@ unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
DSA_MAX_NUM_OFFLOADING_BRIDGES,
1);
- if (bridge_num >= max)
+ if (bridge_num > max)
return 0;
set_bit(bridge_num, &dsa_fwd_offloading_bridges);
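The off-by-one is easiest to see with concrete numbers. A standalone C illustration (not kernel code; it assumes, as the fix implies, that max itself is a valid bridge number and that bit 0 is never used):

	#include <stdio.h>

	/* Returns the first free bridge number in 1..max, or 0 when the
	 * range is exhausted. */
	static unsigned int bridge_num_get(unsigned long *bitmap, int max)
	{
		unsigned int n;

		for (n = 1; (*bitmap >> n) & 1; n++)
			;
		if (n > max)	/* old check: n >= max wrongly rejected n == max */
			return 0;
		*bitmap |= 1UL << n;
		return n;
	}

	int main(void)
	{
		unsigned long bm = 0;
		int max = 2;

		printf("%u\n", bridge_num_get(&bm, max));	/* 1 */
		printf("%u\n", bridge_num_get(&bm, max));	/* 2: rejected before the fix */
		printf("%u\n", bridge_num_get(&bm, max));	/* 0: range exhausted */
		return 0;
	}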
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 3970b6b7ace5..ab8f309f8925 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -215,6 +215,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
return gue_control_message(skb, guehdr);
proto_ctype = guehdr->proto_ctype;
+ if (unlikely(!proto_ctype))
+ goto drop;
+
__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb_reset_transport_header(skb);
diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c
index 7a99639204b1..309d5ba983d0 100644
--- a/net/ipv4/fou_nl.c
+++ b/net/ipv4/fou_nl.c
@@ -15,7 +15,7 @@
const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = {
[FOU_ATTR_PORT] = { .type = NLA_BE16, },
[FOU_ATTR_AF] = { .type = NLA_U8, },
- [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
+ [FOU_ATTR_IPPROTO] = NLA_POLICY_MIN(NLA_U8, 1),
[FOU_ATTR_TYPE] = { .type = NLA_U8, },
[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
[FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 59d17b6f06bf..f6a5d8c73af9 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1555,8 +1555,8 @@ skip_routeinfo:
memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
mtu = ntohl(n);
- if (in6_dev->ra_mtu != mtu) {
- in6_dev->ra_mtu = mtu;
+ if (READ_ONCE(in6_dev->ra_mtu) != mtu) {
+ WRITE_ONCE(in6_dev->ra_mtu, mtu);
send_ifinfo_notify = true;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 687c1366a4d0..f9b0f666600f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1086,8 +1086,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tunnel = session->tunnel;
/* Check protocol version */
- if (version != tunnel->version)
+ if (version != tunnel->version) {
+ l2tp_session_put(session);
goto invalid;
+ }
if (version == L2TP_HDR_VER_3 &&
l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
@@ -1414,8 +1416,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
{
struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
del_work);
- struct sock *sk = tunnel->sock;
- struct socket *sock = sk->sk_socket;
l2tp_tunnel_closeall(tunnel);
@@ -1423,6 +1423,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
* the sk API to release it here.
*/
if (tunnel->fd < 0) {
+ struct socket *sock = tunnel->sock->sk_socket;
+
if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
sock_release(sock);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 649ea9d2ae9b..5bc7eb2342bb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -451,8 +451,6 @@ struct ieee80211_mgd_assoc_data {
struct ieee80211_conn_settings conn;
u16 status;
-
- bool disabled;
} link[IEEE80211_MLD_MAX_NUM_LINKS];
u8 ap_addr[ETH_ALEN] __aligned(2);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7b0aa24c1f97..515384ca2f8f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -350,6 +350,8 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
/* we hold the RTNL here so can safely walk the list */
list_for_each_entry(nsdata, &local->interfaces, list) {
if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
+ struct ieee80211_link_data *link;
+
/*
* Only OCB and monitor mode may coexist
*/
@@ -376,8 +378,10 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* will not add another interface while any channel
* switch is active.
*/
- if (nsdata->vif.bss_conf.csa_active)
- return -EBUSY;
+ for_each_link_data(nsdata, link) {
+ if (link->conf->csa_active)
+ return -EBUSY;
+ }
/*
* The remaining checks are only performed for interfaces
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index d5da7ccea66e..04c8809173d7 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -987,7 +987,8 @@ void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata)
if (ieee80211_sdata_running(sdata)) {
list_for_each_entry(key, &sdata->key_list, list) {
- increment_tailroom_need_count(sdata);
+ if (!(key->flags & KEY_FLAG_TAINTED))
+ increment_tailroom_need_count(sdata);
ieee80211_key_enable_hw_accel(key);
}
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ddff090e7dce..82a08f49f498 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6161,6 +6161,98 @@ static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies,
return true;
}
+static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
+{
+ if (bm_size == 1)
+ return *data;
+
+ return get_unaligned_le16(data);
+}
+
+static int
+ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_ttlm_elem *ttlm,
+ struct ieee80211_adv_ttlm_info *ttlm_info)
+{
+ /* The element size was already validated in
+ * ieee80211_tid_to_link_map_size_ok()
+ */
+ u8 control, link_map_presence, map_size, tid;
+ u8 *pos;
+
+ memset(ttlm_info, 0, sizeof(*ttlm_info));
+ pos = (void *)ttlm->optional;
+ control = ttlm->control;
+
+ if ((control & IEEE80211_TTLM_CONTROL_DIRECTION) !=
+ IEEE80211_TTLM_DIRECTION_BOTH) {
+ sdata_info(sdata, "Invalid advertised T2L map direction\n");
+ return -EINVAL;
+ }
+
+ link_map_presence = *pos;
+ pos++;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) {
+ ttlm_info->switch_time = get_unaligned_le16(pos);
+
+ /* Since ttlm_info->switch_time == 0 means no switch time, bump
+ * it by 1.
+ */
+ if (!ttlm_info->switch_time)
+ ttlm_info->switch_time = 1;
+
+ pos += 2;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) {
+ ttlm_info->duration = pos[0] | pos[1] << 8 | pos[2] << 16;
+ pos += 3;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) {
+ ttlm_info->map = 0xffff;
+ return 0;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ map_size = 1;
+ else
+ map_size = 2;
+
+ /* According to Draft P802.11be_D3.0 clause 35.3.7.1.7, an AP MLD shall
+ * not advertise a TID-to-link mapping that does not map all TIDs to the
+ * same link set; reject the frame if not all TIDs have a mapping
+ */
+ if (link_map_presence != 0xff) {
+ sdata_info(sdata,
+ "Invalid advertised T2L mapping presence indicator\n");
+ return -EINVAL;
+ }
+
+ ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
+ if (!ttlm_info->map) {
+ sdata_info(sdata,
+ "Invalid advertised T2L map for TID 0\n");
+ return -EINVAL;
+ }
+
+ pos += map_size;
+
+ for (tid = 1; tid < 8; tid++) {
+ u16 map = ieee80211_get_ttlm(map_size, pos);
+
+ if (map != ttlm_info->map) {
+ sdata_info(sdata, "Invalid advertised T2L map for tid %d\n",
+ tid);
+ return -EINVAL;
+ }
+
+ pos += map_size;
+ }
+ return 0;
+}
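The helper moved above reads the per-TID link map in either a 1-octet or a little-endian 2-octet encoding, selected by the control field. A standalone C demonstration of the same decoding (not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors ieee80211_get_ttlm(): 1-octet maps are read directly,
	 * 2-octet maps are little-endian on the wire. */
	static uint16_t get_ttlm(uint8_t bm_size, const uint8_t *data)
	{
		if (bm_size == 1)
			return *data;
		return (uint16_t)data[0] | ((uint16_t)data[1] << 8);
	}

	int main(void)
	{
		const uint8_t one[] = { 0x05 };		/* links 0 and 2 */
		const uint8_t two[] = { 0x05, 0x80 };	/* links 0, 2 and 15 */

		printf("0x%04x\n", get_ttlm(1, one));	/* 0x0005 */
		printf("0x%04x\n", get_ttlm(2, two));	/* 0x8005 */
		return 0;
	}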
+
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
struct ieee802_11_elems *elems,
@@ -6192,8 +6284,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
continue;
valid_links |= BIT(link_id);
- if (assoc_data->link[link_id].disabled)
- dormant_links |= BIT(link_id);
if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_sta_allocate_link(sta, link_id);
@@ -6202,6 +6292,33 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
}
+ /*
+ * We do not support setting a negotiated TTLM during
+ * association. As such, we can assume that if there is a TTLM,
+ * then it is the currently active advertised TTLM.
+ * In that case, there must be exactly one TTLM that does not
+ * have a switch time set. This mapping should also leave us
+ * with at least one usable link.
+ */
+ if (elems->ttlm_num > 1) {
+ sdata_info(sdata,
+ "More than one advertised TTLM in association response\n");
+ goto out_err;
+ } else if (elems->ttlm_num == 1) {
+ if (ieee80211_parse_adv_t2l(sdata, elems->ttlm[0],
+ &sdata->u.mgd.ttlm_info) ||
+ sdata->u.mgd.ttlm_info.switch_time != 0 ||
+ !(valid_links & sdata->u.mgd.ttlm_info.map)) {
+ sdata_info(sdata,
+ "Invalid advertised TTLM in association response\n");
+ goto out_err;
+ }
+
+ sdata->u.mgd.ttlm_info.active = true;
+ dormant_links =
+ valid_links & ~sdata->u.mgd.ttlm_info.map;
+ }
+
ieee80211_vif_set_links(sdata, valid_links, dormant_links);
}
@@ -6992,95 +7109,6 @@ static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
sdata->u.mgd.ttlm_info.switch_time = 0;
}
-static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
-{
- if (bm_size == 1)
- return *data;
- else
- return get_unaligned_le16(data);
-}
-
-static int
-ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata,
- const struct ieee80211_ttlm_elem *ttlm,
- struct ieee80211_adv_ttlm_info *ttlm_info)
-{
- /* The element size was already validated in
- * ieee80211_tid_to_link_map_size_ok()
- */
- u8 control, link_map_presence, map_size, tid;
- u8 *pos;
-
- memset(ttlm_info, 0, sizeof(*ttlm_info));
- pos = (void *)ttlm->optional;
- control = ttlm->control;
-
- if ((control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) ||
- !(control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT))
- return 0;
-
- if ((control & IEEE80211_TTLM_CONTROL_DIRECTION) !=
- IEEE80211_TTLM_DIRECTION_BOTH) {
- sdata_info(sdata, "Invalid advertised T2L map direction\n");
- return -EINVAL;
- }
-
- link_map_presence = *pos;
- pos++;
-
- ttlm_info->switch_time = get_unaligned_le16(pos);
-
- /* Since ttlm_info->switch_time == 0 means no switch time, bump it
- * by 1.
- */
- if (!ttlm_info->switch_time)
- ttlm_info->switch_time = 1;
-
- pos += 2;
-
- if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) {
- ttlm_info->duration = pos[0] | pos[1] << 8 | pos[2] << 16;
- pos += 3;
- }
-
- if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
- map_size = 1;
- else
- map_size = 2;
-
- /* According to Draft P802.11be_D3.0 clause 35.3.7.1.7, an AP MLD shall
- * not advertise a TID-to-link mapping that does not map all TIDs to the
- * same link set, reject frame if not all links have mapping
- */
- if (link_map_presence != 0xff) {
- sdata_info(sdata,
- "Invalid advertised T2L mapping presence indicator\n");
- return -EINVAL;
- }
-
- ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
- if (!ttlm_info->map) {
- sdata_info(sdata,
- "Invalid advertised T2L map for TID 0\n");
- return -EINVAL;
- }
-
- pos += map_size;
-
- for (tid = 1; tid < 8; tid++) {
- u16 map = ieee80211_get_ttlm(map_size, pos);
-
- if (map != ttlm_info->map) {
- sdata_info(sdata, "Invalid advertised T2L map for tid %d\n",
- tid);
- return -EINVAL;
- }
-
- pos += map_size;
- }
- return 0;
-}
-
static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
u64 beacon_ts)
@@ -9737,7 +9765,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
req, true, i,
&assoc_data->link[i].conn);
assoc_data->link[i].bss = link_cbss;
- assoc_data->link[i].disabled = req->links[i].disabled;
if (!bss->uapsd_supported)
uapsd_supported = false;
@@ -10719,8 +10746,6 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
&data->link[link_id].conn);
data->link[link_id].bss = link_cbss;
- data->link[link_id].disabled =
- req->add_links[link_id].disabled;
data->link[link_id].elems =
(u8 *)req->add_links[link_id].elems;
data->link[link_id].elems_len =
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5ef315ed3b0f..4823c8d45639 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -347,8 +347,13 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
mgmt->da))
return;
} else {
- /* Beacons are expected only with broadcast address */
- if (!is_broadcast_ether_addr(mgmt->da))
+ /*
+ * Non-S1G beacons are expected only with a broadcast address.
+ * S1G beacons carry only the SA, so a DA check is neither
+ * required nor possible.
+ */
+ if (!ieee80211_is_s1g_beacon(mgmt->frame_control) &&
+ !is_broadcast_ether_addr(mgmt->da))
return;
}
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b94cb2ffbaf8..9cc29ae85b06 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -752,7 +752,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
unsigned char *dptr;
ax25_cb *ax25s;
int ret;
- struct sk_buff *skbn;
+ struct sk_buff *nskb, *oskb;
/*
* Reject malformed packets early. Check that it contains at least 2
@@ -811,14 +811,16 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
/* We are going to change the netrom headers so we should get our
own skb, we also did not know until now how much header space
we had to reserve... - RXQ */
- if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
+ nskb = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC);
+
+ if (!nskb) {
nr_node_unlock(nr_node);
nr_node_put(nr_node);
dev_put(dev);
return 0;
}
- kfree_skb(skb);
- skb=skbn;
+ oskb = skb;
+ skb = nskb;
skb->data[14]--;
dptr = skb_push(skb, 1);
@@ -837,6 +839,9 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
nr_node_unlock(nr_node);
nr_node_put(nr_node);
+ if (ret)
+ kfree_skb(oskb);
+
return ret;
}
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6bbbc16ab778..f0ce8ce1dce0 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -310,22 +310,23 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
*/
int ovs_vport_get_upcall_stats(struct vport *vport, struct sk_buff *skb)
{
+ u64 tx_success = 0, tx_fail = 0;
struct nlattr *nla;
int i;
- __u64 tx_success = 0;
- __u64 tx_fail = 0;
-
for_each_possible_cpu(i) {
const struct vport_upcall_stats_percpu *stats;
+ u64 n_success, n_fail;
unsigned int start;
stats = per_cpu_ptr(vport->upcall_stats, i);
do {
start = u64_stats_fetch_begin(&stats->syncp);
- tx_success += u64_stats_read(&stats->n_success);
- tx_fail += u64_stats_read(&stats->n_fail);
+ n_success = u64_stats_read(&stats->n_success);
+ n_fail = u64_stats_read(&stats->n_fail);
} while (u64_stats_fetch_retry(&stats->syncp, start));
+ tx_success += n_success;
+ tx_fail += n_fail;
}
nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
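The reshuffle above avoids a subtle double count; a sketch of the failure mode in the old code (illustration only):

	/*
	 * With the accumulation inside the retry loop, a seqcount retry
	 * re-executed the "+=" and counted the pre-retry values twice:
	 *
	 *   do {
	 *           start = u64_stats_fetch_begin(&stats->syncp);
	 *           tx_success += u64_stats_read(&stats->n_success);
	 *           tx_fail    += u64_stats_read(&stats->n_fail);
	 *   } while (u64_stats_fetch_retry(&stats->syncp, start));
	 *
	 * Snapshotting into n_success/n_fail inside the loop and adding them
	 * only after a consistent read avoids that.
	 */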
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5b7342d43486..36d6ca0d1089 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -387,7 +387,7 @@ struct rxrpc_peer {
struct rb_root service_conns; /* Service connections */
struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
unsigned long app_data; /* Application data (e.g. afs_server) */
- time64_t last_tx_at; /* Last time packet sent here */
+ unsigned int last_tx_at; /* Last time packet sent here (time64_t LSW) */
seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */
int debug_id; /* debug ID for printks */
@@ -1379,6 +1379,13 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t acked_serial,
bool sendmsg_fail);
+/* Update the last transmission time on a peer for keepalive purposes. */
+static inline void rxrpc_peer_mark_tx(struct rxrpc_peer *peer)
+{
+ /* To avoid tearing on 32-bit systems, we only keep the LSW. */
+ WRITE_ONCE(peer->last_tx_at, ktime_get_seconds());
+}
+
/*
* peer_object.c
*/
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 232b6986da83..98ad9b51ca2c 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -194,7 +194,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
}
ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
if (ret < 0)
trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
rxrpc_tx_point_call_final_resend);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 8b5903b6e481..d70db367e358 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -275,7 +275,7 @@ static void rxrpc_send_ack_packet(struct rxrpc_call *call, int nr_kv, size_t len
rxrpc_local_dont_fragment(conn->local, why == rxrpc_propose_ack_ping_for_mtu_probe);
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
- call->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(call->peer);
if (ret < 0) {
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_ack);
@@ -411,7 +411,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
ret = do_udp_sendmsg(conn->local->socket, &msg, sizeof(pkt));
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_abort);
@@ -698,7 +698,7 @@ void rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_send_data_req
ret = 0;
trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags,
rxrpc_txdata_inject_loss);
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
goto done;
}
}
@@ -711,7 +711,7 @@ void rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_send_data_req
*/
rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
if (ret == -EMSGSIZE) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_msgsize);
@@ -797,7 +797,7 @@ void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
}
/*
@@ -917,7 +917,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
trace_rxrpc_tx_packet(peer->debug_id, &whdr,
rxrpc_tx_point_version_keepalive);
- peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(peer);
_leave("");
}
@@ -973,7 +973,7 @@ void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response
if (ret < 0)
goto fail;
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
return;
fail:
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7f4729234957..9d02448ac062 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -238,6 +238,21 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
}
/*
+ * Reconstruct the last transmission time. The calculated difference is
+ * valid provided no more than ~68 years have elapsed since the last
+ * transmission.
+ */
+static time64_t rxrpc_peer_get_tx_mark(const struct rxrpc_peer *peer, time64_t base)
+{
+ s32 last_tx_at = READ_ONCE(peer->last_tx_at);
+ s32 base_lsw = base;
+ s32 diff = last_tx_at - base_lsw;
+
+ diff = clamp(diff, -RXRPC_KEEPALIVE_TIME, RXRPC_KEEPALIVE_TIME);
+
+ return diff + base;
+}
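The reconstruction leans on two's-complement wraparound of the 32-bit difference. A standalone C demonstration (not kernel code; the clamp is reduced to a comment):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors rxrpc_peer_get_tx_mark(): rebuild a 64-bit time from
	 * its stored least-significant word. */
	static int64_t reconstruct(uint32_t last_tx_lsw, int64_t base)
	{
		int32_t diff = (int32_t)(last_tx_lsw - (uint32_t)base);

		/* rxrpc additionally clamps diff to +/- RXRPC_KEEPALIVE_TIME */
		return base + diff;
	}

	int main(void)
	{
		int64_t base = 0x100000010LL;	/* "now", above 2^32 */
		uint32_t mark = 0x00000008;	/* stored LSW, 8s earlier */

		printf("%#llx\n", (long long)reconstruct(mark, base)); /* 0x100000008 */
		return 0;
	}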
+
+/*
* Perform keep-alive pings.
*/
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
@@ -265,7 +280,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
spin_unlock_bh(&rxnet->peer_hash_lock);
if (use) {
- keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+ keepalive_at = rxrpc_peer_get_tx_mark(peer, base) + RXRPC_KEEPALIVE_TIME;
slot = keepalive_at - base;
_debug("%02x peer %u t=%d {%pISp}",
cursor, peer->debug_id, slot, &peer->srx.transport);
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index d803562ca0ac..59292f7f9205 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -296,13 +296,13 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
now = ktime_get_seconds();
seq_printf(seq,
- "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8d %8d\n",
+ "UDP %-47.47s %-47.47s %3u %4u %5u %6ds %8d %8d\n",
lbuff,
rbuff,
refcount_read(&peer->ref),
peer->cong_ssthresh,
peer->max_data,
- now - peer->last_tx_at,
+ (s32)now - (s32)READ_ONCE(peer->last_tx_at),
READ_ONCE(peer->recent_srtt_us),
READ_ONCE(peer->recent_rto_us));
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 7fa7e77f6bb9..e1f7513a46db 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -518,7 +518,8 @@ try_again:
if (rxrpc_call_has_failed(call))
goto call_failed;
- if (!skb_queue_empty(&call->recvmsg_queue))
+ if (!(flags & MSG_PEEK) &&
+ !skb_queue_empty(&call->recvmsg_queue))
rxrpc_notify_socket(call);
goto not_yet_complete;
@@ -549,11 +550,21 @@ error_unlock_call:
error_requeue_call:
if (!(flags & MSG_PEEK)) {
spin_lock_irq(&rx->recvmsg_lock);
- list_add(&call->recvmsg_link, &rx->recvmsg_q);
- spin_unlock_irq(&rx->recvmsg_lock);
+ if (list_empty(&call->recvmsg_link)) {
+ list_add(&call->recvmsg_link, &rx->recvmsg_q);
+ rxrpc_see_call(call, rxrpc_call_see_recvmsg_requeue);
+ spin_unlock_irq(&rx->recvmsg_lock);
+ } else if (list_is_first(&call->recvmsg_link, &rx->recvmsg_q)) {
+ spin_unlock_irq(&rx->recvmsg_lock);
+ rxrpc_put_call(call, rxrpc_call_see_recvmsg_requeue_first);
+ } else {
+ list_move(&call->recvmsg_link, &rx->recvmsg_q);
+ spin_unlock_irq(&rx->recvmsg_lock);
+ rxrpc_put_call(call, rxrpc_call_see_recvmsg_requeue_move);
+ }
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
} else {
- rxrpc_put_call(call, rxrpc_call_put_recvmsg);
+ rxrpc_put_call(call, rxrpc_call_put_recvmsg_peek_nowait);
}
error_no_call:
release_sock(&rx->sk);
diff --git a/net/rxrpc/rxgk.c b/net/rxrpc/rxgk.c
index dce5a3d8a964..43cbf9efd89f 100644
--- a/net/rxrpc/rxgk.c
+++ b/net/rxrpc/rxgk.c
@@ -678,7 +678,7 @@ static int rxgk_issue_challenge(struct rxrpc_connection *conn)
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
if (ret > 0)
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
__free_page(page);
if (ret < 0) {
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 3657c0661cdc..a756855a0a62 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -694,7 +694,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
return -EAGAIN;
}
- conn->peer->last_tx_at = ktime_get_seconds();
+ rxrpc_peer_mark_tx(conn->peer);
trace_rxrpc_tx_packet(conn->debug_id, &whdr,
rxrpc_tx_point_rxkad_challenge);
_leave(" = 0");
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 1dfdda6c2d4c..8e8f6af731d5 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -821,6 +821,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
/* could be stupid policy setup or mtu config
* so let's be conservative... */
if ((action == TC_ACT_SHOT) || exceed_mtu) {
+drop:
qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
return TC_ACT_SHOT;
}
@@ -829,6 +830,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
skb_push(skb, skb->dev->hard_header_len);
ife_meta = ife_encode(skb, metalen);
+ if (!ife_meta)
+ goto drop;
spin_lock(&ife->tcf_lock);
@@ -844,8 +847,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
if (err < 0) {
/* too corrupt to keep around if overwritten */
spin_unlock(&ife->tcf_lock);
- qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
- return TC_ACT_SHOT;
+ goto drop;
}
skboff += err;
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9d59090bbe93..e7778413e72f 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -373,7 +373,7 @@ static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
- if (cl->qdisc->q.qlen > 0) /* class is active */
+ if (cl_is_active(cl)) /* class is active */
qfq_deactivate_class(q, cl);
qfq_rm_from_agg(q, cl);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 8badec6d82a2..6e4bdaa876ed 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -178,6 +178,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
if (m->dev == dev)
return -ELOOP;
+ if (sch->parent != TC_H_ROOT) {
+ NL_SET_ERR_MSG_MOD(extack, "teql can only be used as root");
+ return -EOPNOTSUPP;
+ }
+
q->m = m;
skb_queue_head_init(&q->q);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3755ba079d07..7b823d759141 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -603,6 +603,11 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
SCTP_PEER_INIT(initchunk));
+ /* SCTP-AUTH: generate the association shared keys so that
+ * we can potentially sign the COOKIE-ECHO.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
+
/* Reset init error count upon receipt of INIT-ACK. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
@@ -617,11 +622,6 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
- /* SCTP-AUTH: generate the association shared keys so that
- * we can potentially sign the COOKIE-ECHO.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
-
/* 5.1 C) "A" shall then send the State Cookie received in the
* INIT ACK chunk in a COOKIE ECHO chunk, ...
*/
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index dcc8a1d5851e..d3e26025ef58 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -28,6 +28,7 @@
static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
bool cancel_timeout);
+static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs);
static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
@@ -499,9 +500,7 @@ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
return 0;
spin_lock_bh(&vvs->tx_lock);
- ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
- if (ret > credit)
- ret = credit;
+ ret = min_t(u32, credit, virtio_transport_has_space(vvs));
vvs->tx_cnt += ret;
vvs->bytes_unsent += ret;
spin_unlock_bh(&vvs->tx_lock);
@@ -822,6 +821,15 @@ virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
+static u32 virtio_transport_tx_buf_size(struct virtio_vsock_sock *vvs)
+{
+ /* The peer advertises its receive buffer via peer_buf_alloc, but we
+ * cap it to our local buf_alloc so a remote peer cannot force us to
+ * queue more data than our own buffer configuration allows.
+ */
+ return min(vvs->peer_buf_alloc, vvs->buf_alloc);
+}
+
int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
struct msghdr *msg,
@@ -831,7 +839,7 @@ virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
spin_lock_bh(&vvs->tx_lock);
- if (len > vvs->peer_buf_alloc) {
+ if (len > virtio_transport_tx_buf_size(vvs)) {
spin_unlock_bh(&vvs->tx_lock);
return -EMSGSIZE;
}
@@ -877,12 +885,16 @@ u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);
-static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs)
{
- struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
- bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ /* Use s64 arithmetic so that if the peer shrinks peer_buf_alloc while
+ * we have bytes in flight (tx_cnt - peer_fwd_cnt), the subtraction
+ * does not underflow.
+ */
+ bytes = (s64)virtio_transport_tx_buf_size(vvs) -
+ (vvs->tx_cnt - vvs->peer_fwd_cnt);
if (bytes < 0)
bytes = 0;
@@ -895,7 +907,7 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
s64 bytes;
spin_lock_bh(&vvs->tx_lock);
- bytes = virtio_transport_has_space(vsk);
+ bytes = virtio_transport_has_space(vvs);
spin_unlock_bh(&vvs->tx_lock);
return bytes;
@@ -1359,9 +1371,11 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
/* Try to copy small packets into the buffer of last packet queued,
* to avoid wasting memory queueing the entire buffer with a small
- * payload.
+ * payload. Skip non-linear (e.g. zerocopy) skbs, as they carry their
+ * payload in skb_shinfo() fragments rather than the linear buffer.
*/
- if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
+ if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue) &&
+ !skb_is_nonlinear(skb)) {
struct virtio_vsock_hdr *last_hdr;
struct sk_buff *last_skb;
@@ -1490,7 +1504,7 @@ static bool virtio_transport_space_update(struct sock *sk,
spin_lock_bh(&vvs->tx_lock);
vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
- space_available = virtio_transport_has_space(vsk);
+ space_available = virtio_transport_has_space(vvs);
spin_unlock_bh(&vvs->tx_lock);
return space_available;
}
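
The credit rework above is easiest to check with concrete numbers. Below is a standalone userspace sketch (not kernel code; the counter values are invented) of why the free-space computation must be done in signed 64-bit once peer_buf_alloc is allowed to shrink while bytes are still in flight:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t peer_buf_alloc = 4096;   /* peer shrank its buffer */
	uint32_t tx_cnt = 65536;
	uint32_t peer_fwd_cnt = 32768;    /* 32 KiB still in flight */

	/* Unsigned arithmetic wraps: "free space" looks enormous. */
	uint32_t bad = peer_buf_alloc - (tx_cnt - peer_fwd_cnt);

	/* Signed 64-bit arithmetic goes negative and is clamped to 0,
	 * which is what virtio_transport_has_space() now does. */
	int64_t good = (int64_t)peer_buf_alloc - (tx_cnt - peer_fwd_cnt);
	if (good < 0)
		good = 0;

	printf("u32: %u bytes \"free\" (bogus)\n", bad);
	printf("s64: %lld bytes free\n", (long long)good);
	return 0;
}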
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 225580507a4b..bf3a1a617cc0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -12244,9 +12244,6 @@ static int nl80211_process_links(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
}
-
- links[link_id].disabled =
- nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
}
return 0;
@@ -12426,13 +12423,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
goto free;
}
- if (req.links[req.link_id].disabled) {
- GENL_SET_ERR_MSG(info,
- "cannot have assoc link disabled");
- err = -EINVAL;
- goto free;
- }
-
if (info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS])
req.ext_mld_capa_ops =
nla_get_u16(info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index cc55b759694e..08c525835518 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1562,12 +1562,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
tmp = result;
tmp *= SCALE;
do_div(tmp, mcs_divisors[rate->mcs]);
- result = tmp;
/* and take NSS, DCM into account */
- result = (result * rate->nss) / 8;
+ tmp *= rate->nss;
+ do_div(tmp, 8);
if (rate->he_dcm)
- result /= 2;
+ do_div(tmp, 2);
+
+ result = tmp;
return result / 10000;
}
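
The do_div() rework above keeps the intermediate in 64 bits until the end; previously the u64 was truncated to a u32 before the NSS scaling. A small userspace sketch of the failure mode, with an invented intermediate value just large enough to truncate:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical intermediate after the SCALE step. */
	uint64_t tmp = 5000000000ULL;     /* > 2^32 */
	uint32_t nss = 4;

	uint32_t truncated = (uint32_t)tmp;       /* old: silent truncation */
	uint32_t old = truncated * nss / 8;

	uint64_t fixed = tmp * nss / 8;           /* new: stay in 64 bits */

	printf("old=%u fixed=%llu\n", old, (unsigned long long)fixed);
	return 0;
}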
diff --git a/security/landlock/audit.c b/security/landlock/audit.c
index c52d079cdb77..e899995f1fd5 100644
--- a/security/landlock/audit.c
+++ b/security/landlock/audit.c
@@ -191,7 +191,7 @@ static size_t get_denied_layer(const struct landlock_ruleset *const domain,
long youngest_layer = -1;
for_each_set_bit(access_bit, &access_req, layer_masks_size) {
- const access_mask_t mask = (*layer_masks)[access_bit];
+ const layer_mask_t mask = (*layer_masks)[access_bit];
long layer;
if (!mask)
diff --git a/security/landlock/domain.h b/security/landlock/domain.h
index 7fb70b25f85a..621f054c9a2b 100644
--- a/security/landlock/domain.h
+++ b/security/landlock/domain.h
@@ -97,7 +97,7 @@ struct landlock_hierarchy {
*/
atomic64_t num_denials;
/**
- * @id: Landlock domain ID, sets once at domain creation time.
+ * @id: Landlock domain ID, set once at domain creation time.
*/
u64 id;
/**
diff --git a/security/landlock/errata/abi-6.h b/security/landlock/errata/abi-6.h
index df7bc0e1fdf4..5113a829f87e 100644
--- a/security/landlock/errata/abi-6.h
+++ b/security/landlock/errata/abi-6.h
@@ -9,7 +9,7 @@
* This fix addresses an issue where signal scoping was overly restrictive,
* preventing sandboxed threads from signaling other threads within the same
* process if they belonged to different domains. Because threads are not
- * security boundaries, user space might assume that any thread within the same
+ * security boundaries, user space might assume that all threads within the same
* process can send signals between themselves (see :manpage:`nptl(7)` and
* :manpage:`libpsx(3)`). Consistent with :manpage:`ptrace(2)` behavior, direct
* interaction between threads of the same process should always be allowed.
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index fe794875ad46..8205673c8b1c 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -939,7 +939,12 @@ jump_up:
}
path_put(&walker_path);
- if (!allowed_parent1) {
+ /*
+ * Test CONFIG_AUDIT so that, when audit is compiled out, dead code
+ * elimination can drop the log_request_parent* handling and the
+ * caller's associated stack variables.
+ */
+#ifdef CONFIG_AUDIT
+ if (!allowed_parent1 && log_request_parent1) {
log_request_parent1->type = LANDLOCK_REQUEST_FS_ACCESS;
log_request_parent1->audit.type = LSM_AUDIT_DATA_PATH;
log_request_parent1->audit.u.path = *path;
@@ -949,7 +954,7 @@ jump_up:
ARRAY_SIZE(*layer_masks_parent1);
}
- if (!allowed_parent2) {
+ if (!allowed_parent2 && log_request_parent2) {
log_request_parent2->type = LANDLOCK_REQUEST_FS_ACCESS;
log_request_parent2->audit.type = LSM_AUDIT_DATA_PATH;
log_request_parent2->audit.u.path = *path;
@@ -958,6 +963,8 @@ jump_up:
log_request_parent2->layer_masks_size =
ARRAY_SIZE(*layer_masks_parent2);
}
+#endif /* CONFIG_AUDIT */
+
return allowed_parent1 && allowed_parent2;
}
@@ -1314,7 +1321,8 @@ static void hook_sb_delete(struct super_block *const sb)
* second call to iput() for the same Landlock object. Also
* checks I_NEW because such an inode cannot be tied to an object.
*/
- if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
+ if (inode_state_read(inode) &
+ (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}
diff --git a/security/landlock/net.c b/security/landlock/net.c
index 1f3915a90a80..e6367e30e5b0 100644
--- a/security/landlock/net.c
+++ b/security/landlock/net.c
@@ -71,6 +71,61 @@ static int current_check_access_socket(struct socket *const sock,
switch (address->sa_family) {
case AF_UNSPEC:
+ if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) {
+ /*
+ * Connecting to an address with AF_UNSPEC dissolves
+ * the TCP association, which has the same effect as
+ * closing the connection while retaining the socket
+ * object (i.e., the file descriptor). As for dropping
+ * privileges, closing connections is always allowed.
+ *
+ * For a TCP access control system, this request is
+ * legitimate. Let the network stack handle potential
+ * inconsistencies and return -EINVAL if needed.
+ */
+ return 0;
+ } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) {
+ /*
+ * Binding to an AF_UNSPEC address is treated
+ * differently by IPv4 and IPv6 sockets. The socket's
+ * family may change under our feet due to
+ * setsockopt(IPV6_ADDRFORM), but that's ok: we either
+ * reject entirely or require
+ * %LANDLOCK_ACCESS_NET_BIND_TCP for the given port, so
+ * it cannot be used to bypass the policy.
+ *
+ * IPv4 sockets map AF_UNSPEC to AF_INET for
+ * backward compatibility for bind accesses, only if the
+ * address is INADDR_ANY (cf. __inet_bind). IPv6
+ * sockets always reject it.
+ *
+ * Checking the address is required to not wrongfully
+ * return -EACCES instead of -EAFNOSUPPORT or -EINVAL.
+ * We could return 0 and let the network stack handle
+ * these checks, but it is safer to return a proper
+ * error and test consistency thanks to kselftest.
+ */
+ if (sock->sk->__sk_common.skc_family == AF_INET) {
+ const struct sockaddr_in *const sockaddr =
+ (struct sockaddr_in *)address;
+
+ if (addrlen < sizeof(struct sockaddr_in))
+ return -EINVAL;
+
+ if (sockaddr->sin_addr.s_addr !=
+ htonl(INADDR_ANY))
+ return -EAFNOSUPPORT;
+ } else {
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ else
+ return -EAFNOSUPPORT;
+ }
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ /* Only for bind(AF_UNSPEC+INADDR_ANY) on IPv4 socket. */
+ fallthrough;
case AF_INET: {
const struct sockaddr_in *addr4;
@@ -119,57 +174,18 @@ static int current_check_access_socket(struct socket *const sock,
return 0;
}
- /* Specific AF_UNSPEC handling. */
- if (address->sa_family == AF_UNSPEC) {
- /*
- * Connecting to an address with AF_UNSPEC dissolves the TCP
- * association, which have the same effect as closing the
- * connection while retaining the socket object (i.e., the file
- * descriptor). As for dropping privileges, closing
- * connections is always allowed.
- *
- * For a TCP access control system, this request is legitimate.
- * Let the network stack handle potential inconsistencies and
- * return -EINVAL if needed.
- */
- if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP)
- return 0;
-
- /*
- * For compatibility reason, accept AF_UNSPEC for bind
- * accesses (mapped to AF_INET) only if the address is
- * INADDR_ANY (cf. __inet_bind). Checking the address is
- * required to not wrongfully return -EACCES instead of
- * -EAFNOSUPPORT.
- *
- * We could return 0 and let the network stack handle these
- * checks, but it is safer to return a proper error and test
- * consistency thanks to kselftest.
- */
- if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) {
- /* addrlen has already been checked for AF_UNSPEC. */
- const struct sockaddr_in *const sockaddr =
- (struct sockaddr_in *)address;
-
- if (sock->sk->__sk_common.skc_family != AF_INET)
- return -EINVAL;
-
- if (sockaddr->sin_addr.s_addr != htonl(INADDR_ANY))
- return -EAFNOSUPPORT;
- }
- } else {
- /*
- * Checks sa_family consistency to not wrongfully return
- * -EACCES instead of -EINVAL. Valid sa_family changes are
- * only (from AF_INET or AF_INET6) to AF_UNSPEC.
- *
- * We could return 0 and let the network stack handle this
- * check, but it is safer to return a proper error and test
- * consistency thanks to kselftest.
- */
- if (address->sa_family != sock->sk->__sk_common.skc_family)
- return -EINVAL;
- }
+ /*
+ * Checks sa_family consistency to not wrongfully return
+ * -EACCES instead of -EINVAL. The only valid sa_family change
+ * is from AF_INET or AF_INET6 to AF_UNSPEC.
+ *
+ * We could return 0 and let the network stack handle this
+ * check, but it is safer to return a proper error and test
+ * consistency thanks to kselftest.
+ */
+ if (address->sa_family != sock->sk->__sk_common.skc_family &&
+ address->sa_family != AF_UNSPEC)
+ return -EINVAL;
id.key.data = (__force uintptr_t)port;
BUILD_BUG_ON(sizeof(port) > sizeof(id.key.data));
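
As a usage note: after this change, the only AF_UNSPEC bind that proceeds to the regular port check is INADDR_ANY on an IPv4 socket, mirroring what __inet_bind accepts. A minimal userspace sketch (the port is invented; error handling is elided for brevity):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_UNSPEC,  /* mapped to AF_INET by __inet_bind */
		.sin_port = htons(1024),  /* hypothetical port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	/* An IPv4 socket accepts AF_UNSPEC only with INADDR_ANY; Landlock
	 * now vets this exactly like an AF_INET bind on the same port. */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");
	close(fd);
	return 0;
}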
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index dfcdc19ea268..0a5b0c76b3f7 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -23,7 +23,6 @@
#include <linux/workqueue.h>
#include "access.h"
-#include "audit.h"
#include "domain.h"
#include "limits.h"
#include "object.h"
diff --git a/security/landlock/task.c b/security/landlock/task.c
index 2385017418ca..833bc0cfe5c9 100644
--- a/security/landlock/task.c
+++ b/security/landlock/task.c
@@ -86,7 +86,6 @@ static int hook_ptrace_access_check(struct task_struct *const child,
const unsigned int mode)
{
const struct landlock_cred_security *parent_subject;
- const struct landlock_ruleset *child_dom;
int err;
/* Quick return for non-landlocked tasks. */
@@ -96,7 +95,8 @@ static int hook_ptrace_access_check(struct task_struct *const child,
scoped_guard(rcu)
{
- child_dom = landlock_get_task_domain(child);
+ const struct landlock_ruleset *const child_dom =
+ landlock_get_task_domain(child);
err = domain_ptrace(parent_subject->domain, child_dom);
}
@@ -166,15 +166,15 @@ static int hook_ptrace_traceme(struct task_struct *const parent)
}
/**
- * domain_is_scoped - Checks if the client domain is scoped in the same
- * domain as the server.
+ * domain_is_scoped - Check if an interaction from a client/sender to a
+ * server/receiver should be restricted based on scope controls.
*
* @client: IPC sender domain.
* @server: IPC receiver domain.
* @scope: The scope restriction criteria.
*
- * Returns: True if the @client domain is scoped to access the @server,
- * unless the @server is also scoped in the same domain as @client.
+ * Returns: True if @server is in a different domain from @client, and @client
+ * is scoped to access @server (i.e. access should be denied).
*/
static bool domain_is_scoped(const struct landlock_ruleset *const client,
const struct landlock_ruleset *const server,
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index a82dd155e1d3..b12df5b5ddfc 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1074,7 +1074,9 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
runtime->oss.params = 0;
runtime->oss.prepare = 1;
runtime->oss.buffer_used = 0;
- snd_pcm_runtime_buffer_set_silence(runtime);
+ err = snd_pcm_runtime_buffer_set_silence(runtime);
+ if (err < 0)
+ goto failure;
runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 68bee40c9ada..932a9bf98cbc 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -730,13 +730,18 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
}
/* fill the PCM buffer with the current silence format; called from pcm_oss.c */
-void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
+int snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
{
- snd_pcm_buffer_access_lock(runtime);
+ int err;
+
+ err = snd_pcm_buffer_access_lock(runtime);
+ if (err < 0)
+ return err;
if (runtime->dma_area)
snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
bytes_to_samples(runtime, runtime->dma_bytes));
snd_pcm_buffer_access_unlock(runtime);
+ return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence);
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index 61c7372e6307..29469e549791 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -6613,6 +6613,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a30, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a31, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8a34, "HP Pavilion x360 2-in-1 Laptop 14-ek0xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a4f, "HP Victus 15-fa0xxx (MB 8A4F)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -6817,6 +6818,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8f42, "HP ZBook 8 G2a 14W", ALC245_FIXUP_HP_TAS2781_I2C_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8f57, "HP Trekker G7JC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8f62, "HP ZBook 8 G2a 16W", ALC245_FIXUP_HP_TAS2781_I2C_MUTE_LED),
+ SND_PCI_QUIRK(0x1043, 0x1024, "ASUS Zephyrus G14 2025", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1032, "ASUS VivoBook X513EA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1034, "ASUS GU605C", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
diff --git a/sound/hda/codecs/side-codecs/cirrus_scodec_test.c b/sound/hda/codecs/side-codecs/cirrus_scodec_test.c
index 3cca750857b6..dc35932b6b22 100644
--- a/sound/hda/codecs/side-codecs/cirrus_scodec_test.c
+++ b/sound/hda/codecs/side-codecs/cirrus_scodec_test.c
@@ -103,6 +103,7 @@ static int cirrus_scodec_test_gpio_probe(struct platform_device *pdev)
/* GPIO core modifies our struct gpio_chip so use a copy */
gpio_priv->chip = cirrus_scodec_test_gpio_chip;
+ gpio_priv->chip.parent = &pdev->dev;
ret = devm_gpiochip_add_data(&pdev->dev, &gpio_priv->chip, gpio_priv);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to add gpiochip\n");
@@ -319,7 +320,7 @@ static struct kunit_case cirrus_scodec_test_cases[] = {
};
static struct kunit_suite cirrus_scodec_test_suite = {
- .name = "snd-hda-scodec-cs35l56-test",
+ .name = "snd-hda-cirrus-scodec-test",
.init = cirrus_scodec_test_case_init,
.test_cases = cirrus_scodec_test_cases,
};
diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
index f7a7f216d586..624a822341bb 100644
--- a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
+++ b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c
@@ -2,7 +2,7 @@
//
// TAS2781 HDA I2C driver
//
-// Copyright 2023 - 2025 Texas Instruments, Inc.
+// Copyright 2023 - 2026 Texas Instruments, Inc.
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
// Current maintainer: Baojun Xu <baojun.xu@ti.com>
@@ -60,6 +60,7 @@ struct tas2781_hda_i2c_priv {
int (*save_calibration)(struct tas2781_hda *h);
int hda_chip_id;
+ bool skip_calibration;
};
static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
@@ -491,7 +492,8 @@ static void tasdevice_dspfw_init(void *context)
/* If the calibration data is bad, the DSP still works with the
* default calibration data built into the algorithm.
*/
- hda_priv->save_calibration(tas_hda);
+ if (!hda_priv->skip_calibration)
+ hda_priv->save_calibration(tas_hda);
}
static void tasdev_fw_ready(const struct firmware *fmw, void *context)
@@ -548,6 +550,7 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
void *master_data)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ struct tas2781_hda_i2c_priv *hda_priv = tas_hda->hda_priv;
struct hda_component_parent *parent = master_data;
struct hda_component *comp;
struct hda_codec *codec;
@@ -568,11 +571,22 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
case 0x1028:
tas_hda->catlog_id = DELL;
break;
+ case 0x103C:
+ tas_hda->catlog_id = HP;
+ break;
default:
tas_hda->catlog_id = LENOVO;
break;
}
+ /*
+ * Using ASUS ROG Xbox Ally X (RC73XA) UEFI calibration data
+ * causes audio dropouts during playback, use fallback data
+ * from DSP firmware as a workaround.
+ */
+ if (codec->core.subsystem_id == 0x10431384)
+ hda_priv->skip_calibration = true;
+
pm_runtime_get_sync(dev);
comp->dev = dev;
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index bf4d9d336561..0294177acc66 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -420,6 +420,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6500RE"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "M6501RM"),
}
},
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
index 443cf59cb71a..fdf4a9add852 100644
--- a/sound/soc/codecs/tlv320adcx140.c
+++ b/sound/soc/codecs/tlv320adcx140.c
@@ -23,7 +23,6 @@
#include "tlv320adcx140.h"
struct adcx140_priv {
- struct snd_soc_component *component;
struct regulator *supply_areg;
struct gpio_desc *gpio_reset;
struct regmap *regmap;
@@ -338,7 +337,7 @@ static const struct snd_kcontrol_new adcx140_dapm_ch4_dre_en_switch =
SOC_DAPM_SINGLE("Switch", ADCX140_CH4_CFG0, 0, 1, 0);
static const struct snd_kcontrol_new adcx140_dapm_dre_en_switch =
- SOC_DAPM_SINGLE("Switch", ADCX140_DSP_CFG1, 3, 1, 0);
+ SOC_DAPM_SINGLE("Switch", ADCX140_DSP_CFG1, 3, 1, 1);
/* Output Mixer */
static const struct snd_kcontrol_new adcx140_output_mixer_controls[] = {
@@ -699,7 +698,6 @@ static void adcx140_pwr_ctrl(struct adcx140_priv *adcx140, bool power_state)
{
int pwr_ctrl = 0;
int ret = 0;
- struct snd_soc_component *component = adcx140->component;
if (power_state)
pwr_ctrl = ADCX140_PWR_CFG_ADC_PDZ | ADCX140_PWR_CFG_PLL_PDZ;
@@ -711,7 +709,7 @@ static void adcx140_pwr_ctrl(struct adcx140_priv *adcx140, bool power_state)
ret = regmap_write(adcx140->regmap, ADCX140_PHASE_CALIB,
adcx140->phase_calib_on ? 0x00 : 0x40);
if (ret)
- dev_err(component->dev, "%s: register write error %d\n",
+ dev_err(adcx140->dev, "%s: register write error %d\n",
__func__, ret);
}
@@ -727,7 +725,7 @@ static int adcx140_hw_params(struct snd_pcm_substream *substream,
struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
u8 data = 0;
- switch (params_width(params)) {
+ switch (params_physical_width(params)) {
case 16:
data = ADCX140_16_BIT_WORD;
break;
@@ -742,7 +740,7 @@ static int adcx140_hw_params(struct snd_pcm_substream *substream,
break;
default:
dev_err(component->dev, "%s: Unsupported width %d\n",
- __func__, params_width(params));
+ __func__, params_physical_width(params));
return -EINVAL;
}
@@ -1156,6 +1154,9 @@ static int adcx140_i2c_probe(struct i2c_client *i2c)
adcx140->gpio_reset = devm_gpiod_get_optional(adcx140->dev,
"reset", GPIOD_OUT_LOW);
if (IS_ERR(adcx140->gpio_reset))
+ return dev_err_probe(&i2c->dev, PTR_ERR(adcx140->gpio_reset),
+ "Failed to get Reset GPIO\n");
+ if (!adcx140->gpio_reset)
dev_info(&i2c->dev, "Reset GPIO not defined\n");
adcx140->supply_areg = devm_regulator_get_optional(adcx140->dev,
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index d7aca6567c2d..2fc234adca5f 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -678,6 +678,7 @@ struct wsa881x_priv {
*/
unsigned int sd_n_val;
int active_ports;
+ bool hw_init;
bool port_prepared[WSA881X_MAX_SWR_PORTS];
bool port_enable[WSA881X_MAX_SWR_PORTS];
};
@@ -687,6 +688,9 @@ static void wsa881x_init(struct wsa881x_priv *wsa881x)
struct regmap *rm = wsa881x->regmap;
unsigned int val = 0;
+ if (wsa881x->hw_init)
+ return;
+
regmap_register_patch(wsa881x->regmap, wsa881x_rev_2_0,
ARRAY_SIZE(wsa881x_rev_2_0));
@@ -724,6 +728,8 @@ static void wsa881x_init(struct wsa881x_priv *wsa881x)
regmap_update_bits(rm, WSA881X_OTP_REG_28, 0x3F, 0x3A);
regmap_update_bits(rm, WSA881X_BONGO_RESRV_REG1, 0xFF, 0xB2);
regmap_update_bits(rm, WSA881X_BONGO_RESRV_REG2, 0xFF, 0x05);
+
+ wsa881x->hw_init = true;
}
static int wsa881x_component_probe(struct snd_soc_component *comp)
@@ -1067,6 +1073,9 @@ static int wsa881x_update_status(struct sdw_slave *slave,
{
struct wsa881x_priv *wsa881x = dev_get_drvdata(&slave->dev);
+ if (status == SDW_SLAVE_UNATTACHED)
+ wsa881x->hw_init = false;
+
if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0)
wsa881x_init(wsa881x);
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
index c3046e260cb9..468d2b38a22a 100644
--- a/sound/soc/codecs/wsa883x.c
+++ b/sound/soc/codecs/wsa883x.c
@@ -475,6 +475,7 @@ struct wsa883x_priv {
int active_ports;
int dev_mode;
int comp_offset;
+ bool hw_init;
/*
* Protects temperature reading code (related to speaker protection) and
* fields: temperature and pa_on.
@@ -1043,6 +1044,9 @@ static int wsa883x_init(struct wsa883x_priv *wsa883x)
struct regmap *regmap = wsa883x->regmap;
int variant, version, ret;
+ if (wsa883x->hw_init)
+ return 0;
+
ret = regmap_read(regmap, WSA883X_OTP_REG_0, &variant);
if (ret)
return ret;
@@ -1054,22 +1058,23 @@ static int wsa883x_init(struct wsa883x_priv *wsa883x)
switch (variant) {
case WSA8830:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8830\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8830\n",
+ version);
break;
case WSA8835:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835\n",
+ version);
break;
case WSA8832:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8832\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8832\n",
+ version);
break;
case WSA8835_V2:
- dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835_V2\n",
- version);
+ dev_dbg(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835_V2\n",
+ version);
break;
default:
+ dev_warn(wsa883x->dev, "unknown variant: %d\n", variant);
break;
}
@@ -1085,6 +1090,8 @@ static int wsa883x_init(struct wsa883x_priv *wsa883x)
wsa883x->comp_offset);
}
+ wsa883x->hw_init = true;
+
return 0;
}
@@ -1093,6 +1100,9 @@ static int wsa883x_update_status(struct sdw_slave *slave,
{
struct wsa883x_priv *wsa883x = dev_get_drvdata(&slave->dev);
+ if (status == SDW_SLAVE_UNATTACHED)
+ wsa883x->hw_init = false;
+
if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0)
return wsa883x_init(wsa883x);
diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
index 887edd2be705..6c6b497657d0 100644
--- a/sound/soc/codecs/wsa884x.c
+++ b/sound/soc/codecs/wsa884x.c
@@ -1534,7 +1534,7 @@ static void wsa884x_init(struct wsa884x_priv *wsa884x)
wsa884x_set_gain_parameters(wsa884x);
- wsa884x->hw_init = false;
+ wsa884x->hw_init = true;
}
static int wsa884x_update_status(struct sdw_slave *slave,
@@ -2109,7 +2109,6 @@ static int wsa884x_probe(struct sdw_slave *pdev,
/* Start in cache-only until device is enumerated */
regcache_cache_only(wsa884x->regmap, true);
- wsa884x->hw_init = true;
if (IS_REACHABLE(CONFIG_HWMON)) {
struct device *hwmon;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 355f7ec8943c..bdc02e85b089 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1179,9 +1179,9 @@ void graph_util_parse_link_direction(struct device_node *np,
bool is_playback_only = of_property_read_bool(np, "playback-only");
bool is_capture_only = of_property_read_bool(np, "capture-only");
- if (playback_only)
+ if (np && playback_only)
*playback_only = is_playback_only;
- if (capture_only)
+ if (np && capture_only)
*capture_only = is_capture_only;
}
EXPORT_SYMBOL_GPL(graph_util_parse_link_direction);
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 2c1001148d54..8721a098d53f 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -767,6 +767,14 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
{
.callback = sof_sdw_quirk_cb,
.matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0DD6")
+ },
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_ptlrvp"),
},
.driver_data = (void *)(SOC_SDW_PCH_DMIC),
diff --git a/sound/soc/sdw_utils/soc_sdw_cs42l43.c b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
index 4c954501e500..2685ff4f0932 100644
--- a/sound/soc/sdw_utils/soc_sdw_cs42l43.c
+++ b/sound/soc/sdw_utils/soc_sdw_cs42l43.c
@@ -44,7 +44,7 @@ static const struct snd_soc_dapm_route cs42l43_dmic_map[] = {
static struct snd_soc_jack_pin soc_jack_pins[] = {
{
.pin = "Headphone",
- .mask = SND_JACK_HEADPHONE,
+ .mask = SND_JACK_HEADPHONE | SND_JACK_LINEOUT,
},
{
.pin = "Headset Mic",
diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
index bf382aa07e92..ccf149f949e8 100644
--- a/sound/soc/sdw_utils/soc_sdw_utils.c
+++ b/sound/soc/sdw_utils/soc_sdw_utils.c
@@ -841,6 +841,19 @@ struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_part(const u64 adr)
}
EXPORT_SYMBOL_NS(asoc_sdw_find_codec_info_part, "SND_SOC_SDW_UTILS");
+static struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_sdw_id(const struct sdw_slave_id *id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ if (id->part_id == codec_info_list[i].part_id &&
+ (!codec_info_list[i].version_id ||
+ id->sdw_version == codec_info_list[i].version_id))
+ return &codec_info_list[i];
+
+ return NULL;
+}
+
struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_acpi(const u8 *acpi_id)
{
int i;
@@ -873,22 +886,46 @@ struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_dai(const char *dai_name, i
}
EXPORT_SYMBOL_NS(asoc_sdw_find_codec_info_dai, "SND_SOC_SDW_UTILS");
+static int asoc_sdw_find_codec_info_dai_index(const struct asoc_sdw_codec_info *codec_info,
+ const char *dai_name)
+{
+ int i;
+
+ for (i = 0; i < codec_info->dai_num; i++) {
+ if (!strcmp(codec_info->dais[i].dai_name, dai_name))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct snd_soc_dapm_context *dapm = snd_soc_card_to_dapm(card);
struct asoc_sdw_codec_info *codec_info;
struct snd_soc_dai *dai;
+ struct sdw_slave *sdw_peripheral;
const char *spk_components="";
int dai_index;
int ret;
int i;
for_each_rtd_codec_dais(rtd, i, dai) {
- codec_info = asoc_sdw_find_codec_info_dai(dai->name, &dai_index);
+ if (is_sdw_slave(dai->component->dev))
+ sdw_peripheral = dev_to_sdw_dev(dai->component->dev);
+ else if (dai->component->dev->parent && is_sdw_slave(dai->component->dev->parent))
+ sdw_peripheral = dev_to_sdw_dev(dai->component->dev->parent);
+ else
+ continue;
+
+ codec_info = asoc_sdw_find_codec_info_sdw_id(&sdw_peripheral->id);
if (!codec_info)
return -EINVAL;
+ dai_index = asoc_sdw_find_codec_info_dai_index(codec_info, dai->name);
+ WARN_ON(dai_index < 0);
+
/*
* A codec dai can be connected to different dai links for capture and playback,
* but we only need to call the rtd_init function once.
@@ -898,6 +935,10 @@ int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd)
if (codec_info->dais[dai_index].rtd_init_done)
continue;
+ dev_dbg(card->dev, "%#x/%s initializing for %s/%s\n",
+ codec_info->part_id, codec_info->dais[dai_index].dai_name,
+ dai->component->name, dai->name);
+
/*
* Add card controls and dapm widgets for the first codec dai.
* The controls and widgets will be used for all codec dais.
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 624e9269fc25..ba42939d5f01 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -543,11 +543,11 @@ int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
ucontrol->value.bytes.data[0] &= ~params->mask;
break;
case 2:
- ((u16 *)(&ucontrol->value.bytes.data))[0]
+ ((__be16 *)(&ucontrol->value.bytes.data))[0]
&= cpu_to_be16(~params->mask);
break;
case 4:
- ((u32 *)(&ucontrol->value.bytes.data))[0]
+ ((__be32 *)(&ucontrol->value.bytes.data))[0]
&= cpu_to_be32(~params->mask);
break;
default:
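
The soc-ops change is annotation-only: the bytes in ucontrol->value.bytes.data are stored big-endian, so the mask has to be byte-swapped before it is applied, and the __be16/__be32 casts let sparse check that. A userspace sketch of the same masking, with htons/ntohs standing in for cpu_to_be16/be16_to_cpu:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mask = 0x00ff;           /* mask, CPU byte order */
	uint16_t stored = htons(0x1234);  /* word as stored: big-endian */

	/* Swap the complemented mask to big-endian before clearing bits,
	 * as cpu_to_be16(~params->mask) does in snd_soc_bytes_get(). */
	uint16_t cleared = stored & htons((uint16_t)~mask);

	printf("value=0x%04x masked=0x%04x\n", ntohs(stored), ntohs(cleared));
	return 0;
}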
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index 261d9067d27b..e795907a3963 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -2077,7 +2077,7 @@ static const struct regmap_config tegra210_ahub_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.max_register = TEGRA210_MAX_REGISTER_ADDR,
- .cache_type = REGCACHE_FLAT_S,
+ .cache_type = REGCACHE_FLAT,
};
static const struct regmap_config tegra186_ahub_regmap_config = {
@@ -2085,7 +2085,7 @@ static const struct regmap_config tegra186_ahub_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.max_register = TEGRA186_MAX_REGISTER_ADDR,
- .cache_type = REGCACHE_FLAT_S,
+ .cache_type = REGCACHE_FLAT,
};
static const struct regmap_config tegra264_ahub_regmap_config = {
@@ -2094,7 +2094,7 @@ static const struct regmap_config tegra264_ahub_regmap_config = {
.reg_stride = 4,
.writeable_reg = tegra264_ahub_wr_reg,
.max_register = TEGRA264_MAX_REGISTER_ADDR,
- .cache_type = REGCACHE_FLAT_S,
+ .cache_type = REGCACHE_FLAT,
};
static const struct tegra_ahub_soc_data soc_data_tegra210 = {
diff --git a/sound/soc/ti/davinci-evm.c b/sound/soc/ti/davinci-evm.c
index 3848766d96c3..ad514c2e5a25 100644
--- a/sound/soc/ti/davinci-evm.c
+++ b/sound/soc/ti/davinci-evm.c
@@ -194,27 +194,32 @@ static int davinci_evm_probe(struct platform_device *pdev)
return -EINVAL;
dai->cpus->of_node = of_parse_phandle(np, "ti,mcasp-controller", 0);
- if (!dai->cpus->of_node)
- return -EINVAL;
+ if (!dai->cpus->of_node) {
+ ret = -EINVAL;
+ goto err_put;
+ }
dai->platforms->of_node = dai->cpus->of_node;
evm_soc_card.dev = &pdev->dev;
ret = snd_soc_of_parse_card_name(&evm_soc_card, "ti,model");
if (ret)
- return ret;
+ goto err_put;
mclk = devm_clk_get(&pdev->dev, "mclk");
if (PTR_ERR(mclk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto err_put;
} else if (IS_ERR(mclk)) {
dev_dbg(&pdev->dev, "mclk not found.\n");
mclk = NULL;
}
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
drvdata->mclk = mclk;
@@ -224,7 +229,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
if (!drvdata->mclk) {
dev_err(&pdev->dev,
"No clock or clock rate defined.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put;
}
drvdata->sysclk = clk_get_rate(drvdata->mclk);
} else if (drvdata->mclk) {
@@ -240,8 +246,25 @@ static int davinci_evm_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(&evm_soc_card, drvdata);
ret = devm_snd_soc_register_card(&pdev->dev, &evm_soc_card);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ goto err_put;
+ }
+
+ return ret;
+
+err_put:
+ dai->platforms->of_node = NULL;
+
+ if (dai->cpus->of_node) {
+ of_node_put(dai->cpus->of_node);
+ dai->cpus->of_node = NULL;
+ }
+
+ if (dai->codecs->of_node) {
+ of_node_put(dai->codecs->of_node);
+ dai->codecs->of_node = NULL;
+ }
return ret;
}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 54d01dfd820f..263abb36bb2d 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1553,7 +1553,7 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
for (i = 0; i < ctx->packets; i++) {
counts = snd_usb_endpoint_next_packet_size(ep, ctx, i, avail);
- if (counts < 0)
+ if (counts < 0 || frames + counts >= ep->max_urb_frames)
break;
/* set up descriptor */
urb->iso_frame_desc[i].offset = frames * stride;
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index c2f3e8b3f2ac..9b692f368be7 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -41,7 +41,7 @@ clean distclean:
rm -rf pyynl.egg-info
rm -rf build
-install: libynl.a lib/*.h
+install: libynl.a lib/*.h ynltool
@echo -e "\tINSTALL libynl.a"
@$(INSTALL) -d $(DESTDIR)$(libdir)
@$(INSTALL) -m 0644 libynl.a $(DESTDIR)$(libdir)/libynl.a
@@ -51,6 +51,7 @@ install: libynl.a lib/*.h
@echo -e "\tINSTALL pyynl"
@pip install --prefix=$(DESTDIR)$(prefix) .
@make -C generated install
+ @make -C ynltool install
run_tests:
@$(MAKE) -C tests run_tests
diff --git a/tools/net/ynl/ynl-regen.sh b/tools/net/ynl/ynl-regen.sh
index 81b4ecd89100..d9809276db98 100755
--- a/tools/net/ynl/ynl-regen.sh
+++ b/tools/net/ynl/ynl-regen.sh
@@ -21,7 +21,7 @@ files=$(git grep --files-with-matches '^/\* YNL-GEN \(kernel\|uapi\|user\)')
for f in $files; do
# params: 0 1 2 3
# $YAML YNL-GEN kernel $mode
- params=( $(git grep -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
+ params=( $(git grep --no-line-number -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
args=$(sed -n 's@/\* YNL-ARG \(.*\) \*/@\1@p' $f)
if [ $f -nt ${params[0]} -a -z "$force" ]; then
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index ad6e1ec706ce..9b4503113ce5 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -72,23 +72,27 @@ HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)"
#
# To support disassembly, objtool needs libopcodes which is provided
-# with libbdf (binutils-dev or binutils-devel package).
+# with libbfd (binutils-dev or binutils-devel package).
#
-FEATURE_USER = .objtool
-FEATURE_TESTS = libbfd disassembler-init-styled
-FEATURE_DISPLAY =
-include $(srctree)/tools/build/Makefile.feature
+# We check using HOSTCC directly rather than the shared feature framework
+# because objtool is a host tool that links against host libraries.
+#
+HAVE_LIBOPCODES := $(shell echo 'int main(void) { return 0; }' | \
+ $(HOSTCC) -xc - -o /dev/null -lopcodes 2>/dev/null && echo y)
-ifeq ($(feature-disassembler-init-styled), 1)
- OBJTOOL_CFLAGS += -DDISASM_INIT_STYLED
-endif
+# Styled disassembler support requires binutils >= 2.39
+HAVE_DISASM_STYLED := $(shell echo '$(pound)include <dis-asm.h>' | \
+ $(HOSTCC) -E -xc - 2>/dev/null | grep -q disassembler_style && echo y)
BUILD_DISAS := n
-ifeq ($(feature-libbfd),1)
+ifeq ($(HAVE_LIBOPCODES),y)
BUILD_DISAS := y
- OBJTOOL_CFLAGS += -DDISAS -DPACKAGE="objtool"
+ OBJTOOL_CFLAGS += -DDISAS -DPACKAGE='"objtool"'
OBJTOOL_LDFLAGS += -lopcodes
+ifeq ($(HAVE_DISASM_STYLED),y)
+ OBJTOOL_CFLAGS += -DDISASM_INIT_STYLED
+endif
endif
export BUILD_DISAS
diff --git a/tools/objtool/include/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index 25ff7942b4d5..2b27b54096b8 100644
--- a/tools/objtool/include/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -152,8 +152,8 @@ static inline void unindent(int *unused) { indent--; }
if (unlikely(insn->sym && insn->sym->pfunc && \
insn->sym->pfunc->debug_checksum)) { \
char *insn_off = offstr(insn->sec, insn->offset); \
- __dbg("checksum: %s %s %016lx", \
- func->name, insn_off, checksum); \
+ __dbg("checksum: %s %s %016llx", \
+ func->name, insn_off, (unsigned long long)checksum);\
free(insn_off); \
} \
})
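
The format-string fix above is the usual portability idiom: printing a u64 with %lx is only correct where long is 64-bit; on 32-bit hosts it consumes half the value and desynchronizes the remaining varargs. A trivial standalone example of the portable forms:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t checksum = 0x1122334455667788ULL;

	printf("%016llx\n", (unsigned long long)checksum); /* cast + %llx */
	printf("%016" PRIx64 "\n", checksum);              /* <inttypes.h> */
	return 0;
}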
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 17c1c36a7bf9..000c89a1e50d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -251,8 +251,11 @@ __add_event(struct list_head *list, int *idx,
event_attr_init(attr);
evsel = evsel__new_idx(attr, *idx);
- if (!evsel)
- goto out_err;
+ if (!evsel) {
+ perf_cpu_map__put(cpus);
+ perf_cpu_map__put(pmu_cpus);
+ return NULL;
+ }
if (name) {
evsel->name = strdup(name);
diff --git a/tools/testing/cxl/test/cxl_translate.c b/tools/testing/cxl/test/cxl_translate.c
index 2200ae21795c..16328b2112b2 100644
--- a/tools/testing/cxl/test/cxl_translate.c
+++ b/tools/testing/cxl/test/cxl_translate.c
@@ -68,6 +68,8 @@ static u64 to_hpa(u64 dpa_offset, int pos, u8 r_eiw, u16 r_eig, u8 hb_ways,
/* Calculate base HPA offset from DPA and position */
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, r_eiw, r_eig);
+ if (hpa_offset == ULLONG_MAX)
+ return ULLONG_MAX;
if (math == XOR_MATH) {
cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
@@ -258,19 +260,23 @@ static int test_random_params(void)
pos = get_random_u32() % ways;
dpa = get_random_u64() >> 12;
+ reverse_dpa = ULLONG_MAX;
+ reverse_pos = -1;
+
hpa = cxl_calculate_hpa_offset(dpa, pos, eiw, eig);
- reverse_dpa = cxl_calculate_dpa_offset(hpa, eiw, eig);
- reverse_pos = cxl_calculate_position(hpa, eiw, eig);
-
- if (reverse_dpa != dpa || reverse_pos != pos) {
- pr_err("test random iter %d FAIL hpa=%llu, dpa=%llu reverse_dpa=%llu, pos=%d reverse_pos=%d eiw=%u eig=%u\n",
- i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw,
- eig);
-
- if (failures++ > 10) {
- pr_err("test random too many failures, stop\n");
- break;
- }
+ if (hpa != ULLONG_MAX) {
+ reverse_dpa = cxl_calculate_dpa_offset(hpa, eiw, eig);
+ reverse_pos = cxl_calculate_position(hpa, eiw, eig);
+ if (reverse_dpa == dpa && reverse_pos == pos)
+ continue;
+ }
+
+ pr_err("test random iter %d FAIL hpa=%llu, dpa=%llu reverse_dpa=%llu, pos=%d reverse_pos=%d eiw=%u eig=%u\n",
+ i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw, eig);
+
+ if (failures++ > 10) {
+ pr_err("test random too many failures, stop\n");
+ break;
}
}
pr_info("..... test random: PASS %d FAIL %d\n", i - failures, failures);
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index 230b75f6015b..90551650299c 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -237,6 +237,7 @@ struct service_fixture {
struct sockaddr_un unix_addr;
socklen_t unix_addr_len;
};
+ struct sockaddr_storage _largest;
};
};
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index eee814e09dd7..968a91c927a4 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -4362,22 +4362,24 @@ TEST_F_FORK(layout1, named_unix_domain_socket_ioctl)
{
const char *const path = file1_s1d1;
int srv_fd, cli_fd, ruleset_fd;
- socklen_t size;
- struct sockaddr_un srv_un, cli_un;
+ struct sockaddr_un srv_un = {
+ .sun_family = AF_UNIX,
+ };
+ struct sockaddr_un cli_un = {
+ .sun_family = AF_UNIX,
+ };
const struct landlock_ruleset_attr attr = {
.handled_access_fs = LANDLOCK_ACCESS_FS_IOCTL_DEV,
};
/* Sets up a server */
- srv_un.sun_family = AF_UNIX;
- strncpy(srv_un.sun_path, path, sizeof(srv_un.sun_path));
-
ASSERT_EQ(0, unlink(path));
srv_fd = socket(AF_UNIX, SOCK_STREAM, 0);
ASSERT_LE(0, srv_fd);
- size = offsetof(struct sockaddr_un, sun_path) + strlen(srv_un.sun_path);
- ASSERT_EQ(0, bind(srv_fd, (struct sockaddr *)&srv_un, size));
+ strncpy(srv_un.sun_path, path, sizeof(srv_un.sun_path));
+ ASSERT_EQ(0, bind(srv_fd, (struct sockaddr *)&srv_un, sizeof(srv_un)));
+
ASSERT_EQ(0, listen(srv_fd, 10 /* qlen */));
/* Enables Landlock. */
@@ -4387,24 +4389,18 @@ TEST_F_FORK(layout1, named_unix_domain_socket_ioctl)
ASSERT_EQ(0, close(ruleset_fd));
/* Sets up a client connection to it */
- cli_un.sun_family = AF_UNIX;
cli_fd = socket(AF_UNIX, SOCK_STREAM, 0);
ASSERT_LE(0, cli_fd);
- size = offsetof(struct sockaddr_un, sun_path) + strlen(cli_un.sun_path);
- ASSERT_EQ(0, bind(cli_fd, (struct sockaddr *)&cli_un, size));
-
- bzero(&cli_un, sizeof(cli_un));
- cli_un.sun_family = AF_UNIX;
strncpy(cli_un.sun_path, path, sizeof(cli_un.sun_path));
- size = offsetof(struct sockaddr_un, sun_path) + strlen(cli_un.sun_path);
-
- ASSERT_EQ(0, connect(cli_fd, (struct sockaddr *)&cli_un, size));
+ ASSERT_EQ(0,
+ connect(cli_fd, (struct sockaddr *)&cli_un, sizeof(cli_un)));
/* FIONREAD and other IOCTLs should not be forbidden. */
EXPECT_EQ(0, test_fionread_ioctl(cli_fd));
- ASSERT_EQ(0, close(cli_fd));
+ EXPECT_EQ(0, close(cli_fd));
+ EXPECT_EQ(0, close(srv_fd));
}
/* clang-format off */
@@ -7074,8 +7070,8 @@ static int matches_log_fs_extra(struct __test_metadata *const _metadata,
return -E2BIG;
/*
- * It is assume that absolute_path does not contain control characters nor
- * spaces, see audit_string_contains_control().
+ * It is assumed that absolute_path does not contain control
+ * characters nor spaces, see audit_string_contains_control().
*/
absolute_path = realpath(path, NULL);
if (!absolute_path)
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index 2a45208551e6..b34b139b3f89 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -121,6 +121,10 @@ static socklen_t get_addrlen(const struct service_fixture *const srv,
{
switch (srv->protocol.domain) {
case AF_UNSPEC:
+ if (minimal)
+ return sizeof(sa_family_t);
+ return sizeof(struct sockaddr_storage);
+
case AF_INET:
return sizeof(srv->ipv4_addr);
@@ -758,6 +762,11 @@ TEST_F(protocol, bind_unspec)
bind_fd = socket_variant(&self->srv0);
ASSERT_LE(0, bind_fd);
+ /* Tries to bind with too small addrlen. */
+ EXPECT_EQ(-EINVAL, bind_variant_addrlen(
+ bind_fd, &self->unspec_any0,
+ get_addrlen(&self->unspec_any0, true) - 1));
+
/* Allowed bind on AF_UNSPEC/INADDR_ANY. */
ret = bind_variant(bind_fd, &self->unspec_any0);
if (variant->prot.domain == AF_INET) {
@@ -766,6 +775,8 @@ TEST_F(protocol, bind_unspec)
TH_LOG("Failed to bind to unspec/any socket: %s",
strerror(errno));
}
+ } else if (variant->prot.domain == AF_INET6) {
+ EXPECT_EQ(-EAFNOSUPPORT, ret);
} else {
EXPECT_EQ(-EINVAL, ret);
}
@@ -792,6 +803,8 @@ TEST_F(protocol, bind_unspec)
} else {
EXPECT_EQ(0, ret);
}
+ } else if (variant->prot.domain == AF_INET6) {
+ EXPECT_EQ(-EAFNOSUPPORT, ret);
} else {
EXPECT_EQ(-EINVAL, ret);
}
@@ -801,7 +814,8 @@ TEST_F(protocol, bind_unspec)
bind_fd = socket_variant(&self->srv0);
ASSERT_LE(0, bind_fd);
ret = bind_variant(bind_fd, &self->unspec_srv0);
- if (variant->prot.domain == AF_INET) {
+ if (variant->prot.domain == AF_INET ||
+ variant->prot.domain == AF_INET6) {
EXPECT_EQ(-EAFNOSUPPORT, ret);
} else {
EXPECT_EQ(-EINVAL, ret)
@@ -892,7 +906,19 @@ TEST_F(protocol, connect_unspec)
EXPECT_EQ(0, close(ruleset_fd));
}
- ret = connect_variant(connect_fd, &self->unspec_any0);
+ /* Try to re-disconnect with a truncated address struct. */
+ EXPECT_EQ(-EINVAL,
+ connect_variant_addrlen(
+ connect_fd, &self->unspec_any0,
+ get_addrlen(&self->unspec_any0, true) - 1));
+
+ /*
+ * Re-disconnect, with a minimal sockaddr struct (just a
+ * bare sa_family=AF_UNSPEC field).
+ */
+ ret = connect_variant_addrlen(connect_fd, &self->unspec_any0,
+ get_addrlen(&self->unspec_any0,
+ true));
if (self->srv0.protocol.domain == AF_UNIX &&
self->srv0.protocol.type == SOCK_STREAM) {
EXPECT_EQ(-EINVAL, ret);
diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
index 4e356334ecb7..4f64c90583cd 100644
--- a/tools/testing/selftests/landlock/ptrace_test.c
+++ b/tools/testing/selftests/landlock/ptrace_test.c
@@ -86,16 +86,9 @@ static int get_yama_ptrace_scope(void)
}
/* clang-format off */
-FIXTURE(hierarchy) {};
+FIXTURE(scoped_domains) {};
/* clang-format on */
-FIXTURE_VARIANT(hierarchy)
-{
- const bool domain_both;
- const bool domain_parent;
- const bool domain_child;
-};
-
/*
* Test multiple tracing combinations between a parent process P1 and a child
* process P2.
@@ -104,155 +97,18 @@ FIXTURE_VARIANT(hierarchy)
* restriction is enforced in addition to any Landlock check, which means that
* all P2 requests to trace P1 would be denied.
*/
+#include "scoped_base_variants.h"
-/*
- * No domain
- *
- * P1-. P1 -> P2 : allow
- * \ P2 -> P1 : allow
- * 'P2
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, allow_without_domain) {
- /* clang-format on */
- .domain_both = false,
- .domain_parent = false,
- .domain_child = false,
-};
-
-/*
- * Child domain
- *
- * P1--. P1 -> P2 : allow
- * \ P2 -> P1 : deny
- * .'-----.
- * | P2 |
- * '------'
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, allow_with_one_domain) {
- /* clang-format on */
- .domain_both = false,
- .domain_parent = false,
- .domain_child = true,
-};
-
-/*
- * Parent domain
- * .------.
- * | P1 --. P1 -> P2 : deny
- * '------' \ P2 -> P1 : allow
- * '
- * P2
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, deny_with_parent_domain) {
- /* clang-format on */
- .domain_both = false,
- .domain_parent = true,
- .domain_child = false,
-};
-
-/*
- * Parent + child domain (siblings)
- * .------.
- * | P1 ---. P1 -> P2 : deny
- * '------' \ P2 -> P1 : deny
- * .---'--.
- * | P2 |
- * '------'
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, deny_with_sibling_domain) {
- /* clang-format on */
- .domain_both = false,
- .domain_parent = true,
- .domain_child = true,
-};
-
-/*
- * Same domain (inherited)
- * .-------------.
- * | P1----. | P1 -> P2 : allow
- * | \ | P2 -> P1 : allow
- * | ' |
- * | P2 |
- * '-------------'
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, allow_sibling_domain) {
- /* clang-format on */
- .domain_both = true,
- .domain_parent = false,
- .domain_child = false,
-};
-
-/*
- * Inherited + child domain
- * .-----------------.
- * | P1----. | P1 -> P2 : allow
- * | \ | P2 -> P1 : deny
- * | .-'----. |
- * | | P2 | |
- * | '------' |
- * '-----------------'
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, allow_with_nested_domain) {
- /* clang-format on */
- .domain_both = true,
- .domain_parent = false,
- .domain_child = true,
-};
-
-/*
- * Inherited + parent domain
- * .-----------------.
- * |.------. | P1 -> P2 : deny
- * || P1 ----. | P2 -> P1 : allow
- * |'------' \ |
- * | ' |
- * | P2 |
- * '-----------------'
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, deny_with_nested_and_parent_domain) {
- /* clang-format on */
- .domain_both = true,
- .domain_parent = true,
- .domain_child = false,
-};
-
-/*
- * Inherited + parent and child domain (siblings)
- * .-----------------.
- * | .------. | P1 -> P2 : deny
- * | | P1 . | P2 -> P1 : deny
- * | '------'\ |
- * | \ |
- * | .--'---. |
- * | | P2 | |
- * | '------' |
- * '-----------------'
- */
-/* clang-format off */
-FIXTURE_VARIANT_ADD(hierarchy, deny_with_forked_domain) {
- /* clang-format on */
- .domain_both = true,
- .domain_parent = true,
- .domain_child = true,
-};
-
-FIXTURE_SETUP(hierarchy)
+FIXTURE_SETUP(scoped_domains)
{
}
-FIXTURE_TEARDOWN(hierarchy)
+FIXTURE_TEARDOWN(scoped_domains)
{
}
/* Test PTRACE_TRACEME and PTRACE_ATTACH for parent and child. */
-TEST_F(hierarchy, trace)
+TEST_F(scoped_domains, trace)
{
pid_t child, parent;
int status, err_proc_read;
diff --git a/tools/testing/selftests/landlock/scoped_abstract_unix_test.c b/tools/testing/selftests/landlock/scoped_abstract_unix_test.c
index 6825082c079c..72f97648d4a7 100644
--- a/tools/testing/selftests/landlock/scoped_abstract_unix_test.c
+++ b/tools/testing/selftests/landlock/scoped_abstract_unix_test.c
@@ -543,7 +543,7 @@ TEST_F(scoped_vs_unscoped, unix_scoping)
ASSERT_EQ(1, write(pipe_child[1], ".", 1));
ASSERT_EQ(grand_child, waitpid(grand_child, &status, 0));
- EXPECT_EQ(0, close(stream_server_child))
+ EXPECT_EQ(0, close(stream_server_child));
EXPECT_EQ(0, close(dgram_server_child));
return;
}
@@ -779,7 +779,6 @@ FIXTURE_TEARDOWN(various_address_sockets)
TEST_F(various_address_sockets, scoped_pathname_sockets)
{
- socklen_t size_stream, size_dgram;
pid_t child;
int status;
char buf_child, buf_parent;
@@ -798,12 +797,8 @@ TEST_F(various_address_sockets, scoped_pathname_sockets)
/* Pathname address. */
snprintf(stream_pathname_addr.sun_path,
sizeof(stream_pathname_addr.sun_path), "%s", stream_path);
- size_stream = offsetof(struct sockaddr_un, sun_path) +
- strlen(stream_pathname_addr.sun_path);
snprintf(dgram_pathname_addr.sun_path,
sizeof(dgram_pathname_addr.sun_path), "%s", dgram_path);
- size_dgram = offsetof(struct sockaddr_un, sun_path) +
- strlen(dgram_pathname_addr.sun_path);
/* Abstract address. */
memset(&stream_abstract_addr, 0, sizeof(stream_abstract_addr));
@@ -841,8 +836,9 @@ TEST_F(various_address_sockets, scoped_pathname_sockets)
/* Connects with pathname sockets. */
stream_pathname_socket = socket(AF_UNIX, SOCK_STREAM, 0);
ASSERT_LE(0, stream_pathname_socket);
- ASSERT_EQ(0, connect(stream_pathname_socket,
- &stream_pathname_addr, size_stream));
+ ASSERT_EQ(0,
+ connect(stream_pathname_socket, &stream_pathname_addr,
+ sizeof(stream_pathname_addr)));
ASSERT_EQ(1, write(stream_pathname_socket, "b", 1));
EXPECT_EQ(0, close(stream_pathname_socket));
@@ -850,12 +846,13 @@ TEST_F(various_address_sockets, scoped_pathname_sockets)
dgram_pathname_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
ASSERT_LE(0, dgram_pathname_socket);
err = sendto(dgram_pathname_socket, "c", 1, 0,
- &dgram_pathname_addr, size_dgram);
+ &dgram_pathname_addr, sizeof(dgram_pathname_addr));
EXPECT_EQ(1, err);
/* Sends with connection. */
- ASSERT_EQ(0, connect(dgram_pathname_socket,
- &dgram_pathname_addr, size_dgram));
+ ASSERT_EQ(0,
+ connect(dgram_pathname_socket, &dgram_pathname_addr,
+ sizeof(dgram_pathname_addr)));
ASSERT_EQ(1, write(dgram_pathname_socket, "d", 1));
EXPECT_EQ(0, close(dgram_pathname_socket));
@@ -910,13 +907,13 @@ TEST_F(various_address_sockets, scoped_pathname_sockets)
stream_pathname_socket = socket(AF_UNIX, SOCK_STREAM, 0);
ASSERT_LE(0, stream_pathname_socket);
ASSERT_EQ(0, bind(stream_pathname_socket, &stream_pathname_addr,
- size_stream));
+ sizeof(stream_pathname_addr)));
ASSERT_EQ(0, listen(stream_pathname_socket, backlog));
dgram_pathname_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
ASSERT_LE(0, dgram_pathname_socket);
ASSERT_EQ(0, bind(dgram_pathname_socket, &dgram_pathname_addr,
- size_dgram));
+ sizeof(dgram_pathname_addr)));
/* Sets up abstract servers. */
stream_abstract_socket = socket(AF_UNIX, SOCK_STREAM, 0);
diff --git a/tools/testing/selftests/landlock/scoped_base_variants.h b/tools/testing/selftests/landlock/scoped_base_variants.h
index d3b1fa8a584e..7116728ebc68 100644
--- a/tools/testing/selftests/landlock/scoped_base_variants.h
+++ b/tools/testing/selftests/landlock/scoped_base_variants.h
@@ -1,8 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Landlock scoped_domains variants
+ * Landlock scoped_domains test variant definition.
*
- * See the hierarchy variants from ptrace_test.c
+ * This file defines a fixture variant "scoped_domains" covering all
+ * permutations of the parent and child processes being in separate or
+ * shared Landlock domains, or in no Landlock domain at all.
+ *
+ * Scoped access tests can include this file to avoid repeating these
+ * combinations.
*
* Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2019-2020 ANSSI
diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
index 6279893a0adc..f61150d28eb2 100644
--- a/tools/testing/selftests/mm/gup_longterm.c
+++ b/tools/testing/selftests/mm/gup_longterm.c
@@ -179,7 +179,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
if (rw && shared && fs_is_unknown(fs_type)) {
ksft_print_msg("Unknown filesystem\n");
result = KSFT_SKIP;
- return;
+ break;
}
/*
* R/O pinning or pinning in a private mapping is always
diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index 363c1033cc7d..10b686102b79 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -22,12 +22,37 @@ FIXTURE(merge)
struct procmap_fd procmap;
};
+static char *map_carveout(unsigned int page_size)
+{
+ return mmap(NULL, 30 * page_size, PROT_NONE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+}
+
+static pid_t do_fork(struct procmap_fd *procmap)
+{
+ pid_t pid = fork();
+
+ if (pid == -1)
+ return -1;
+ if (pid != 0) {
+ wait(NULL);
+ return pid;
+ }
+
+ /* Reopen for child. */
+ if (close_procmap(procmap))
+ return -1;
+ if (open_self_procmap(procmap))
+ return -1;
+
+ return 0;
+}
+
FIXTURE_SETUP(merge)
{
self->page_size = psize();
/* Carve out PROT_NONE region to map over. */
- self->carveout = mmap(NULL, 30 * self->page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ self->carveout = map_carveout(self->page_size);
ASSERT_NE(self->carveout, MAP_FAILED);
/* Setup PROCMAP_QUERY interface. */
ASSERT_EQ(open_self_procmap(&self->procmap), 0);
@@ -36,7 +61,8 @@ FIXTURE_SETUP(merge)
FIXTURE_TEARDOWN(merge)
{
ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
- ASSERT_EQ(close_procmap(&self->procmap), 0);
+	/* May fail in the parent of a forked process. */
+ close_procmap(&self->procmap);
/*
* Clear unconditionally, as some tests set this. It is no issue if this
* fails (KSM may be disabled for instance).
@@ -44,6 +70,44 @@ FIXTURE_TEARDOWN(merge)
prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
+FIXTURE(merge_with_fork)
+{
+ unsigned int page_size;
+ char *carveout;
+ struct procmap_fd procmap;
+};
+
+FIXTURE_VARIANT(merge_with_fork)
+{
+ bool forked;
+};
+
+FIXTURE_VARIANT_ADD(merge_with_fork, forked)
+{
+ .forked = true,
+};
+
+FIXTURE_VARIANT_ADD(merge_with_fork, unforked)
+{
+ .forked = false,
+};
+
+FIXTURE_SETUP(merge_with_fork)
+{
+ self->page_size = psize();
+ self->carveout = map_carveout(self->page_size);
+ ASSERT_NE(self->carveout, MAP_FAILED);
+ ASSERT_EQ(open_self_procmap(&self->procmap), 0);
+}
+
+FIXTURE_TEARDOWN(merge_with_fork)
+{
+ ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
+ ASSERT_EQ(close_procmap(&self->procmap), 0);
+ /* See above. */
+ prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
+}
+
TEST_F(merge, mprotect_unfaulted_left)
{
unsigned int page_size = self->page_size;
@@ -322,8 +386,8 @@ TEST_F(merge, forked_target_vma)
unsigned int page_size = self->page_size;
char *carveout = self->carveout;
struct procmap_fd *procmap = &self->procmap;
- pid_t pid;
char *ptr, *ptr2;
+ pid_t pid;
int i;
/*
@@ -344,19 +408,10 @@ TEST_F(merge, forked_target_vma)
*/
ptr[0] = 'x';
- pid = fork();
+ pid = do_fork(&self->procmap);
ASSERT_NE(pid, -1);
-
- if (pid != 0) {
- wait(NULL);
+ if (pid != 0)
return;
- }
-
- /* Child process below: */
-
- /* Reopen for child. */
- ASSERT_EQ(close_procmap(&self->procmap), 0);
- ASSERT_EQ(open_self_procmap(&self->procmap), 0);
/* unCOWing everything does not cause the AVC to go away. */
for (i = 0; i < 5 * page_size; i += page_size)
@@ -386,8 +441,8 @@ TEST_F(merge, forked_source_vma)
unsigned int page_size = self->page_size;
char *carveout = self->carveout;
struct procmap_fd *procmap = &self->procmap;
- pid_t pid;
char *ptr, *ptr2;
+ pid_t pid;
int i;
/*
@@ -408,19 +463,10 @@ TEST_F(merge, forked_source_vma)
*/
ptr[0] = 'x';
- pid = fork();
+ pid = do_fork(&self->procmap);
ASSERT_NE(pid, -1);
-
- if (pid != 0) {
- wait(NULL);
+ if (pid != 0)
return;
- }
-
- /* Child process below: */
-
- /* Reopen for child. */
- ASSERT_EQ(close_procmap(&self->procmap), 0);
- ASSERT_EQ(open_self_procmap(&self->procmap), 0);
/* unCOWing everything does not cause the AVC to go away. */
for (i = 0; i < 5 * page_size; i += page_size)
@@ -1171,4 +1217,288 @@ TEST_F(merge, mremap_correct_placed_faulted)
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ unsigned long offset;
+ char *ptr_a, *ptr_b;
+
+ /*
+ * mremap() such that A and B merge:
+ *
+ * |------------|
+ * | \ |
+ * |-----------| | / |---------|
+ * | unfaulted | v \ | faulted |
+ * |-----------| / |---------|
+ * B \ A
+ */
+
+ /* Map VMA A into place. */
+ ptr_a = mmap(&self->carveout[page_size + 3 * page_size],
+ 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+ /* Fault it in. */
+ ptr_a[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move it out of the way so we can place VMA B in position,
+ * unfaulted.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* Map VMA B into place. */
+ ptr_b = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /*
+ * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size + 3 * page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* The VMAs should have merged, if not forked. */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
+
+ offset = variant->forked ? 3 * page_size : 6 * page_size;
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + offset);
+}
+
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_next)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ unsigned long offset;
+ char *ptr_a, *ptr_b;
+
+ /*
+ * mremap() such that A and B merge:
+ *
+ * |---------------------------|
+ * | \ |
+ * | |-----------| / |---------|
+ * v | unfaulted | \ | faulted |
+ * |-----------| / |---------|
+ * B \ A
+ *
+ * Then unmap VMA A to trigger the bug.
+ */
+
+ /* Map VMA A into place. */
+ ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+ /* Fault it in. */
+ ptr_a[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move it out of the way so we can place VMA B in position,
+ * unfaulted.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* Map VMA B into place. */
+ ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /*
+ * Now move VMA A into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_a = mremap(ptr_a, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size]);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* The VMAs should have merged, if not forked. */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
+ offset = variant->forked ? 3 * page_size : 6 * page_size;
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
+}
+
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_unfaulted_next)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ unsigned long offset;
+ char *ptr_a, *ptr_b, *ptr_c;
+
+ /*
+ * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
+ *
+ * |---------------------------|
+ * | \ |
+ * |-----------| | |-----------| / |---------|
+ * | unfaulted | v | unfaulted | \ | faulted |
+ * |-----------| |-----------| / |---------|
+ * A C \ B
+ */
+
+ /* Map VMA B into place. */
+ ptr_b = mmap(&self->carveout[page_size + 3 * page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+ /* Fault it in. */
+ ptr_b[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+ * Now move it out of the way so we can place VMAs A, C in position,
+ * unfaulted.
+ */
+ ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /* Map VMA A into place. */
+
+ ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /* Map VMA C into place. */
+ ptr_c = mmap(&self->carveout[page_size + 3 * page_size + 3 * page_size],
+ 3 * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_c, MAP_FAILED);
+
+ /*
+ * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size + 3 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /* The VMAs should have merged, if not forked. */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
+ offset = variant->forked ? 3 * page_size : 9 * page_size;
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + offset);
+
+	/* If forked, B and C should not have merged either. */
+ if (variant->forked) {
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 3 * page_size);
+ }
+}
+
+TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev_faulted_next)
+{
+ struct procmap_fd *procmap = &self->procmap;
+ unsigned int page_size = self->page_size;
+ char *ptr_a, *ptr_b, *ptr_bc;
+
+ /*
+ * mremap() with MREMAP_DONTUNMAP such that A, B and C merge:
+ *
+ * |---------------------------|
+ * | \ |
+ * |-----------| | |-----------| / |---------|
+ * | unfaulted | v | faulted | \ | faulted |
+ * |-----------| |-----------| / |---------|
+ * A C \ B
+ */
+
+ /*
+	 * Map VMAs B and C into place. We have to map them together so they
+	 * share an anon_vma and their vma->vm_pgoff values are correctly
+	 * aligned.
+ */
+ ptr_bc = mmap(&self->carveout[page_size + 3 * page_size],
+ 3 * page_size + 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_bc, MAP_FAILED);
+
+ /* Fault it in. */
+ ptr_bc[0] = 'x';
+
+ if (variant->forked) {
+ pid_t pid = do_fork(&self->procmap);
+
+ ASSERT_NE(pid, -1);
+ if (pid != 0)
+ return;
+ }
+
+ /*
+	 * Now move VMA B out of the way (splitting VMA BC) so we can place VMA A
+ * in position, unfaulted, and leave the remainder of the VMA we just
+ * moved in place, faulted, as VMA C.
+ */
+ ptr_b = mremap(ptr_bc, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE, &self->carveout[20 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+ /* Map VMA A into place. */
+ ptr_a = mmap(&self->carveout[page_size], 3 * page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr_a, MAP_FAILED);
+
+ /*
+ * Now move VMA B into position with MREMAP_DONTUNMAP to catch incorrect
+ * anon_vma propagation.
+ */
+ ptr_b = mremap(ptr_b, 3 * page_size, 3 * page_size,
+ MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ &self->carveout[page_size + 3 * page_size]);
+ ASSERT_NE(ptr_b, MAP_FAILED);
+
+	/* The VMAs should have merged: A, B, C if unforked; B, C if forked. */
+ if (variant->forked) {
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_b));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_b);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_b + 6 * page_size);
+ } else {
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr_a));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr_a);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr_a + 9 * page_size);
+ }
+}
+
TEST_HARNESS_MAIN
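All of the merge_with_fork tests lean on the same MREMAP_DONTUNMAP move: the pages travel to the destination while the source range stays mapped but unfaulted, which is exactly the situation in which wrongly propagated anon_vma state would change whether a merge happens. A minimal sketch of that move (wrapper name hypothetical):

#define _GNU_SOURCE
#include <sys/mman.h>

/* Move len bytes from src to dst, leaving src mapped but unfaulted.
 * MREMAP_DONTUNMAP requires MREMAP_MAYMOVE and old_len == new_len.
 */
static void *move_dontunmap(void *src, void *dst, size_t len)
{
	return mremap(src, len, len,
		      MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP, dst);
}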
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index fe7937dc5f45..ce9699092f50 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -48,6 +48,7 @@ TEST_PROGS := \
ipv6_flowlabel.sh \
ipv6_force_forwarding.sh \
ipv6_route_update_soft_lockup.sh \
+ ipvtap_test.sh \
l2_tos_ttl_inherit.sh \
l2tp.sh \
link_netns.py \
diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
index 3ef209cacb8e..663744305e52 100755
--- a/tools/testing/selftests/net/amt.sh
+++ b/tools/testing/selftests/net/amt.sh
@@ -73,6 +73,8 @@
# +------------------------+
#==============================================================================
+source lib.sh
+
readonly LISTENER=$(mktemp -u listener-XXXXXXXX)
readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX)
readonly RELAY=$(mktemp -u relay-XXXXXXXX)
@@ -246,14 +248,15 @@ test_ipv6_forward()
send_mcast4()
{
- sleep 2
+ sleep 5
+ wait_local_port_listen ${LISTENER} 4000 udp
ip netns exec "${SOURCE}" bash -c \
'printf "%s %128s" 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
}
send_mcast6()
{
- sleep 2
+ wait_local_port_listen ${LISTENER} 6000 udp
ip netns exec "${SOURCE}" bash -c \
'printf "%s %128s" 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
}
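wait_local_port_listen comes from the lib.sh sourced at the top of the script and replaces part of the fixed sleep with actual readiness polling. From memory, the helper is essentially the following loop (details may differ slightly from the real lib.sh):

# Poll /proc/net/<proto>* in the listener's netns until the (hex) port shows up.
wait_local_port_listen()
{
	local ns="$1" port="$2" proto="$3"	# e.g. "${LISTENER}" 4000 udp
	local port_hex i

	port_hex=$(printf "%04X" "${port}")
	for i in $(seq 10); do
		ip netns exec "${ns}" cat /proc/net/"${proto}"* 2>/dev/null | \
			grep -q "${port_hex}" && break
		sleep 0.1
	done
}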
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 1e1f253118f5..b84362b9b508 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -48,6 +48,7 @@ CONFIG_IPV6_SEG6_LWTUNNEL=y
CONFIG_IPV6_SIT=y
CONFIG_IPV6_VTI=y
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_KALLSYMS=y
CONFIG_L2TP=m
CONFIG_L2TP_ETH=m
@@ -116,6 +117,7 @@ CONFIG_PROC_SYSCTL=y
CONFIG_PSAMPLE=m
CONFIG_RPS=y
CONFIG_SYSFS=y
+CONFIG_TAP=m
CONFIG_TCP_MD5SIG=y
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_TEST_BPF=m
diff --git a/tools/testing/selftests/net/fib-onlink-tests.sh b/tools/testing/selftests/net/fib-onlink-tests.sh
index b5773ac8847d..e0d45292a298 100755
--- a/tools/testing/selftests/net/fib-onlink-tests.sh
+++ b/tools/testing/selftests/net/fib-onlink-tests.sh
@@ -121,7 +121,7 @@ log_subsection()
run_cmd()
{
- local cmd="$*"
+ local cmd="$1"
local out
local rc
@@ -146,7 +146,7 @@ get_linklocal()
local pfx
local addr
- addr=$(${pfx} ip -6 -br addr show dev ${dev} | \
+ addr=$(${pfx} ${IP} -6 -br addr show dev ${dev} | \
awk '{
for (i = 3; i <= NF; ++i) {
if ($i ~ /^fe80/)
@@ -174,58 +174,48 @@ setup()
set -e
- # create namespace
- setup_ns PEER_NS
+ # create namespaces
+ setup_ns ns1
+ IP="ip -netns $ns1"
+ setup_ns ns2
# add vrf table
- ip li add ${VRF} type vrf table ${VRF_TABLE}
- ip li set ${VRF} up
- ip ro add table ${VRF_TABLE} unreachable default metric 8192
- ip -6 ro add table ${VRF_TABLE} unreachable default metric 8192
+ ${IP} li add ${VRF} type vrf table ${VRF_TABLE}
+ ${IP} li set ${VRF} up
+ ${IP} ro add table ${VRF_TABLE} unreachable default metric 8192
+ ${IP} -6 ro add table ${VRF_TABLE} unreachable default metric 8192
# create test interfaces
- ip li add ${NETIFS[p1]} type veth peer name ${NETIFS[p2]}
- ip li add ${NETIFS[p3]} type veth peer name ${NETIFS[p4]}
- ip li add ${NETIFS[p5]} type veth peer name ${NETIFS[p6]}
- ip li add ${NETIFS[p7]} type veth peer name ${NETIFS[p8]}
+ ${IP} li add ${NETIFS[p1]} type veth peer name ${NETIFS[p2]}
+ ${IP} li add ${NETIFS[p3]} type veth peer name ${NETIFS[p4]}
+ ${IP} li add ${NETIFS[p5]} type veth peer name ${NETIFS[p6]}
+ ${IP} li add ${NETIFS[p7]} type veth peer name ${NETIFS[p8]}
# enslave vrf interfaces
for n in 5 7; do
- ip li set ${NETIFS[p${n}]} vrf ${VRF}
+ ${IP} li set ${NETIFS[p${n}]} vrf ${VRF}
done
# add addresses
for n in 1 3 5 7; do
- ip li set ${NETIFS[p${n}]} up
- ip addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
- ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
+ ${IP} li set ${NETIFS[p${n}]} up
+ ${IP} addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
+ ${IP} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
done
# move peer interfaces to namespace and add addresses
for n in 2 4 6 8; do
- ip li set ${NETIFS[p${n}]} netns ${PEER_NS} up
- ip -netns ${PEER_NS} addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
- ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
+ ${IP} li set ${NETIFS[p${n}]} netns ${ns2} up
+ ip -netns $ns2 addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
+ ip -netns $ns2 addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
done
- ip -6 ro add default via ${V6ADDRS[p3]/::[0-9]/::64}
- ip -6 ro add table ${VRF_TABLE} default via ${V6ADDRS[p7]/::[0-9]/::64}
+ ${IP} -6 ro add default via ${V6ADDRS[p3]/::[0-9]/::64}
+ ${IP} -6 ro add table ${VRF_TABLE} default via ${V6ADDRS[p7]/::[0-9]/::64}
set +e
}
-cleanup()
-{
- # make sure we start from a clean slate
- cleanup_ns ${PEER_NS} 2>/dev/null
- for n in 1 3 5 7; do
- ip link del ${NETIFS[p${n}]} 2>/dev/null
- done
- ip link del ${VRF} 2>/dev/null
- ip ro flush table ${VRF_TABLE}
- ip -6 ro flush table ${VRF_TABLE}
-}
-
################################################################################
# IPv4 tests
#
@@ -242,7 +232,7 @@ run_ip()
# dev arg may be empty
[ -n "${dev}" ] && dev="dev ${dev}"
- run_cmd ip ro add table "${table}" "${prefix}"/32 via "${gw}" "${dev}" onlink
+ run_cmd "${IP} ro add table ${table} ${prefix}/32 via ${gw} ${dev} onlink"
log_test $? ${exp_rc} "${desc}"
}
@@ -258,8 +248,8 @@ run_ip_mpath()
# dev arg may be empty
[ -n "${dev}" ] && dev="dev ${dev}"
- run_cmd ip ro add table "${table}" "${prefix}"/32 \
- nexthop via ${nh1} nexthop via ${nh2}
+ run_cmd "${IP} ro add table ${table} ${prefix}/32 \
+ nexthop via ${nh1} nexthop via ${nh2}"
log_test $? ${exp_rc} "${desc}"
}
@@ -342,7 +332,7 @@ run_ip6()
# dev arg may be empty
[ -n "${dev}" ] && dev="dev ${dev}"
- run_cmd ip -6 ro add table "${table}" "${prefix}"/128 via "${gw}" "${dev}" onlink
+ run_cmd "${IP} -6 ro add table ${table} ${prefix}/128 via ${gw} ${dev} onlink"
log_test $? ${exp_rc} "${desc}"
}
@@ -356,8 +346,8 @@ run_ip6_mpath()
local exp_rc="$6"
local desc="$7"
- run_cmd ip -6 ro add table "${table}" "${prefix}"/128 "${opts}" \
- nexthop via ${nh1} nexthop via ${nh2}
+ run_cmd "${IP} -6 ro add table ${table} ${prefix}/128 ${opts} \
+ nexthop via ${nh1} nexthop via ${nh2}"
log_test $? ${exp_rc} "${desc}"
}
@@ -491,10 +481,9 @@ do
esac
done
-cleanup
setup
run_onlink_tests
-cleanup
+cleanup_ns ${ns1} ${ns2}
if [ "$TESTS" != "none" ]; then
printf "\nTests passed: %3d\n" ${nsuccess}
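The run_cmd change pairs with the new multi-word ${IP}: callers now build the whole command as a single quoted string, taken as "$1", rather than letting run_cmd reassemble word-split arguments from "$*". A sketch of the calling convention (assuming run_cmd evals its argument, as these helpers typically do):

# ${IP} expands to several words, e.g. "ip -netns ns1-xyz".
IP="ip -netns ${ns1}"

# One string, expanded by the caller and evaluated as a unit by run_cmd:
run_cmd "${IP} ro add table ${table} ${prefix}/32 via ${gw} onlink"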
diff --git a/tools/testing/selftests/net/ipvtap_test.sh b/tools/testing/selftests/net/ipvtap_test.sh
new file mode 100755
index 000000000000..354ca7ce8584
--- /dev/null
+++ b/tools/testing/selftests/net/ipvtap_test.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Simple tests for ipvtap
+
+
+#
+# The testing environment looks like this:
+#
+# |------HNS-------| |------PHY-------|
+# | veth<----------------->veth |
+# |------|--|------| |----------------|
+# | |
+# | | |-----TST0-------|
+# | |------------|----ipvlan |
+# | |----------------|
+# |
+# | |-----TST1-------|
+# |---------------|----ipvlan |
+# |----------------|
+#
+
+ALL_TESTS="
+ test_ip_set
+"
+
+source lib.sh
+
+DEBUG=0
+
+VETH_HOST=vethtst.h
+VETH_PHY=vethtst.p
+
+NS_COUNT=32
+IP_ITERATIONS=1024
+IPSET_TIMEOUT="60s"
+
+ns_run() {
+ ns=$1
+ shift
+ if [[ "$ns" == "global" ]]; then
+ "$@" >/dev/null
+ else
+ ip netns exec "$ns" "$@" >/dev/null
+ fi
+}
+
+test_ip_setup_env() {
+ setup_ns NS_PHY
+ setup_ns HST_NS
+
+	# Set up the simulated remote host (phy) and the host itself.
+ ns_run "$HST_NS" ip link add $VETH_HOST type veth peer name $VETH_PHY \
+ netns "$NS_PHY" >/dev/null
+ ns_run "$HST_NS" ip link set $VETH_HOST up
+ ns_run "$NS_PHY" ip link set $VETH_PHY up
+
+ for ((i=0; i<NS_COUNT; i++)); do
+ setup_ns ipvlan_ns_$i
+ ns="ipvlan_ns_$i"
+ if [ "$DEBUG" = "1" ]; then
+ echo "created NS ${!ns}"
+ fi
+ if ! ns_run "$HST_NS" ip link add netns ${!ns} ipvlan0 \
+ link $VETH_HOST \
+ type ipvtap mode l2 bridge; then
+ exit_error "FAIL: Failed to configure ipvlan link."
+ fi
+ done
+}
+
+test_ip_cleanup_env() {
+ ns_run "$HST_NS" ip link del $VETH_HOST
+ cleanup_all_ns
+}
+
+exit_error() {
+ echo "$1"
+ exit $ksft_fail
+}
+
+rnd() {
+ echo $(( RANDOM % 32 + 16 ))
+}
+
+test_ip_set_thread() {
+	# Here we try to create IP address conflicts between namespaces.
+	# Simply adding and removing the same IP would be uninteresting, but
+	# adding a random IP and then removing a random IP eventually makes
+	# conflicts appear.
+ ip link set ipvlan0 up
+ for ((i=0; i<IP_ITERATIONS; i++)); do
+ v=$(rnd)
+ ip a a "172.25.0.$v/24" dev ipvlan0 2>/dev/null
+ ip a a "fc00::$v/64" dev ipvlan0 2>/dev/null
+ v=$(rnd)
+ ip a d "172.25.0.$v/24" dev ipvlan0 2>/dev/null
+ ip a d "fc00::$v/64" dev ipvlan0 2>/dev/null
+ done
+}
+
+test_ip_set() {
+ RET=0
+
+ trap test_ip_cleanup_env EXIT
+
+ test_ip_setup_env
+
+ declare -A ns_pids
+ for ((i=0; i<NS_COUNT; i++)); do
+ ns="ipvlan_ns_$i"
+ ns_run ${!ns} timeout "$IPSET_TIMEOUT" \
+			bash -c "$0 test_ip_set_thread" &
+ ns_pids[$i]=$!
+ done
+
+ for ((i=0; i<NS_COUNT; i++)); do
+ wait "${ns_pids[$i]}"
+ done
+
+ declare -A all_ips
+ for ((i=0; i<NS_COUNT; i++)); do
+ ns="ipvlan_ns_$i"
+ ip_output=$(ip netns exec ${!ns} ip a l dev ipvlan0 | grep inet)
+ while IFS= read -r nsip_out; do
+ if [[ -z $nsip_out ]]; then
+ continue;
+ fi
+ nsip=$(awk '{print $2}' <<< "$nsip_out")
+ if [[ -v all_ips[$nsip] ]]; then
+ RET=$ksft_fail
+ log_test "conflict for $nsip"
+ return "$RET"
+ else
+ all_ips[$nsip]=$i
+ fi
+ done <<< "$ip_output"
+ done
+
+ if [ "$DEBUG" = "1" ]; then
+ for key in "${!all_ips[@]}"; do
+ echo "$key: ${all_ips[$key]}"
+ done
+ fi
+
+ trap - EXIT
+ test_ip_cleanup_env
+
+ log_test "test multithreaded ip set"
+}
+
+if [[ "$1" == "-d" ]]; then
+ DEBUG=1
+ shift
+fi
+
+if [[ "$1" == "-t" ]]; then
+ shift
+ TESTS="$*"
+fi
+
+if [[ "$1" == "test_ip_set_thread" ]]; then
+ test_ip_set_thread
+else
+ require_command ip
+
+ tests_run
+fi
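Based on the option parsing at the bottom of the script, it can be invoked as follows:

./ipvtap_test.sh			# run everything in ALL_TESTS
./ipvtap_test.sh -d			# same, with debug output
./ipvtap_test.sh -t test_ip_set		# run only the listed test(s)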
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json
index e5cc31f265f8..0179c57104ad 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json
@@ -81,5 +81,30 @@
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP link del dev $DUMMY"
]
+ },
+ {
+ "id": "124e",
+ "name": "Try to add teql as a child qdisc",
+ "category": [
+ "qdisc",
+ "ets",
+ "tbf"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle 1: qfq",
+ "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 15 maxpkt 16384"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent 1:1 handle 2:1 teql0",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s -j qdisc ls dev $DUMMY parent 1:1",
+ "matchJSON": [],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root handle 1:"
+ ]
}
]
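From memory, tdc can execute just the new case by its id (check ./tdc.py -h for the exact flags):

cd tools/testing/selftests/tc-testing
./tdc.py -e 124e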
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 83148875a12c..434065215d12 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -36,6 +36,7 @@ BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
CFLAGS := -O2 -g -std=gnu99 -pthread -Wall $(KHDR_INCLUDES)
+CFLAGS += -I $(top_srcdir)/tools/testing/selftests/
# call32_from_64 in thunks.S uses absolute addresses.
ifeq ($(CAN_BUILD_WITH_NOPIE),1)
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index 142c02a6834a..bf633cde82b0 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -25,7 +25,7 @@ enum transport {
};
static const char * const transport_ksyms[] = {
- #define x(name, symbol) "d " symbol "_transport",
+ #define x(name, symbol) " " symbol "_transport",
KNOWN_TRANSPORTS(x)
#undef x
};
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index bbe3723babdc..5bd20ccd9335 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -347,10 +347,12 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
}
#define SOCK_BUF_SIZE (2 * 1024 * 1024)
+#define SOCK_BUF_SIZE_SMALL (64 * 1024)
#define MAX_MSG_PAGES 4
static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
{
+ unsigned long long sock_buf_size;
unsigned long curr_hash;
size_t max_msg_size;
int page_size;
@@ -363,6 +365,16 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
+ sock_buf_size = SOCK_BUF_SIZE;
+
+ setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+ sock_buf_size,
+ "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+ setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+ sock_buf_size,
+ "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+
/* Wait, until receiver sets buffer size. */
control_expectln("SRVREADY");
@@ -2219,6 +2231,101 @@ static void test_stream_accepted_setsockopt_server(const struct test_opts *opts)
close(fd);
}
+static void test_stream_tx_credit_bounds_client(const struct test_opts *opts)
+{
+ unsigned long long sock_buf_size;
+ size_t total = 0;
+ char buf[4096];
+ int fd;
+
+ memset(buf, 'A', sizeof(buf));
+
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ sock_buf_size = SOCK_BUF_SIZE_SMALL;
+
+ setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+ sock_buf_size,
+ "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+ setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+ sock_buf_size,
+ "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+
+ if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK) < 0) {
+ perror("fcntl(F_SETFL)");
+ exit(EXIT_FAILURE);
+ }
+
+ control_expectln("SRVREADY");
+
+ for (;;) {
+ ssize_t sent = send(fd, buf, sizeof(buf), 0);
+
+ if (sent == 0) {
+ fprintf(stderr, "unexpected EOF while sending bytes\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (sent < 0) {
+ if (errno == EINTR)
+ continue;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+
+ perror("send");
+ exit(EXIT_FAILURE);
+ }
+
+ total += sent;
+ }
+
+ control_writeln("CLIDONE");
+ close(fd);
+
+	/* We should not be able to send more bytes than the configured
+	 * local buffer size.
+ */
+ if (total > sock_buf_size) {
+ fprintf(stderr,
+ "TX credit too large: queued %zu bytes (expected <= %llu)\n",
+ total, sock_buf_size);
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void test_stream_tx_credit_bounds_server(const struct test_opts *opts)
+{
+ unsigned long long sock_buf_size;
+ int fd;
+
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+ sock_buf_size = SOCK_BUF_SIZE;
+
+ setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+ sock_buf_size,
+ "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+ setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+ sock_buf_size,
+ "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+
+ control_writeln("SRVREADY");
+ control_expectln("CLIDONE");
+
+ close(fd);
+}
+
static struct test_case test_cases[] = {
{
.name = "SOCK_STREAM connection reset",
@@ -2403,6 +2510,16 @@ static struct test_case test_cases[] = {
.run_client = test_stream_accepted_setsockopt_client,
.run_server = test_stream_accepted_setsockopt_server,
},
+ {
+ .name = "SOCK_STREAM virtio MSG_ZEROCOPY coalescence corruption",
+ .run_client = test_stream_msgzcopy_mangle_client,
+ .run_server = test_stream_msgzcopy_mangle_server,
+ },
+ {
+ .name = "SOCK_STREAM TX credit bounds",
+ .run_client = test_stream_tx_credit_bounds_client,
+ .run_server = test_stream_tx_credit_bounds_server,
+ },
{},
};
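For context on the "TX credit bounds" being tested: virtio-vsock flow control is credit based and, paraphrasing net/vmw_vsock/virtio_transport_common.c from memory, the sender's remaining credit toward the peer is roughly:

/* Bytes the sender may still have in flight toward the peer. */
static inline unsigned int tx_credit(unsigned int peer_buf_alloc,
				     unsigned int tx_cnt,
				     unsigned int peer_fwd_cnt)
{
	return peer_buf_alloc - (tx_cnt - peer_fwd_cnt);
}

The test sets a small local SO_VM_SOCKETS_BUFFER_SIZE against a large peer buffer and verifies that the local setting, not just the peer-advertised credit, bounds how much a non-blocking sender can queue.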
diff --git a/tools/testing/vsock/vsock_test_zerocopy.c b/tools/testing/vsock/vsock_test_zerocopy.c
index 9d9a6cb9614a..a31ddfc1cd0c 100644
--- a/tools/testing/vsock/vsock_test_zerocopy.c
+++ b/tools/testing/vsock/vsock_test_zerocopy.c
@@ -9,14 +9,18 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <poll.h>
#include <linux/errqueue.h>
#include <linux/kernel.h>
+#include <linux/sockios.h>
+#include <linux/time64.h>
#include <errno.h>
#include "control.h"
+#include "timeout.h"
#include "vsock_test_zerocopy.h"
#include "msg_zerocopy_common.h"
@@ -356,3 +360,73 @@ void test_stream_msgzcopy_empty_errq_server(const struct test_opts *opts)
control_expectln("DONE");
close(fd);
}
+
+#define GOOD_COPY_LEN 128 /* net/vmw_vsock/virtio_transport_common.c */
+
+void test_stream_msgzcopy_mangle_client(const struct test_opts *opts)
+{
+ char sbuf1[PAGE_SIZE + 1], sbuf2[GOOD_COPY_LEN];
+ unsigned long hash;
+ struct pollfd fds;
+ int fd, i;
+
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
+ if (fd < 0) {
+ perror("connect");
+ exit(EXIT_FAILURE);
+ }
+
+ enable_so_zerocopy_check(fd);
+
+ memset(sbuf1, 'x', sizeof(sbuf1));
+ send_buf(fd, sbuf1, sizeof(sbuf1), 0, sizeof(sbuf1));
+
+ for (i = 0; i < sizeof(sbuf2); i++)
+ sbuf2[i] = rand() & 0xff;
+
+ send_buf(fd, sbuf2, sizeof(sbuf2), MSG_ZEROCOPY, sizeof(sbuf2));
+
+ hash = hash_djb2(sbuf2, sizeof(sbuf2));
+ control_writeulong(hash);
+
+ fds.fd = fd;
+ fds.events = 0;
+
+ if (poll(&fds, 1, TIMEOUT * MSEC_PER_SEC) != 1 ||
+ !(fds.revents & POLLERR)) {
+ perror("poll");
+ exit(EXIT_FAILURE);
+ }
+
+ close(fd);
+}
+
+void test_stream_msgzcopy_mangle_server(const struct test_opts *opts)
+{
+ unsigned long local_hash, remote_hash;
+ char rbuf[PAGE_SIZE + 1];
+ int fd;
+
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
+ if (fd < 0) {
+ perror("accept");
+ exit(EXIT_FAILURE);
+ }
+
+	/* Wait, so we don't race the (buggy) skb coalescence. */
+ vsock_ioctl_int(fd, SIOCINQ, PAGE_SIZE + 1 + GOOD_COPY_LEN);
+
+ /* Discard the first packet. */
+ recv_buf(fd, rbuf, PAGE_SIZE + 1, 0, PAGE_SIZE + 1);
+
+ recv_buf(fd, rbuf, GOOD_COPY_LEN, 0, GOOD_COPY_LEN);
+ remote_hash = control_readulong();
+ local_hash = hash_djb2(rbuf, GOOD_COPY_LEN);
+
+ if (local_hash != remote_hash) {
+ fprintf(stderr, "Data received corrupted\n");
+ exit(EXIT_FAILURE);
+ }
+
+ close(fd);
+}
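The client above treats POLLERR as the MSG_ZEROCOPY completion signal; reaping the completion itself follows the standard error-queue pattern from Documentation/networking/msg_zerocopy.rst, roughly:

#include <sys/socket.h>
#include <linux/errqueue.h>

static int read_zerocopy_completion(int fd)
{
	char control[CMSG_SPACE(sizeof(struct sock_extended_err))] = {};
	struct msghdr msg = {
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct sock_extended_err *serr;
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	cm = CMSG_FIRSTHDR(&msg);
	if (!cm)
		return -1;

	serr = (struct sock_extended_err *)CMSG_DATA(cm);
	if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
		return -1;

	/* [ee_info, ee_data] is the range of completed sends. */
	return 0;
}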
diff --git a/tools/testing/vsock/vsock_test_zerocopy.h b/tools/testing/vsock/vsock_test_zerocopy.h
index 3ef2579e024d..d46c91a69f16 100644
--- a/tools/testing/vsock/vsock_test_zerocopy.h
+++ b/tools/testing/vsock/vsock_test_zerocopy.h
@@ -12,4 +12,7 @@ void test_seqpacket_msgzcopy_server(const struct test_opts *opts);
void test_stream_msgzcopy_empty_errq_client(const struct test_opts *opts);
void test_stream_msgzcopy_empty_errq_server(const struct test_opts *opts);
+void test_stream_msgzcopy_mangle_client(const struct test_opts *opts);
+void test_stream_msgzcopy_mangle_server(const struct test_opts *opts);
+
#endif /* VSOCK_TEST_ZEROCOPY_H */