author		Jason Liu <jason.hui.liu@nxp.com>	2022-06-29 12:58:02 -0500
committer	Jason Liu <jason.hui.liu@nxp.com>	2022-06-29 12:58:02 -0500
commit		7928826df2e302fb0b9756e1b256ea269059a3a9 (patch)
tree		bca8b8524415a880b602bf9e7c8a8c62438c99e0 /drivers
parent		eba369f0f66db8e57d52d788f455ebf80b52efa1 (diff)
parent		18a33c8dabb88b50b860e0177a73933f2c0ddf68 (diff)
Merge tag 'v5.15.50' into lf-5.15.y
This is the 5.15.50 stable release

* tag 'v5.15.50': (1395 commits)
  Linux 5.15.50
  arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer
  serial: core: Initialize rs485 RTS polarity already on probe
  ...

Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>

Conflicts:
	drivers/bus/fsl-mc/fsl-mc-bus.c
	drivers/crypto/caam/ctrl.c
	drivers/pci/controller/dwc/pci-imx6.c
	drivers/spi/spi-fsl-qspi.c
	drivers/tty/serial/fsl_lpuart.c
	include/uapi/linux/dma-buf.h
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/cppc_acpi.c17
-rw-r--r--drivers/acpi/property.c18
-rw-r--r--drivers/acpi/sleep.c12
-rw-r--r--drivers/acpi/sysfs.c25
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c3
-rw-r--r--drivers/base/bus.c4
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c10
-rw-r--r--drivers/base/init.c2
-rw-r--r--drivers/base/memory.c5
-rw-r--r--drivers/base/node.c1
-rw-r--r--drivers/base/power/domain.c1
-rw-r--r--drivers/base/property.c90
-rw-r--r--drivers/block/drbd/drbd_main.c18
-rw-r--r--drivers/block/floppy.c18
-rw-r--r--drivers/block/nbd.c50
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/bluetooth/hci_qca.c4
-rw-r--r--drivers/bus/ti-sysc.c4
-rw-r--r--drivers/char/Kconfig47
-rw-r--r--drivers/char/hw_random/core.c1
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c23
-rw-r--r--drivers/char/random.c2942
-rw-r--r--drivers/char/tpm/tpm2-cmd.c11
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c1
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c4
-rw-r--r--drivers/char/xillybus/xillyusb.c1
-rw-r--r--drivers/clk/at91/clk-generated.c4
-rw-r--r--drivers/clk/imx/clk-imx8mp.c2
-rw-r--r--drivers/clk/tegra/clk-dfll.c12
-rw-r--r--drivers/clocksource/hyperv_timer.c1
-rw-r--r--drivers/clocksource/timer-oxnas-rps.c2
-rw-r--r--drivers/clocksource/timer-riscv.c2
-rw-r--r--drivers/clocksource/timer-sp804.c10
-rw-r--r--drivers/comedi/drivers/vmk80xx.c2
-rw-r--r--drivers/cpufreq/cpufreq.c11
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c18
-rw-r--r--drivers/cpuidle/cpuidle-psci.c46
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c115
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c30
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c10
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h14
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c27
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c1
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.c18
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.h1
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c15
-rw-r--r--drivers/crypto/qcom-rng.c1
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c4
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/dma-buf/dma-buf.c8
-rw-r--r--drivers/dma/idxd/cdev.c8
-rw-r--r--drivers/dma/idxd/dma.c23
-rw-r--r--drivers/dma/stm32-mdma.c23
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c5
-rw-r--r--drivers/edac/dmc520_edac.c2
-rw-r--r--drivers/extcon/extcon-axp288.c4
-rw-r--r--drivers/extcon/extcon-ptn5150.c11
-rw-r--r--drivers/extcon/extcon.c33
-rw-r--r--drivers/firmware/arm_ffa/driver.c4
-rw-r--r--drivers/firmware/arm_scmi/base.c2
-rw-r--r--drivers/firmware/dmi-sysfs.c2
-rw-r--r--drivers/firmware/stratix10-svc.c12
-rw-r--r--drivers/gpio/gpio-dwapb.c7
-rw-r--r--drivers/gpio/gpio-mvebu.c3
-rw-r--r--drivers/gpio/gpio-pca953x.c19
-rw-r--r--drivers/gpio/gpio-rockchip.c24
-rw-r--r--drivers/gpio/gpiolib-of.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c14
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/si_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c60
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c62
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c5
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c1
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c73
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c155
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c34
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c16
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_edid.c6
-rw-r--r--drivers/gpu/drm/drm_plane.c14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c6
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c15
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h16
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c15
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c4
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c14
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c15
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c15
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c20
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c16
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c55
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c21
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c22
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h19
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c81
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c (renamed from drivers/gpu/drm/msm/hdmi/hdmi_connector.c)154
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c2
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c6
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/stm/ltdc.c16
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h12
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c14
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c11
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c4
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c7
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h4
-rw-r--r--drivers/hid/hid-bigbenff.c6
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-led.c2
-rw-r--r--drivers/hid/hid-multitouch.c9
-rw-r--r--drivers/hv/channel.c6
-rw-r--r--drivers/hv/channel_mgmt.c1
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c7
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c11
-rw-r--r--drivers/i2c/busses/i2c-cadence.c12
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c13
-rw-r--r--drivers/i2c/busses/i2c-ismt.c17
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c10
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c106
-rw-r--r--drivers/i2c/busses/i2c-piix4.c213
-rw-r--r--drivers/i2c/busses/i2c-rcar.c15
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c1
-rw-r--r--drivers/iio/adc/ad7124.c1
-rw-r--r--drivers/iio/adc/sc27xx_adc.c20
-rw-r--r--drivers/iio/adc/stmpe-adc.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c24
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c20
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c7
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c24
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/input/input.c19
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/misc/soc_button_array.c4
-rw-r--r--drivers/input/misc/sparcspkr.c1
-rw-r--r--drivers/input/mouse/bcm5974.c7
-rw-r--r--drivers/input/touchscreen/ili210x.c4
-rw-r--r--drivers/input/touchscreen/stmfts.c8
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c10
-rw-r--r--drivers/interconnect/qcom/sc7180.c21
-rw-r--r--drivers/interconnect/qcom/sm8150.c1
-rw-r--r--drivers/interconnect/qcom/sm8250.c1
-rw-r--r--drivers/interconnect/qcom/sm8350.c1
-rw-r--r--drivers/iommu/amd/init.c2
-rw-r--r--drivers/iommu/amd/iommu.c7
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c13
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c5
-rw-r--r--drivers/iommu/dma-iommu.c7
-rw-r--r--drivers/iommu/intel/iommu.c2
-rw-r--r--drivers/iommu/msm_iommu.c11
-rw-r--r--drivers/iommu/mtk_iommu.c30
-rw-r--r--drivers/iommu/mtk_iommu.h2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c7
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c11
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c4
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c4
-rw-r--r--drivers/irqchip/irq-gic-realview.c1
-rw-r--r--drivers/irqchip/irq-gic-v3.c7
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-sni-exiu.c25
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c18
-rw-r--r--drivers/macintosh/Kconfig6
-rw-r--r--drivers/macintosh/Makefile3
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/mailbox/mailbox.c19
-rw-r--r--drivers/md/bcache/btree.c58
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/journal.c31
-rw-r--r--drivers/md/bcache/journal.h2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/super.c1
-rw-r--r--drivers/md/bcache/writeback.c101
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/dm-crypt.c14
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/dm-stats.c8
-rw-r--r--drivers/md/dm-verity-target.c1
-rw-r--r--drivers/md/md-bitmap.c44
-rw-r--r--drivers/md/md.c37
-rw-r--r--drivers/md/raid0.c32
-rw-r--r--drivers/md/raid5.c47
-rw-r--r--drivers/media/cec/core/cec-adap.c6
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c7
-rw-r--r--drivers/media/i2c/imx412.c8
-rw-r--r--drivers/media/i2c/max9286.c139
-rw-r--r--drivers/media/i2c/ov5648.c4
-rw-r--r--drivers/media/i2c/ov7670.c1
-rw-r--r--drivers/media/i2c/rdacm20.c2
-rw-r--r--drivers/media/i2c/rdacm21.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c6
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c2
-rw-r--r--drivers/media/platform/aspeed-video.c4
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c7
-rw-r--r--drivers/media/platform/coda/coda-common.c35
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c6
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c3
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c6
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c6
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c6
-rw-r--r--drivers/media/rc/imon.c99
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c7
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c20
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c5
-rw-r--r--drivers/mfd/davinci_voicecodec.c6
-rw-r--r--drivers/mfd/ipaq-micro.c2
-rw-r--r--drivers/misc/atmel-ssc.c4
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c1
-rw-r--r--drivers/misc/fastrpc.c9
-rw-r--r--drivers/misc/lkdtm/bugs.c10
-rw-r--r--drivers/misc/lkdtm/lkdtm.h8
-rw-r--r--drivers/misc/lkdtm/usercopy.c17
-rw-r--r--drivers/misc/mei/hbm.c3
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/misc/pvpanic/pvpanic-mmio.c2
-rw-r--r--drivers/misc/pvpanic/pvpanic.c10
-rw-r--r--drivers/mmc/core/block.c11
-rw-r--r--drivers/mmc/host/jz4740_mmc.c20
-rw-r--r--drivers/mmc/host/sdhci_am654.c23
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c103
-rw-r--r--drivers/mtd/mtdblock.c8
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c15
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c2
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c10
-rw-r--r--drivers/mtd/spi-nor/core.c9
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c69
-rw-r--r--drivers/mtd/ubi/fastmap.c11
-rw-r--r--drivers/mtd/ubi/ubi.h4
-rw-r--r--drivers/mtd/ubi/vmt.c1
-rw-r--r--drivers/mtd/ubi/wl.c19
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/can/m_can/m_can_pci.c48
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h2
-rw-r--r--drivers/net/can/xilinx_can.c4
-rw-r--r--drivers/net/dsa/Kconfig3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c4
-rw-r--r--drivers/net/dsa/mt7530.c14
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c27
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c20
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c7
-rw-r--r--drivers/net/ethernet/broadcom/Makefile5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c1
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c42
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c5
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c87
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c32
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c15
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c3
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c5
-rw-r--r--drivers/net/ipa/gsi.c6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c40
-rw-r--r--drivers/net/phy/dp83867.c29
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/micrel.c11
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/usbnet.c6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c6
-rw-r--r--drivers/net/wireguard/noise.c45
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c115
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c123
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c4
-rw-r--r--drivers/nfc/nfcmrvl/usb.c16
-rw-r--r--drivers/nfc/pn533/pn533.c5
-rw-r--r--drivers/nfc/st21nfca/se.c70
-rw-r--r--drivers/nfc/st21nfca/st21nfca.h1
-rw-r--r--drivers/nvdimm/core.c9
-rw-r--r--drivers/nvdimm/security.c5
-rw-r--r--drivers/nvme/host/core.c26
-rw-r--r--drivers/nvme/host/multipath.c25
-rw-r--r--drivers/nvme/host/nvme.h4
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/configfs.c2
-rw-r--r--drivers/nvme/target/core.c24
-rw-r--r--drivers/nvme/target/fc.c8
-rw-r--r--drivers/nvme/target/fcloop.c16
-rw-r--r--drivers/nvme/target/io-cmd-file.c6
-rw-r--r--drivers/nvme/target/loop.c4
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/rdma.c12
-rw-r--r--drivers/nvme/target/tcp.c10
-rw-r--r--drivers/of/kexec.c9
-rw-r--r--drivers/of/overlay.c4
-rw-r--r--drivers/opp/of.c2
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c15
-rw-r--r--drivers/pci/controller/pci-aardvark.c48
-rw-r--r--drivers/pci/controller/pcie-mediatek.c1
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c6
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c3
-rw-r--r--drivers/pci/pci-acpi.c41
-rw-r--r--drivers/pci/pci.c22
-rw-r--r--drivers/pci/pcie/aer.c7
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c13
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c18
-rw-r--r--drivers/pinctrl/mediatek/Kconfig1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8365.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c64
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h7
-rw-r--r--drivers/pinctrl/renesas/core.c7
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c29
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c2
-rw-r--r--drivers/platform/chrome/cros_ec.c16
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c2
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c12
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c50
-rw-r--r--drivers/platform/mips/Kconfig2
-rw-r--r--drivers/platform/mips/cpu_hwmon.c127
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c2
-rw-r--r--drivers/platform/x86/intel/hid.c8
-rw-r--r--drivers/power/supply/axp288_charger.c17
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/supply/charger-manager.c7
-rw-r--r--drivers/power/supply/max8997_charger.c8
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-raspberrypi-poe.c2
-rw-r--r--drivers/regulator/core.c7
-rw-r--r--drivers/regulator/da9121-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/qcom_smd-regulator.c35
-rw-r--r--drivers/regulator/scmi-regulator.c2
-rw-r--r--drivers/remoteproc/imx_rproc.c3
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c9
-rw-r--r--drivers/rtc/class.c9
-rw-r--r--drivers/rtc/rtc-ftrtc010.c38
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c16
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c3
-rw-r--r--drivers/rtc/rtc-sun6i.c14
-rw-r--r--drivers/s390/cio/chsc.c4
-rw-r--r--drivers/scsi/dc395x.c15
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c3
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c2
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c67
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c37
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c23
-rw-r--r--drivers/scsi/myrb.c11
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c3
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c6
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c14
-rw-r--r--drivers/scsi/ufs/ufshcd.c7
-rw-r--r--drivers/scsi/ufs/ufshpb.c19
-rw-r--r--drivers/scsi/vmw_pvscsi.h4
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm-pmb.c3
-rw-r--r--drivers/soc/qcom/llcc-qcom.c1
-rw-r--r--drivers/soc/qcom/smp2p.c1
-rw-r--r--drivers/soc/qcom/smsm.c1
-rw-r--r--drivers/soc/rockchip/grf.c2
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c2
-rw-r--r--drivers/soundwire/intel.c3
-rw-r--r--drivers/soundwire/qcom.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-rockchip.c113
-rw-r--r--drivers/spi/spi-rspi.c15
-rw-r--r--drivers/spi/spi-stm32-qspi.c3
-rw-r--r--drivers/spi/spi-ti-qspi.c5
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c2
-rw-r--r--drivers/staging/greybus/audio_codec.c4
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c2
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c8
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c37
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c4
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c29
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h2
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c108
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c12
-rw-r--r--drivers/staging/rtl8712/usb_ops.c27
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c12
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/target/target_core_user.c50
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c5
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c3
-rw-r--r--drivers/thermal/devfreq_cooling.c25
-rw-r--r--drivers/thermal/imx_sc_thermal.c6
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/thunderbolt/tb.c19
-rw-r--r--drivers/thunderbolt/test.c16
-rw-r--r--drivers/thunderbolt/tunnel.c11
-rw-r--r--drivers/thunderbolt/tunnel.h4
-rw-r--r--drivers/tty/goldfish.c24
-rw-r--r--drivers/tty/n_gsm.c36
-rw-r--r--drivers/tty/n_tty.c38
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c2
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c8
-rw-r--r--drivers/tty/serial/8250/8250_port.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c15
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/icom.c2
-rw-r--r--drivers/tty/serial/meson_uart.c13
-rw-r--r--drivers/tty/serial/msm_serial.c5
-rw-r--r--drivers/tty/serial/owl-uart.c1
-rw-r--r--drivers/tty/serial/pch_uart.c27
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/sa1100.c4
-rw-r--r--drivers/tty/serial/serial_core.c34
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c6
-rw-r--r--drivers/tty/serial/sifive.c8
-rw-r--r--drivers/tty/serial/st-asc.c4
-rw-r--r--drivers/tty/serial/stm32-usart.c15
-rw-r--r--drivers/tty/synclink_gt.c2
-rw-r--r--drivers/tty/sysrq.c13
-rw-r--r--drivers/tty/tty_buffer.c3
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c19
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/core/hcd.c29
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc3/drd.c9
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c26
-rw-r--r--drivers/usb/gadget/function/f_fs.c40
-rw-r--r--drivers/usb/gadget/function/u_ether.c11
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c1
-rw-r--r--drivers/usb/host/isp116x-hcd.c6
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/isp1760/isp1760-core.c8
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/phy/phy-omap-otg.c4
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/io_usbvend.h1
-rw-r--r--drivers/usb/serial/option.c8
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/typec/mux.c14
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c4
-rw-r--r--drivers/usb/usbip/stub_dev.c2
-rw-r--r--drivers/usb/usbip/stub_rx.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c3
-rw-r--r--drivers/vdpa/vdpa.c13
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c5
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c7
-rw-r--r--drivers/vhost/net.c15
-rw-r--r--drivers/vhost/vdpa.c5
-rw-r--r--drivers/vhost/vringh.c10
-rw-r--r--drivers/video/console/sticon.c5
-rw-r--r--drivers/video/console/sticore.c32
-rw-r--r--drivers/video/fbdev/amba-clcd.c5
-rw-r--r--drivers/video/fbdev/core/fbcon.c5
-rw-r--r--drivers/video/fbdev/core/fbmem.c5
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/hyperv_fb.c19
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c12
-rw-r--r--drivers/video/fbdev/sticore.h3
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/video/fbdev/vesafb.c5
-rw-r--r--drivers/virtio/virtio_mmio.c1
-rw-r--r--drivers/virtio/virtio_pci_common.c3
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c1
-rw-r--r--drivers/watchdog/rti_wdt.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c334
-rw-r--r--drivers/watchdog/sp5100_tco.h7
-rw-r--r--drivers/watchdog/ts4800_wdt.c5
-rw-r--r--drivers/watchdog/wdat_wdt.c1
-rw-r--r--drivers/xen/xlate_mmu.c1
644 files changed, 6677 insertions, 5161 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 71b87c16f92d..ed1341030684 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -100,6 +100,16 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
+/* Check if a CPC register is in SystemMemory */
+#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
+ (cpc)->cpc_entry.reg.space_id == \
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+
+/* Check if a CPC register is in SystemIo */
+#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
+ (cpc)->cpc_entry.reg.space_id == \
+ ACPI_ADR_SPACE_SYSTEM_IO)
+
/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
(reg)->address == 0 && \
@@ -1378,6 +1388,9 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* transition latency for performance change requests. The closest we have
* is the timing information from the PCCT tables which provides the info
* on the number and frequency of PCC commands the platform can handle.
+ *
+ * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
+ * then assume there is no latency.
*/
unsigned int cppc_get_transition_latency(int cpu_num)
{
@@ -1403,7 +1416,9 @@ unsigned int cppc_get_transition_latency(int cpu_num)
return CPUFREQ_ETERNAL;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
- if (!CPC_IN_PCC(desired_reg))
+ if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
+ return 0;
+ else if (!CPC_IN_PCC(desired_reg))
return CPUFREQ_ETERNAL;
if (pcc_ss_id < 0)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 11e4c930a981..b86d3b869a58 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -433,6 +433,16 @@ void acpi_init_properties(struct acpi_device *adev)
acpi_extract_apple_properties(adev);
}
+static void acpi_free_device_properties(struct list_head *list)
+{
+ struct acpi_device_properties *props, *tmp;
+
+ list_for_each_entry_safe(props, tmp, list, list) {
+ list_del(&props->list);
+ kfree(props);
+ }
+}
+
static void acpi_destroy_nondev_subnodes(struct list_head *list)
{
struct acpi_data_node *dn, *next;
@@ -445,22 +455,18 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
wait_for_completion(&dn->kobj_done);
list_del(&dn->sibling);
ACPI_FREE((void *)dn->data.pointer);
+ acpi_free_device_properties(&dn->data.properties);
kfree(dn);
}
}
void acpi_free_properties(struct acpi_device *adev)
{
- struct acpi_device_properties *props, *tmp;
-
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
- list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
- list_del(&props->list);
- kfree(props);
- }
+ acpi_free_device_properties(&adev->data.properties);
}
/**
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7ae09e4b4592..07515139141e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -374,6 +374,18 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
+ /*
+ * ASUS B1400CEAE hangs on resume from suspend (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
+ */
+ {
+ .callback = init_default_s3,
+ .ident = "ASUS B1400CEAE",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
+ },
+ },
{},
};
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 00c0ebaab29f..6e23b76aef5d 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -415,19 +415,30 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
- void *base;
- ssize_t rc;
+ void __iomem *base;
+ ssize_t size;
data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+ size = data_attr->attr.size;
+
+ if (offset < 0)
+ return -EINVAL;
+
+ if (offset >= size)
+ return 0;
- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+ if (count > size - offset)
+ count = size - offset;
+
+ base = acpi_os_map_iomem(data_attr->addr, size);
if (!base)
return -ENOMEM;
- rc = memory_read_from_buffer(buf, count, &offset, base,
- data_attr->attr.size);
- acpi_os_unmap_memory(base, data_attr->attr.size);
- return rc;
+ memcpy_fromio(buf, base + offset, count);
+
+ acpi_os_unmap_iomem(base, size);
+
+ return count;
}
static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a0343b7c9add..413faa9330b2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5500,7 +5500,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
- const struct ata_port_info *pi;
+ const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;
@@ -5508,7 +5508,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
if (!host)
return NULL;
- for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+ for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ppi[j])
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 34bb4608bdc6..93d6920cd86c 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -196,7 +196,7 @@ static struct {
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
-ata_bitfield_name_match(xfer,ata_xfer_names)
+ata_bitfield_name_search(xfer, ata_xfer_names)
/*
* ATA Port attributes
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index b5a3f710d76d..4cc8a1027888 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
int i;
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
+ put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
+ put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
@@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
irq = i;
irq_handler = octeon_cf_interrupt;
}
+ put_device(&dma_dev->dev);
}
of_node_put(dma_node);
}
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index bdc98c5713d5..d171535fc18f 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv)
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
- goto out_unregister;
+ goto out_del_list;
}
module_add_driver(drv->owner, drv);
@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_del_list:
+ klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5fc258073bc7..fddd08e482fc 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -564,6 +564,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -573,6 +579,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -584,6 +591,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
+ &dev_attr_mmio_stale_data.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 095025cbbb15..a54c6ac88e03 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -249,7 +249,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -304,7 +303,6 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
- wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -719,9 +717,6 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
- /* wait for probe timeout */
- wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
-
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -944,6 +939,7 @@ out_unlock:
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
+ bool async = false;
device_lock(dev);
if (dev->p->dead) {
@@ -982,7 +978,7 @@ static int __device_attach(struct device *dev, bool allow_async)
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
- async_schedule_dev(__device_attach_async_helper, dev);
+ async = true;
} else {
pm_request_idle(dev);
}
@@ -992,6 +988,8 @@ static int __device_attach(struct device *dev, bool allow_async)
}
out_unlock:
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}
diff --git a/drivers/base/init.c b/drivers/base/init.c
index a9f57c22fb9e..dab8aa5d2888 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/of.h>
+#include <linux/backing-dev.h>
#include "base.h"
@@ -20,6 +21,7 @@
void __init driver_init(void)
{
/* These are the core pieces */
+ bdi_init(&noop_backing_dev_info);
devtmpfs_init();
devices_init();
buses_init();
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 60c38f9cf1a7..c0d501a3a714 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -634,10 +634,9 @@ int register_memory(struct memory_block *memory)
}
ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
GFP_KERNEL));
- if (ret) {
- put_device(&memory->dev);
+ if (ret)
device_unregister(&memory->dev);
- }
+
return ret;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c56d34f8158f..0f5319b79fad 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -679,6 +679,7 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
+ compaction_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
node_remove_accesses(node);
node_remove_caches(node);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index da58c4adae14..1de11af7b8a9 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2104,6 +2104,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
+ genpd->next_wakeup = KTIME_MAX;
genpd->provider = NULL;
genpd->has_provider = false;
genpd->clks = NULL;
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 4c77837769c6..735a23db1b5e 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -48,12 +48,14 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
{
bool ret;
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
ret = fwnode_call_bool_op(fwnode, property_present, propname);
- if (ret == false && !IS_ERR_OR_NULL(fwnode) &&
- !IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_bool_op(fwnode->secondary, property_present,
- propname);
- return ret;
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_present, propname);
}
EXPORT_SYMBOL_GPL(fwnode_property_present);
@@ -233,15 +235,16 @@ static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
{
int ret;
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
ret = fwnode_call_int_op(fwnode, property_read_int_array, propname,
elem_size, val, nval);
- if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
- !IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_int_op(
- fwnode->secondary, property_read_int_array, propname,
- elem_size, val, nval);
+ if (ret != -EINVAL)
+ return ret;
- return ret;
+ return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname,
+ elem_size, val, nval);
}
/**
@@ -372,14 +375,16 @@ int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
{
int ret;
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
ret = fwnode_call_int_op(fwnode, property_read_string_array, propname,
val, nval);
- if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
- !IS_ERR_OR_NULL(fwnode->secondary))
- ret = fwnode_call_int_op(fwnode->secondary,
- property_read_string_array, propname,
- val, nval);
- return ret;
+ if (ret != -EINVAL)
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname,
+ val, nval);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
@@ -479,7 +484,20 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
- return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ int ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -ENOENT;
+
+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+ if (ret == 0)
+ return ret;
+
+ if (IS_ERR_OR_NULL(fwnode->secondary))
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop,
nargs, index, args);
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
@@ -675,12 +693,13 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
unsigned int depth)
{
- unsigned int i;
-
fwnode_handle_get(fwnode);
- for (i = 0; i < depth && fwnode; i++)
+ do {
+ if (depth-- == 0)
+ break;
fwnode = fwnode_get_next_parent(fwnode);
+ } while (fwnode);
return fwnode;
}
@@ -699,17 +718,17 @@ EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
struct fwnode_handle *test_child)
{
- if (!test_ancestor)
+ if (IS_ERR_OR_NULL(test_ancestor))
return false;
fwnode_handle_get(test_child);
- while (test_child) {
+ do {
if (test_child == test_ancestor) {
fwnode_handle_put(test_child);
return true;
}
test_child = fwnode_get_next_parent(test_child);
- }
+ } while (test_child);
return false;
}
@@ -738,7 +757,7 @@ fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
{
struct fwnode_handle *next_child = child;
- if (!fwnode)
+ if (IS_ERR_OR_NULL(fwnode))
return NULL;
do {
@@ -762,16 +781,16 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
const struct fwnode_handle *fwnode = dev_fwnode(dev);
struct fwnode_handle *next;
+ if (IS_ERR_OR_NULL(fwnode))
+ return NULL;
+
/* Try to find a child in primary fwnode */
next = fwnode_get_next_child_node(fwnode, child);
if (next)
return next;
/* When no more children in primary, continue with secondary */
- if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
- next = fwnode_get_next_child_node(fwnode->secondary, child);
-
- return next;
+ return fwnode_get_next_child_node(fwnode->secondary, child);
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
@@ -838,6 +857,9 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
if (!fwnode_has_op(fwnode, device_is_available))
return true;
@@ -1045,14 +1067,14 @@ fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
parent = fwnode_graph_get_port_parent(prev);
else
parent = fwnode;
+ if (IS_ERR_OR_NULL(parent))
+ return NULL;
ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+ if (ep)
+ return ep;
- if (IS_ERR_OR_NULL(ep) &&
- !IS_ERR_OR_NULL(parent) && !IS_ERR_OR_NULL(parent->secondary))
- ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
-
- return ep;
+ return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 548e0dd53528..8ba2fe356f01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
- struct drbd_request *req = NULL;
+ struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
- if (req->epoch == expect_epoch)
+ if (req->epoch == expect_epoch) {
+ tmp = req;
break;
+ }
+ req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
@@ -3604,9 +3607,8 @@ const char *cmdname(enum drbd_packet cmd)
* when we want to support more than
* one PRO_VERSION */
static const char *cmdnames[] = {
+
[P_DATA] = "Data",
- [P_WSAME] = "WriteSame",
- [P_TRIM] = "Trim",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
@@ -3617,7 +3619,6 @@ const char *cmdname(enum drbd_packet cmd)
[P_DATA_REQUEST] = "DataRequest",
[P_RS_DATA_REQUEST] = "RSDataRequest",
[P_SYNC_PARAM] = "SyncParam",
- [P_SYNC_PARAM89] = "SyncParam89",
[P_PROTOCOL] = "ReportProtocol",
[P_UUIDS] = "ReportUUIDs",
[P_SIZES] = "ReportSizes",
@@ -3625,6 +3626,7 @@ const char *cmdname(enum drbd_packet cmd)
[P_SYNC_UUID] = "ReportSyncUUID",
[P_AUTH_CHALLENGE] = "AuthChallenge",
[P_AUTH_RESPONSE] = "AuthResponse",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
[P_PING] = "Ping",
[P_PING_ACK] = "PingAck",
[P_RECV_ACK] = "RecvAck",
@@ -3635,23 +3637,25 @@ const char *cmdname(enum drbd_packet cmd)
[P_NEG_DREPLY] = "NegDReply",
[P_NEG_RS_DREPLY] = "NegRSDReply",
[P_BARRIER_ACK] = "BarrierAck",
- [P_STATE_CHG_REQ] = "StateChgRequest",
[P_STATE_CHG_REPLY] = "StateChgReply",
[P_OV_REQUEST] = "OVRequest",
[P_OV_REPLY] = "OVReply",
[P_OV_RESULT] = "OVResult",
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+ [P_SYNC_PARAM89] = "SyncParam89",
[P_COMPRESSED_BITMAP] = "CBitmap",
[P_DELAY_PROBE] = "DelayProbe",
[P_OUT_OF_SYNC] = "OutOfSync",
- [P_RETRY_WRITE] = "RetryWrite",
[P_RS_CANCEL] = "RSCancel",
[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_PROTOCOL_UPDATE] = "protocol_update",
+ [P_TRIM] = "Trim",
[P_RS_THIN_REQ] = "rs_thin_req",
[P_RS_DEALLOCATED] = "rs_deallocated",
+ [P_WSAME] = "WriteSame",
+ [P_ZEROES] = "Zeroes",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1c152b542a52..db0b3e8982fe 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@@ -530,7 +530,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
-static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
- } else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+ } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
if (!next_valid_format(current_drive))
return;
}
- err_count = ++(*errors);
+ err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
- format_errors = 0;
cont = &format_cont;
- errors = &format_errors;
+ floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@@ -2761,10 +2759,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
- current_req->error_count = 0;
+ floppy_errors = 0;
list_del_init(&current_req->queuelist);
+ return 1;
}
- return current_req != NULL;
+ return 0;
}
/* Starts or continues processing request. Will automatically unlock the
@@ -2823,7 +2822,6 @@ do_request:
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
- errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 582b23befb5c..b391ca062add 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -896,11 +896,15 @@ static int wait_for_reconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout)
return 0;
- if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
+
+ if (!wait_event_timeout(config->conn_wait,
+ test_bit(NBD_RT_DISCONNECTED,
+ &config->runtime_flags) ||
+ atomic_read(&config->live_connections) > 0,
+ config->dead_conn_timeout))
return 0;
- return wait_event_timeout(config->conn_wait,
- atomic_read(&config->live_connections) > 0,
- config->dead_conn_timeout) > 0;
+
+ return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
@@ -1364,7 +1368,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
- sock_shutdown(nbd);
+ nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1463,15 +1467,20 @@ static struct nbd_config *nbd_alloc_config(void)
{
struct nbd_config *config;
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-ENODEV);
+
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
- if (!config)
- return NULL;
+ if (!config) {
+ module_put(THIS_MODULE);
+ return ERR_PTR(-ENOMEM);
+ }
+
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
- try_module_get(THIS_MODULE);
return config;
}
@@ -1498,12 +1507,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&nbd->config_lock);
goto out;
}
- config = nbd->config = nbd_alloc_config();
- if (!config) {
- ret = -ENOMEM;
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
+ ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
@@ -1910,13 +1920,14 @@ again:
nbd_put(nbd);
return -EINVAL;
}
- config = nbd->config = nbd_alloc_config();
- if (!nbd->config) {
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
printk(KERN_ERR "nbd: couldn't allocate config\n");
- return -ENOMEM;
+ return PTR_ERR(config);
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
@@ -2026,6 +2037,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
sock_shutdown(nbd);
+ wake_up(&nbd->config->conn_wait);
/*
* Make sure recv thread has finished, we can safely call nbd_clear_que()
* to cancel the inflight I/Os.
@@ -2473,6 +2485,12 @@ static void __exit nbd_cleanup(void)
struct nbd_device *nbd;
LIST_HEAD(del_list);
+ /*
+ * Unregister netlink interface prior to waiting
+ * for the completion of netlink commands.
+ */
+ genl_unregister_family(&nbd_genl_family);
+
nbd_dbg_close();
mutex_lock(&nbd_index_mutex);
@@ -2482,6 +2500,9 @@ static void __exit nbd_cleanup(void)
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
+ if (refcount_read(&nbd->config_refs))
+ printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
+ refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
printk(KERN_ERR "nbd: possibly leaking a device\n");
nbd_put(nbd);
@@ -2491,7 +2512,6 @@ static void __exit nbd_cleanup(void)
destroy_workqueue(nbd_del_wq);
idr_destroy(&nbd_index_idr);
- genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0b8a26892a5..c05138a28475 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -855,11 +855,12 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_io_opt(q, blk_size * opt_io_size);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
- q->limits.discard_granularity = blk_size;
-
virtio_cread(vdev, struct virtio_blk_config,
discard_sector_alignment, &v);
- q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
+ if (v)
+ q->limits.discard_granularity = v << SECTOR_SHIFT;
+ else
+ q->limits.discard_granularity = blk_size;
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
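
The virtio_blk hunk above derives discard_granularity from the device's discard_sector_alignment field, which is reported in 512-byte sectors, and falls back to the logical block size when the device reports 0. A small arithmetic sketch (values made up for illustration):

	u32 v = 8;						/* device reports 8 sectors */
	unsigned int blk_size = 512;				/* logical block size in bytes */
	unsigned int granularity = v ? v << SECTOR_SHIFT : blk_size;	/* 8 << 9 = 4096 bytes */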
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 8eb7fddfb930..ed91af4319b5 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -696,9 +696,9 @@ static int qca_close(struct hci_uart *hu)
skb_queue_purge(&qca->tx_wait_q);
skb_queue_purge(&qca->txq);
skb_queue_purge(&qca->rx_memdump_q);
- del_timer(&qca->tx_idle_timer);
- del_timer(&qca->wake_retrans_timer);
destroy_workqueue(qca->workqueue);
+ del_timer_sync(&qca->tx_idle_timer);
+ del_timer_sync(&qca->wake_retrans_timer);
qca->hu = NULL;
kfree_skb(qca->rx_skb);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 00d46f3ae22f..e93912e56f28 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3325,7 +3325,9 @@ static int sysc_remove(struct platform_device *pdev)
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
- cancel_delayed_work_sync(&ddata->idle_work);
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0a80ec9c02d9..f0c12c8a0328 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -430,27 +430,40 @@ config ADI
source "drivers/char/imx_amp/Kconfig"
config RANDOM_TRUST_CPU
- bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ bool "Initialize RNG using CPU RNG instructions"
+ default y
depends on ARCH_RANDOM
- default n
help
- Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
- RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
- for the purposes of initializing Linux's CRNG. Since this is not
- something that can be independently audited, this amounts to trusting
- that CPU manufacturer (perhaps with the insistence or mandate
- of a Nation State's intelligence or law enforcement agencies)
- has not installed a hidden back door to compromise the CPU's
- random number generation facilities. This can also be configured
- at boot with "random.trust_cpu=on/off".
+ Initialize the RNG using random numbers supplied by the CPU's
+ RNG instructions (e.g. RDRAND), if supported and available. These
+ random numbers are never used directly, but are rather hashed into
+ the main input pool, and this happens regardless of whether or not
+ this option is enabled. Instead, this option controls whether they
+ are credited and hence can initialize the RNG. Additionally,
+ other sources of randomness are always used, regardless of this
+ setting. Enabling this implies trusting that the CPU can supply high
+ quality and non-backdoored random numbers.
+
+ Say Y here unless you have reason to mistrust your CPU or believe
+ its RNG facilities may be faulty. This may also be configured at
+ boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
- bool "Trust the bootloader to initialize Linux's CRNG"
+ bool "Initialize RNG using bootloader-supplied seed"
+ default y
help
- Some bootloaders can provide entropy to increase the kernel's initial
- device randomness. Say Y here to assume the entropy provided by the
- booloader is trustworthy so it will be added to the kernel's entropy
- pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
+ Initialize the RNG using a seed supplied by the bootloader or boot
+ environment (e.g. EFI or a bootloader-generated device tree). This
+ seed is not used directly, but is rather hashed into the main input
+ pool, and this happens regardless of whether or not this option is
+ enabled. Instead, this option controls whether the seed is credited
+ and hence can initialize the RNG. Additionally, other sources of
+ randomness are always used, regardless of this setting. Enabling
+ this implies trusting that the bootloader can supply high quality and
+ non-backdoored seeds.
+
+ Say Y here unless you have reason to mistrust your bootloader or
+ believe its RNG facilities may be faulty. This may also be configured
+ at boot time with "random.trust_bootloader=on/off".
endmenu
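
Both options above only change whether the corresponding input is credited; the data itself is always mixed into the pool, and either default can be overridden on the kernel command line. As a rough sketch of how such a build-time default typically pairs with its boot parameter (simplified; the real handling lives in random.c):

	static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
	static int __init parse_trust_cpu(char *arg)
	{
		return kstrtobool(arg, &trust_cpu);
	}
	early_param("random.trust_cpu", parse_trust_cpu);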
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a3db27916256..cfb085de876b 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
+#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index e0d77fa048fb..f06e4f95114f 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -92,7 +92,7 @@ static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev)
r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
if (r != 0) {
- clk_disable(ddata->clk);
+ clk_disable_unprepare(ddata->clk);
dev_err(dev, "HW init failed: %d\n", r);
return -EIO;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fe91090e04a4..2badf36d4816 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -11,8 +11,8 @@
* Copyright 2002 MontaVista Software Inc.
*/
-#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
-#define dev_fmt pr_fmt
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8d7a8898e80b..f366e8e3eee3 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -814,6 +814,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
break;
case SSIF_GETTING_EVENTS:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting events\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);
@@ -838,6 +846,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
break;
case SSIF_GETTING_MESSAGES:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting messages\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);
@@ -861,6 +877,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
deliver_recv_msg(ssif_info, msg);
}
break;
+
+ default:
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "Invalid state in message done handling: %d\n",
+ ssif_info->ssif_state);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ebe86de9d0ac..7f6585c49380 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,310 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * random.c -- A strong random number generator
- *
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
- *
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
- *
- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
- * rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL are
- * required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * (now, with legal B.S. out of the way.....)
- *
- * This routine gathers environmental noise from device drivers, etc.,
- * and returns good random numbers, suitable for cryptographic use.
- * Besides the obvious cryptographic uses, these numbers are also good
- * for seeding TCP sequence numbers, and other places where it is
- * desirable to have numbers which are not only random, but hard to
- * predict by an attacker.
- *
- * Theory of operation
- * ===================
- *
- * Computers are very predictable devices. Hence it is extremely hard
- * to produce truly random numbers on a computer --- as opposed to
- * pseudo-random numbers, which can easily generated by using a
- * algorithm. Unfortunately, it is very easy for attackers to guess
- * the sequence of pseudo-random number generators, and for some
- * applications this is not acceptable. So instead, we must try to
- * gather "environmental noise" from the computer's environment, which
- * must be hard for outside attackers to observe, and use that to
- * generate random numbers. In a Unix environment, this is best done
- * from inside the kernel.
- *
- * Sources of randomness from the environment include inter-keyboard
- * timings, inter-interrupt timings from some interrupts, and other
- * events which are both (a) non-deterministic and (b) hard for an
- * outside observer to measure. Randomness from these sources are
- * added to an "entropy pool", which is mixed using a CRC-like function.
- * This is not cryptographically strong, but it is adequate assuming
- * the randomness is not chosen maliciously, and it is fast enough that
- * the overhead of doing it on every interrupt is very reasonable.
- * As random bytes are mixed into the entropy pool, the routines keep
- * an *estimate* of how many bits of randomness have been stored into
- * the random number generator's internal state.
- *
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
- * exposing the internal state of the entropy pool. It is believed to
- * be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
- * returned from the generator is less than the inherent entropy in
- * the pool, the output data is totally unpredictable. For this
- * reason, the routine decreases its internal estimate of how many
- * bits of "true randomness" are contained in the entropy pool as it
- * outputs random numbers.
- *
- * If this estimate goes to zero, the routine can still generate
- * random numbers; however, an attacker may (at least in theory) be
- * able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
- * not believed to be feasible, but there is a remote possibility.
- * Nonetheless, these numbers should be useful for the vast majority
- * of purposes.
- *
- * Exported interfaces ---- output
- * ===============================
- *
- * There are four exported interfaces; two for use within the kernel,
- * and two or use from userspace.
- *
- * Exported interfaces ---- userspace output
- * -----------------------------------------
- *
- * The userspace interfaces are two character devices /dev/random and
- * /dev/urandom. /dev/random is suitable for use when very high
- * quality randomness is desired (for example, for key generation or
- * one-time pads), as it will only return a maximum of the number of
- * bits of randomness (as estimated by the random number generator)
- * contained in the entropy pool.
- *
- * The /dev/urandom device does not have this limit, and will return
- * as many bytes as are requested. As more and more random bytes are
- * requested without giving time for the entropy pool to recharge,
- * this will result in random numbers that are merely cryptographically
- * strong. For many applications, however, this is acceptable.
- *
- * Exported interfaces ---- kernel output
- * --------------------------------------
- *
- * The primary kernel interface is
- *
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer. This is equivalent to a
- * read from /dev/urandom.
- *
- * For less critical applications, there are the functions:
- *
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
- *
- * These are produced by a cryptographic RNG seeded from get_random_bytes,
- * and so do not deplete the entropy pool as much. These are recommended
- * for most in-kernel operations *if the result is going to be stored in
- * the kernel*.
- *
- * Specifically, the get_random_int() family do not attempt to do
- * "anti-backtracking". If you capture the state of the kernel (e.g.
- * by snapshotting the VM), you can figure out previous get_random_int()
- * return values. But if the value is stored in the kernel anyway,
- * this is not a problem.
- *
- * It *is* safe to expose get_random_int() output to attackers (e.g. as
- * network cookies); given outputs 1..n, it's not feasible to predict
- * outputs 0 or n+1. The only concern is an attacker who breaks into
- * the kernel later; the get_random_int() engine is not reseeded as
- * often as the get_random_bytes() one.
- *
- * get_random_bytes() is needed for keys that need to stay secret after
- * they are erased from the kernel. For example, any key that will
- * be wrapped and stored encrypted. And session encryption keys: we'd
- * like to know that after the session is closed and the keys erased,
- * the plaintext is unrecoverable to someone who recorded the ciphertext.
- *
- * But for network ports/cookies, stack canaries, PRNG seeds, address
- * space layout randomization, session *authentication* keys, or other
- * applications where the sensitive data is stored in the kernel in
- * plaintext for as long as it's sensitive, the get_random_int() family
- * is just fine.
- *
- * Consider ASLR. We want to keep the address space secret from an
- * outside attacker while the process is running, but once the address
- * space is torn down, it's of no use to an attacker any more. And it's
- * stored in kernel data structures as long as it's alive, so worrying
- * about an attacker's ability to extrapolate it from the get_random_int()
- * CRNG is silly.
- *
- * Even some cryptographic keys are safe to generate with get_random_int().
- * In particular, keys for SipHash are generally fine. Here, knowledge
- * of the key authorizes you to do something to a kernel object (inject
- * packets to a network connection, or flood a hash table), and the
- * key is stored with the object being protected. Once it goes away,
- * we no longer care if anyone knows the key.
- *
- * prandom_u32()
- * -------------
- *
- * For even weaker applications, see the pseudorandom generator
- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
- * numbers aren't security-critical at all, these are *far* cheaper.
- * Useful for self-tests, random error simulation, randomized backoffs,
- * and any other application where you trust that nobody is trying to
- * maliciously mess with you by guessing the "random" numbers.
- *
- * Exported interfaces ---- input
- * ==============================
- *
- * The current exported interfaces for gathering environmental noise
- * from the devices are:
- *
- * void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
- * unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
- * void add_disk_randomness(struct gendisk *disk);
- *
- * add_device_randomness() is for adding data to the random pool that
- * is likely to differ between two devices (or possibly even per boot).
- * This would be things like MAC addresses or serial numbers, or the
- * read-out of the RTC. This does *not* add any actual entropy to the
- * pool, but it initializes the pool to different values for devices
- * that might otherwise be identical and have very little entropy
- * available to them (particularly common in the embedded world).
- *
- * add_input_randomness() uses the input layer interrupt timing, as well as
- * the event type information from the hardware.
- *
- * add_interrupt_randomness() uses the interrupt timing as random
- * inputs to the entropy pool. Using the cycle counters and the irq source
- * as inputs, it feeds the randomness roughly once a second.
- *
- * add_disk_randomness() uses what amounts to the seek time of block
- * layer request events, on a per-disk_devt basis, as input to the
- * entropy pool. Note that high-speed solid state drives with very low
- * seek times do not make for good sources of entropy, as their seek
- * times are usually fairly consistent.
- *
- * All of these routines try to estimate how many bits of randomness a
- * particular randomness source. They do this by keeping track of the
- * first and second order deltas of the event timings.
- *
- * Ensuring unpredictability at system startup
- * ============================================
- *
- * When any operating system starts up, it will go through a sequence
- * of actions that are fairly predictable by an adversary, especially
- * if the start-up does not involve interaction with a human operator.
- * This reduces the actual number of bits of unpredictability in the
- * entropy pool below the value in entropy_count. In order to
- * counteract this effect, it helps to carry information in the
- * entropy pool across shut-downs and start-ups. To do this, put the
- * following lines an appropriate script which is run during the boot
- * sequence:
- *
- * echo "Initializing random number generator..."
- * random_seed=/var/run/random-seed
- * # Carry a random seed from start-up to start-up
- * # Load and then save the whole entropy pool
- * if [ -f $random_seed ]; then
- * cat $random_seed >/dev/urandom
- * else
- * touch $random_seed
- * fi
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * and the following lines in an appropriate script which is run as
- * the system is shutdown:
- *
- * # Carry a random seed from shut-down to start-up
- * # Save the whole entropy pool
- * echo "Saving random seed..."
- * random_seed=/var/run/random-seed
- * touch $random_seed
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * For example, on most modern systems using the System V init
- * scripts, such code fragments would be found in
- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
- *
- * Effectively, these commands cause the contents of the entropy pool
- * to be saved at shut-down time and reloaded into the entropy pool at
- * start-up. (The 'dd' in the addition to the bootup script is to
- * make sure that /etc/random-seed is different for every start-up,
- * even if the system crashes without executing rc.0.) Even with
- * complete knowledge of the start-up activities, predicting the state
- * of the entropy pool requires knowledge of the previous history of
- * the system.
- *
- * Configuring the /dev/random driver under Linux
- * ==============================================
- *
- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
- * the /dev/mem major number (#1). So if your system does not have
- * /dev/random and /dev/urandom created already, they can be created
- * by using the commands:
- *
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
- *
- * Acknowledgements:
- * =================
- *
- * Ideas for constructing this random number generator were derived
- * from Pretty Good Privacy's random number generator, and from private
- * discussions with Phil Karn. Colin Plumb provided a faster random
- * number generator, which speed up the mixing function of the entropy
- * pool, taken from PGPfone. Dale Worley has also contributed many
- * useful ideas and suggestions to improve this driver.
- *
- * Any flaws in the design are solely my responsibility, and should
- * not be attributed to the Phil, Colin, or any of authors of PGP.
- *
- * Further background information on this topic may be obtained from
- * RFC 1750, "Randomness Recommendations for Security", by Donald
- * Eastlake, Steve Crocker, and Jeff Schiller.
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
+ *
+ * This driver produces cryptographically secure pseudorandom data. It is divided
+ * into roughly six sections, each with a section header:
+ *
+ * - Initialization and readiness waiting.
+ * - Fast key erasure RNG, the "crng".
+ * - Entropy accumulation and extraction routines.
+ * - Entropy collection routines.
+ * - Userspace reader/writer interfaces.
+ * - Sysctl interface.
+ *
+ * The high level overview is that there is one input pool, into which
+ * various pieces of data are hashed. Prior to initialization, some of that
+ * data is then "credited" as having a certain number of bits of entropy.
+ * When enough bits of entropy are available, the hash is finalized and
+ * handed as a key to a stream cipher that expands it indefinitely for
+ * various consumers. This key is periodically refreshed as the various
+ * entropy collectors, described below, add data to the input pool.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
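
The rewritten header comment above summarizes the new design in prose; restated as a rough dataflow sketch using symbols that appear later in this file (simplified, omitting locking and rate limiting):

	mix_pool_bytes(event, len);		/* hash raw inputs into the BLAKE2s input pool */
	credit_init_bits(estimated_bits);	/* credit entropy only for trusted inputs */
	/* once POOL_READY_BITS have been credited: */
	crng_reseed();				/* extract_entropy() produces a fresh ChaCha key */
	get_random_bytes(buf, len);		/* fast-key-erasure expansion for consumers */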
@@ -327,7 +43,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -335,853 +50,1008 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
+#include <linux/uaccess.h>
+#include <linux/siphash.h>
+#include <linux/uio.h>
#include <crypto/chacha.h>
-#include <crypto/sha1.h>
-
+#include <crypto/blake2s.h>
#include <asm/processor.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/random.h>
-
-/* #define ADD_INTERRUPT_BENCH */
+/*********************************************************************
+ *
+ * Initialization and readiness waiting.
+ *
+ * Much of the RNG infrastructure is devoted to various dependencies
+ * being able to wait until the RNG has collected enough entropy and
+ * is ready for safe consumption.
+ *
+ *********************************************************************/
/*
- * Configuration information
+ * crng_init is protected by base_crng->lock, and only increases
+ * its value (from empty->early->ready).
*/
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
+static enum {
+ CRNG_EMPTY = 0, /* Little to no entropy collected */
+ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
+ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
+} crng_init __read_mostly = CRNG_EMPTY;
+#define crng_ready() (likely(crng_init >= CRNG_READY))
+/* Various types of waiters for crng_init->CRNG_READY transition. */
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+static struct fasync_struct *fasync;
+static DEFINE_SPINLOCK(random_ready_chain_lock);
+static RAW_NOTIFIER_HEAD(random_ready_chain);
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+/* Control how we warn userspace. */
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+static int ratelimit_disable __read_mostly =
+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
+ * Returns whether or not the input pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
+ * u64,int,long} family of functions.
*
- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
+ * Returns: true if the input pool has been seeded.
+ * false if the input pool has not been seeded.
*/
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
-/*
- * If the entropy count falls under this number of bits, then we
- * should wake up processes which are selecting or polling on write
- * access to /dev/random.
- */
-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+static void try_to_generate_entropy(void);
/*
- * Originally, we used a primitive polynomial of degree .poolwords
- * over GF(2). The taps for various sizes are defined below. They
- * were chosen to be evenly spaced except for the last tap, which is 1
- * to get the twisting happening as fast as possible.
- *
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a (modified) twisted Generalized Feedback Shift
- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
- * generators. ACM Transactions on Modeling and Computer Simulation
- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Modeling and Computer
- * Simulation 4:254-266)
- *
- * Thanks to Colin Plumb for suggesting this.
- *
- * The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
- * it be a good non-cryptographic hash; i.e. it not produce collisions
- * when fed "random" data of the sort we expect to see. As long as
- * the pool state differs for different inputs, we have preserved the
- * input entropy and done a good job. The fact that an intelligent
- * attacker can construct inputs that will produce controlled
- * alterations to the pool's state is not important because we don't
- * consider such inputs to contribute any randomness. The only
- * property we need with respect to them is that the attacker can't
- * increase his/her knowledge of the pool's state. Since all
- * additions are reversible (knowing the final state and the input,
- * you can reconstruct the initial state), if an attacker has any
- * uncertainty about the initial state, he/she can only shuffle that
- * uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
+ * Wait for the input pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
*
- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
- * Videau in their paper, "The Linux Pseudorandom Number Generator
- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
- * paper, they point out that we are not using a true Twisted GFSR,
- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
- * is, with only three taps, instead of the six that we are using).
- * As a result, the resulting polynomial is neither primitive nor
- * irreducible, and hence does not have a maximal period over
- * GF(2**32). They suggest a slight change to the generator
- * polynomial which improves the resulting TGFSR polynomial to be
- * irreducible, which we have made here.
+ * Returns: 0 if the input pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
*/
-static const struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
-};
+int wait_for_random_bytes(void)
+{
+ while (!crng_ready()) {
+ int ret;
+
+ try_to_generate_entropy();
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
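
Per the comment above, callers that need guaranteed-secure output should wait for (or verify) readiness first. A hypothetical consumer sketch, not part of the patch:

	static int example_generate_key(u8 key[32])
	{
		int ret = wait_for_random_bytes();	/* -ERESTARTSYS if interrupted */

		if (ret)
			return ret;
		get_random_bytes(key, 32);		/* pool is now guaranteed seeded */
		return 0;
	}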
/*
- * Static global variables
+ * Add a callback function that will be invoked when the input
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
*/
-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static struct fasync_struct *fasync;
-
-static DEFINE_SPINLOCK(random_ready_list_lock);
-static LIST_HEAD(random_ready_list);
+int __cold register_random_ready_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret = -EALREADY;
-struct crng_state {
- __u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
-};
+ if (crng_ready())
+ return ret;
-static struct crng_state primary_crng = {
- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
-};
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ if (!crng_ready())
+ ret = raw_notifier_chain_register(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
+}
/*
- * crng_init = 0 --> Uninitialized
- * 1 --> Initialized
- * 2 --> Initialized from input_pool
- *
- * crng_init is protected by primary_crng->lock, and only increases
- * its value (from 0->1->2).
+ * Delete a previously registered readiness callback function.
*/
-static int crng_init = 0;
-static bool crng_need_final_init = false;
-#define crng_ready() (likely(crng_init > 1))
-static int crng_init_cnt = 0;
-static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
-static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
-static void process_random_ready_list(void);
-static void _get_random_bytes(void *buf, int nbytes);
-
-static struct ratelimit_state unseeded_warning =
- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
-static struct ratelimit_state urandom_warning =
- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+int __cold unregister_random_ready_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
-static int ratelimit_disable __read_mostly;
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
+}
-module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
-MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+static void __cold process_random_ready_list(void)
+{
+ unsigned long flags;
-/**********************************************************************
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ raw_notifier_call_chain(&random_ready_chain, 0, NULL);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+}
+
+#define warn_unseeded_randomness() \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init)
+
+
+/*********************************************************************
*
- * OS independent entropy store. Here are the functions which handle
- * storing entropy in an entropy pool.
+ * Fast key erasure RNG, the "crng".
*
- **********************************************************************/
+ * These functions expand entropy from the entropy extractor into
+ * long streams for external consumption using the "fast key erasure"
+ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
+ *
+ * There are a few exported interfaces for use by other drivers:
+ *
+ * void get_random_bytes(void *buf, size_t len)
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
+ *
+ * These interfaces will return the requested number of random bytes
+ * into the given buffer or as a return value. This is equivalent to
+ * a read from /dev/urandom. The u32, u64, int, and long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
+ *
+ *********************************************************************/
-struct entropy_store;
-struct entropy_store {
- /* read-only data: */
- const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
+enum {
+ CRNG_RESEED_START_INTERVAL = HZ,
+ CRNG_RESEED_INTERVAL = 60 * HZ
+};
- /* read-write data: */
+static struct {
+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+ unsigned long birth;
+ unsigned long generation;
spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
- int entropy_count;
- unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
+} base_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips);
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+struct crng {
+ u8 key[CHACHA_KEY_SIZE];
+ unsigned long generation;
+ local_lock_t lock;
+};
-static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
+static DEFINE_PER_CPU(struct crng, crngs) = {
+ .generation = ULONG_MAX,
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
};
-static __u32 const twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
+static void extract_entropy(void *buf, size_t len);
-/*
- * This function adds bytes into the entropy "pool". It does not
- * update the entropy estimate. The caller should call
- * credit_entropy_bits if this is appropriate.
- *
- * The pool is stirred with a primitive polynomial of the appropriate
- * degree, and then twisted. We twist by three bits at a time because
- * it's cheap to do so and helps slightly in the expected case where
- * the entropy is concentrated in the low-order bits.
- */
-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+/* This extracts a new crng key from the input pool. */
+static void crng_reseed(void)
{
- unsigned long i, tap1, tap2, tap3, tap4, tap5;
- int input_rotate;
- int wordmask = r->poolinfo->poolwords - 1;
- const char *bytes = in;
- __u32 w;
-
- tap1 = r->poolinfo->tap1;
- tap2 = r->poolinfo->tap2;
- tap3 = r->poolinfo->tap3;
- tap4 = r->poolinfo->tap4;
- tap5 = r->poolinfo->tap5;
-
- input_rotate = r->input_rotate;
- i = r->add_ptr;
-
- /* mix one byte at a time to simplify size handling and churn faster */
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
-
- /* XOR in the various taps */
- w ^= r->pool[i];
- w ^= r->pool[(i + tap1) & wordmask];
- w ^= r->pool[(i + tap2) & wordmask];
- w ^= r->pool[(i + tap3) & wordmask];
- w ^= r->pool[(i + tap4) & wordmask];
- w ^= r->pool[(i + tap5) & wordmask];
-
- /* Mix the result back in with a twist */
- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
-
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
- }
+ unsigned long flags;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
- r->input_rotate = input_rotate;
- r->add_ptr = i;
-}
+ extract_entropy(key, sizeof(key));
-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
-{
- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
- _mix_pool_bytes(r, in, nbytes);
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+ * and update the generation counter. We avoid hitting ULONG_MAX,
+ * because the per-cpu crngs are initialized to ULONG_MAX, so this
+ * forces new CPUs that come online to always initialize.
+ */
+ spin_lock_irqsave(&base_crng.lock, flags);
+ memcpy(base_crng.key, key, sizeof(base_crng.key));
+ next_gen = base_crng.generation + 1;
+ if (next_gen == ULONG_MAX)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ WRITE_ONCE(base_crng.birth, jiffies);
+ if (!crng_ready())
+ crng_init = CRNG_READY;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+/*
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
+ *
+ * The returned ChaCha state contains within it a copy of the old
+ * key value, at index 4, so the state should always be zeroed out
+ * immediately after using in order to maintain forward secrecy.
+ * If the state cannot be erased in a timely manner, then it is
+ * safer to set the random_data parameter to &chacha_state[4] so
+ * that this function overwrites it before returning.
+ */
+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- unsigned long flags;
+ u8 first_block[CHACHA_BLOCK_SIZE];
- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
- _mix_pool_bytes(r, in, nbytes);
- spin_unlock_irqrestore(&r->lock, flags);
+ BUG_ON(random_data_len > 32);
+
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ chacha20_block(chacha_state, first_block);
+
+ memcpy(key, first_block, CHACHA_KEY_SIZE);
+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memzero_explicit(first_block, sizeof(first_block));
}
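
As a size note on the function above: one ChaCha block is CHACHA_BLOCK_SIZE = 64 bytes, the first CHACHA_KEY_SIZE = 32 bytes immediately replace the caller's key (the "fast key erasure" step), and only the remaining 32 bytes can be handed back as random_data, which is why random_data_len is capped at 32.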
-struct fast_pool {
- __u32 pool[4];
- unsigned long last;
- unsigned short reg_idx;
- unsigned char count;
-};
+/*
+ * Return whether the crng seed is considered to be sufficiently old
+ * that a reseeding is needed. This happens if the last reseeding
+ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
+ * proportional to the uptime.
+ */
+static bool crng_has_old_seed(void)
+{
+ static bool early_boot = true;
+ unsigned long interval = CRNG_RESEED_INTERVAL;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+}
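
A worked example of the early-boot scaling above, assuming HZ jiffies per second: at 30 seconds of uptime the interval is max(CRNG_RESEED_START_INTERVAL, 30 / 2 * HZ) = 15 seconds; once uptime reaches CRNG_RESEED_INTERVAL / HZ * 2 = 120 seconds, early_boot is cleared and the fixed 60-second interval applies.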
/*
- * This is a fast mixing routine used by the interrupt randomness
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
+ * This function returns a ChaCha state that you may use for generating
+ * random data. It also returns up to 32 bytes on its own of random data
+ * that may be used; random_data_len may not be greater than 32.
*/
-static void fast_mix(struct fast_pool *f)
+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- __u32 a = f->pool[0], b = f->pool[1];
- __u32 c = f->pool[2], d = f->pool[3];
+ unsigned long flags;
+ struct crng *crng;
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+ BUG_ON(random_data_len > 32);
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
+ /*
+ * For the fast path, we check whether we're ready, unlocked first, and
+ * then re-check once locked later. In the case where we're really not
+ * ready, we do fast key erasure with the base_crng directly, extracting
+ * when crng_init is CRNG_EMPTY.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+ if (!ready) {
+ if (crng_init == CRNG_EMPTY)
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
+ }
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+ /*
+ * If the base_crng is old enough, we reseed, which in turn bumps the
+ * generation counter that we check below.
+ */
+ if (unlikely(crng_has_old_seed()))
+ crng_reseed();
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
- f->pool[0] = a; f->pool[1] = b;
- f->pool[2] = c; f->pool[3] = d;
- f->count++;
+ /*
+ * If our per-cpu crng is older than the base_crng, then it means
+ * somebody reseeded the base_crng. In that case, we do fast key
+ * erasure on the base_crng, and use its output as the new key
+ * for our per-cpu crng. This brings us up to date with base_crng.
+ */
+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
+ spin_lock(&base_crng.lock);
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ crng->key, sizeof(crng->key));
+ crng->generation = base_crng.generation;
+ spin_unlock(&base_crng.lock);
+ }
+
+ /*
+ * Finally, when we've made it this far, our per-cpu crng has an up
+ * to date key, and we can do fast key erasure with it to produce
+ * some random data and a ChaCha state for the caller. All other
+ * branches of this function are "unlikely", so most of the time we
+ * should wind up here immediately.
+ */
+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
+ local_unlock_irqrestore(&crngs.lock, flags);
}
-static void process_random_ready_list(void)
+static void _get_random_bytes(void *buf, size_t len)
{
- unsigned long flags;
- struct random_ready_callback *rdy, *tmp;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+ size_t first_block_len;
- spin_lock_irqsave(&random_ready_list_lock, flags);
- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
- struct module *owner = rdy->owner;
+ if (!len)
+ return;
+
+ first_block_len = min_t(size_t, 32, len);
+ crng_make_state(chacha_state, buf, first_block_len);
+ len -= first_block_len;
+ buf += first_block_len;
+
+ while (len) {
+ if (len < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+ memcpy(buf, tmp, len);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
+ }
- list_del_init(&rdy->list);
- rdy->func(rdy);
- module_put(owner);
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ len -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
}
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
}
/*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not rely on the hardware random
+ * number generator. For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
-static void credit_entropy_bits(struct entropy_store *r, int nbits)
+void get_random_bytes(void *buf, size_t len)
{
- int entropy_count, orig;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
+ warn_unseeded_randomness();
+ _get_random_bytes(buf, len);
+}
+EXPORT_SYMBOL(get_random_bytes);
- if (!nbits)
- return;
+static ssize_t get_random_bytes_user(struct iov_iter *iter)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 block[CHACHA_BLOCK_SIZE];
+ size_t ret = 0, copied;
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- if (nfrac < 0) {
- /* Debit */
- entropy_count += nfrac;
- } else {
- /*
- * Credit: we have to account for the possibility of
- * overwriting already present entropy. Even in the
- * ideal case of pure Shannon entropy, new contributions
- * approach the full value asymptotically:
- *
- * entropy <- entropy + (pool_size - entropy) *
- * (1 - exp(-add_entropy/pool_size))
- *
- * For add_entropy <= pool_size/2 then
- * (1 - exp(-add_entropy/pool_size)) >=
- * (add_entropy/pool_size)*0.7869...
- * so we can approximate the exponential with
- * 3/4*add_entropy/pool_size and still be on the
- * safe side by adding at most pool_size/2 at a time.
- *
- * The use of pool_size-2 in the while statement is to
- * prevent rounding artifacts from making the loop
- * arbitrarily long; this limits the loop to log2(pool_size)*2
- * turns no matter how large nbits is.
- */
- int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
- /* The +2 corresponds to the /4 in the denominator */
-
- do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
- unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
-
- entropy_count += add;
- pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
- }
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
- entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+ * bytes, in case userspace causes copy_to_user() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ /*
+ * However, if we're doing a read of len <= 32, we don't need to
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
+ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ goto out_zero_chacha;
+ }
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
+ for (;;) {
+ chacha20_block(chacha_state, block);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
- if (r == &input_pool) {
- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ copied = copy_to_iter(block, sizeof(block), iter);
+ ret += copied;
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, r);
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
}
+
+ memzero_explicit(block, sizeof(block));
+out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ return ret ? ret : -EFAULT;
}
-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+/*
+ * Batched entropy returns random integers. The quality of the random
+ * number is as good as /dev/urandom. In order to ensure that the randomness
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
+ */
+
+#define DEFINE_BATCHED_ENTROPY(type) \
+struct batch_ ##type { \
+ /* \
+ * We make this 1.5x a ChaCha block, so that we get the \
+ * remaining 32 bytes from fast key erasure, plus one full \
+ * block from the detached ChaCha state. We can increase \
+ * the size of this later if needed so long as we keep the \
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
+ */ \
+ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ local_lock_t lock; \
+ unsigned long generation; \
+ unsigned int position; \
+}; \
+ \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
+ .position = UINT_MAX \
+}; \
+ \
+type get_random_ ##type(void) \
+{ \
+ type ret; \
+ unsigned long flags; \
+ struct batch_ ##type *batch; \
+ unsigned long next_gen; \
+ \
+ warn_unseeded_randomness(); \
+ \
+ if (!crng_ready()) { \
+ _get_random_bytes(&ret, sizeof(ret)); \
+ return ret; \
+ } \
+ \
+ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
+ batch = raw_cpu_ptr(&batched_entropy_##type); \
+ \
+ next_gen = READ_ONCE(base_crng.generation); \
+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
+ next_gen != batch->generation) { \
+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
+ batch->position = 0; \
+ batch->generation = next_gen; \
+ } \
+ \
+ ret = batch->entropy[batch->position]; \
+ batch->entropy[batch->position] = 0; \
+ ++batch->position; \
+ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u32)
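
The macro above stamps out get_random_u64() and get_random_u32(), which serve integers from a per-cpu buffer that is refilled by _get_random_bytes() and invalidated whenever the base_crng generation changes. A hypothetical usage sketch (variable names are illustrative):

	u32 flow_hash_seed = get_random_u32();	/* cheap one-off 32-bit value */
	u64 object_cookie  = get_random_u64();	/* cheap one-off 64-bit value */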
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int __cold random_prepare_cpu(unsigned int cpu)
{
- const int nbits_max = r->poolinfo->poolwords * 32;
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
+}
+#endif
- if (nbits < 0)
- return -EINVAL;
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. It is not recommended for
+ * use. Use get_random_bytes() instead. It returns the number of
+ * bytes filled in.
+ */
+size_t __must_check get_random_bytes_arch(void *buf, size_t len)
+{
+ size_t left = len;
+ u8 *p = buf;
- /* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
+ while (left) {
+ unsigned long v;
+ size_t block_len = min_t(size_t, left, sizeof(unsigned long));
- credit_entropy_bits(r, nbits);
- return 0;
+ if (!arch_get_random_long(&v))
+ break;
+
+ memcpy(p, &v, block_len);
+ p += block_len;
+ left -= block_len;
+ }
+
+ return len - left;
}
+EXPORT_SYMBOL(get_random_bytes_arch);
-/*********************************************************************
+
+/**********************************************************************
*
- * CRNG using CHACHA20
+ * Entropy accumulation and extraction routines.
*
- *********************************************************************/
-
-#define CRNG_RESEED_INTERVAL (300*HZ)
-
-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ * Callers may add entropy via:
+ *
+ * static void mix_pool_bytes(const void *buf, size_t len)
+ *
+ * After which, if added entropy should be credited:
+ *
+ * static void credit_init_bits(size_t bits)
+ *
+ * Finally, extract entropy via:
+ *
+ * static void extract_entropy(void *buf, size_t len)
+ *
+ **********************************************************************/
-#ifdef CONFIG_NUMA
-/*
- * Hack to deal with crazy userspace progams when they are all trying
- * to access /dev/urandom in parallel. The programs are almost
- * certainly doing something terribly wrong, but we'll work around
- * their brain damage.
- */
-static struct crng_state **crng_node_pool __read_mostly;
-#endif
+enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
+ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
+};
-static void invalidate_batched_entropy(void);
-static void numa_crng_init(void);
+static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+ unsigned int init_bits;
+} input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+ .hash.outlen = BLAKE2S_HASH_SIZE,
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static int __init parse_trust_cpu(char *arg)
+static void _mix_pool_bytes(const void *buf, size_t len)
{
- return kstrtobool(arg, &trust_cpu);
+ blake2s_update(&input_pool.hash, buf, len);
}
-early_param("random.trust_cpu", parse_trust_cpu);
-static bool crng_init_try_arch(struct crng_state *crng)
+/*
+ * This function adds bytes into the input pool. It does not
+ * update the initialization bit counter; the caller should call
+ * credit_init_bits if this is appropriate.
+ */
+static void mix_pool_bytes(const void *buf, size_t len)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
-
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
- }
- crng->state[i] ^= rv;
- }
+ unsigned long flags;
- return arch_init;
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+/*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key, that's then expanded block-by-block.
+ */
+static void extract_entropy(void *buf, size_t len)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
-
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long_early(&rv) &&
- !arch_get_random_long_early(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
- }
- crng->state[i] ^= rv;
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+ struct {
+ unsigned long rdseed[32 / sizeof(long)];
+ size_t counter;
+ } block;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
+ if (!arch_get_random_seed_long(&block.rdseed[i]) &&
+ !arch_get_random_long(&block.rdseed[i]))
+ block.rdseed[i] = random_get_entropy();
}
- return arch_init;
-}
+ spin_lock_irqsave(&input_pool.lock, flags);
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
-{
- chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
- crng_init_try_arch(crng);
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
+ /* seed = HASHPRF(last_key, entropy_input) */
+ blake2s_final(&input_pool.hash, seed);
-static void __init crng_initialize_primary(struct crng_state *crng)
-{
- chacha_init_consts(crng->state);
- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- pr_notice("crng done (trusting CPU's manufacturer)\n");
+ /* next_key = HASHPRF(seed, RDSEED || 0) */
+ block.counter = 0;
+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+
+ while (len) {
+ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ len -= i;
+ buf += i;
}
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+
+ memzero_explicit(seed, sizeof(seed));
+ memzero_explicit(&block, sizeof(block));
}
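The main consumer of extract_entropy() is the reseed path referenced below; a simplified sketch (not the exact body) of how crng_reseed(), defined earlier in this file, keys the ChaCha generator from the pool:

	static void crng_reseed(void)
	{
		unsigned long flags;
		u8 key[CHACHA_KEY_SIZE];

		extract_entropy(key, sizeof(key));

		spin_lock_irqsave(&base_crng.lock, flags);
		memcpy(base_crng.key, key, sizeof(base_crng.key));
		if (!crng_ready())
			crng_init = CRNG_READY;
		/* Simplified: the real function also advances base_crng.generation. */
		spin_unlock_irqrestore(&base_crng.lock, flags);
		memzero_explicit(key, sizeof(key));
	}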
-static void crng_finalize_init(struct crng_state *crng)
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
{
- if (crng != &primary_crng || crng_init >= 2)
- return;
- if (!system_wq) {
- /* We can't call numa_crng_init until we have workqueues,
- * so mark this for processing later. */
- crng_need_final_init = true;
+ unsigned int new, orig, add;
+ unsigned long flags;
+
+ if (!bits)
return;
- }
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
-}
+ add = min_t(size_t, bits, POOL_BITS);
-#ifdef CONFIG_NUMA
-static void do_numa_crng_init(struct work_struct *work)
-{
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize_secondary(crng);
- pool[i] = crng;
- }
- /* pairs with READ_ONCE() in select_crng() */
- if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
- for_each_node(i)
- kfree(pool[i]);
- kfree(pool);
+ do {
+ orig = READ_ONCE(input_pool.init_bits);
+ new = min_t(unsigned int, POOL_BITS, orig + add);
+ } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
+
+ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
+ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (urandom_warning.missed)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
+ spin_lock_irqsave(&base_crng.lock, flags);
+ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
+ if (crng_init == CRNG_EMPTY) {
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_init = CRNG_EARLY;
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
}
}
-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
-static void numa_crng_init(void)
-{
- schedule_work(&numa_crng_init_work);
-}
+/**********************************************************************
+ *
+ * Entropy collection routines.
+ *
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+ * void add_device_randomness(const void *buf, size_t len);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ * void add_bootloader_randomness(const void *buf, size_t len);
+ * void add_interrupt_randomness(int irq);
+ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* credit any actual entropy to
+ * the pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
+ * and device tree, and credits its input depending on whether or not the
+ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the input pool roughly once a second or after 64
+ * interrupts, crediting 1 bit of entropy for whichever comes first.
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well
+ * as the event type information from the hardware.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * The last two routines try to estimate how many bits of entropy
+ * to credit. They do this by keeping track of the first and second
+ * order deltas of the event timings.
+ *
+ **********************************************************************/
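To make the driver-facing half of this list concrete, a hypothetical platform driver that knows a device serial number and has a true hardware RNG might feed the pool like this (the example_* helpers and types are invented for the sketch, not kernel APIs):

	static int example_rng_probe(struct example_dev *dev)
	{
		u8 serial[16], seed[32];

		/* Device-unique but public data: mix it in, credit nothing. */
		example_read_serial(dev, serial, sizeof(serial));
		add_device_randomness(serial, sizeof(serial));

		/* True hardware RNG output: mix it in and credit it in full. */
		if (example_read_trng(dev, seed, sizeof(seed)) == 0)
			add_hwgenerator_randomness(seed, sizeof(seed), sizeof(seed) * 8);

		return 0;
	}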
-static struct crng_state *select_crng(void)
+static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static int __init parse_trust_cpu(char *arg)
{
- struct crng_state **pool;
- int nid = numa_node_id();
-
- /* pairs with cmpxchg_release() in do_numa_crng_init() */
- pool = READ_ONCE(crng_node_pool);
- if (pool && pool[nid])
- return pool[nid];
-
- return &primary_crng;
+ return kstrtobool(arg, &trust_cpu);
}
-#else
-static void numa_crng_init(void) {}
-
-static struct crng_state *select_crng(void)
+static int __init parse_trust_bootloader(char *arg)
{
- return &primary_crng;
+ return kstrtobool(arg, &trust_bootloader);
}
-#endif
+early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
/*
- * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally. Returns the number of
- * bytes processed from cp.
+ * The first collection of entropy occurs at system boot while interrupts
+ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
+ * utsname(), and the command line. Depending on the above configuration knob,
+ * RDSEED may be considered sufficient for initialization. Note that much
+ * earlier setup may already have pushed entropy into the input pool by the
+ * time we get here.
*/
-static size_t crng_fast_load(const char *cp, size_t len)
+int __init random_init(const char *command_line)
{
- unsigned long flags;
- char *p;
- size_t ret = 0;
+ ktime_t now = ktime_get_real();
+ unsigned int i, arch_bits;
+ unsigned long entropy;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- p = (unsigned char *) &primary_crng.state[4];
- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--; ret++;
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
- invalidate_batched_entropy();
- crng_init = 1;
- pr_notice("fast init done\n");
+#if defined(LATENT_ENTROPY_PLUGIN)
+ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+#endif
+
+ for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
+ i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
+ if (!arch_get_random_seed_long_early(&entropy) &&
+ !arch_get_random_long_early(&entropy)) {
+ entropy = random_get_entropy();
+ arch_bits -= sizeof(entropy) * 8;
+ }
+ _mix_pool_bytes(&entropy, sizeof(entropy));
}
- return ret;
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+ _mix_pool_bytes(command_line, strlen(command_line));
+ add_latent_entropy();
+
+ if (crng_ready())
+ crng_reseed();
+ else if (trust_cpu)
+ _credit_init_bits(arch_bits);
+
+ return 0;
}
/*
- * crng_slow_load() is called by add_device_randomness, which has two
- * attributes. (1) We can't trust the buffer passed to it is
- * guaranteed to be unpredictable (so it might not have any entropy at
- * all), and (2) it doesn't have the performance constraints of
- * crng_fast_load().
+ * Add device- or boot-specific data to the input pool to help
+ * initialize it.
*
- * So we do something more comprehensive which is guaranteed to touch
- * all of the primary_crng's state, and which uses a LFSR with a
- * period of 255 as part of the mixing algorithm. Finally, we do
- * *not* advance crng_init_cnt since the buffer we get may be something
- * like a fixed DMI table (for example), which might very well be
- * unique to the machine, but is otherwise unvarying.
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
*/
-static int crng_slow_load(const char *cp, size_t len)
+void add_device_randomness(const void *buf, size_t len)
{
- unsigned long flags;
- static unsigned char lfsr = 1;
- unsigned char tmp;
- unsigned i, max = CHACHA_KEY_SIZE;
- const char * src_buf = cp;
- char * dest_buf = (char *) &primary_crng.state[4];
-
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- if (len > max)
- max = len;
-
- for (i = 0; i < max ; i++) {
- tmp = lfsr;
- lfsr >>= 1;
- if (tmp & 1)
- lfsr ^= 0xE1;
- tmp = dest_buf[i % CHACHA_KEY_SIZE];
- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
- lfsr += (tmp << 3) | (tmp >> 5);
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 1;
-}
+ unsigned long entropy = random_get_entropy();
+ unsigned long flags;
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
-{
- unsigned long flags;
- int i, num;
- union {
- __u8 block[CHACHA_BLOCK_SIZE];
- __u32 key[8];
- } buf;
-
- if (r) {
- num = extract_entropy(r, &buf, 32, 16, 0);
- if (num == 0)
- return;
- } else {
- _extract_crng(&primary_crng, buf.block);
- _crng_backtrack_protect(&primary_crng, buf.block,
- CHACHA_KEY_SIZE);
- }
- spin_lock_irqsave(&crng->lock, flags);
- for (i = 0; i < 8; i++) {
- unsigned long rv;
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- crng->state[i+4] ^= buf.key[i] ^ rv;
- }
- memzero_explicit(&buf, sizeof(buf));
- WRITE_ONCE(crng->init_time, jiffies);
- spin_unlock_irqrestore(&crng->lock, flags);
- crng_finalize_init(crng);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
+EXPORT_SYMBOL(add_device_randomness);
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA_BLOCK_SIZE])
+/*
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
- unsigned long v, flags, init_time;
-
- if (crng_ready()) {
- init_time = READ_ONCE(crng->init_time);
- if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
- time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(crng, crng == &primary_crng ?
- &input_pool : NULL);
- }
- spin_lock_irqsave(&crng->lock, flags);
- if (arch_get_random_long(&v))
- crng->state[14] ^= v;
- chacha20_block(&crng->state[0], out);
- if (crng->state[12] == 0)
- crng->state[13]++;
- spin_unlock_irqrestore(&crng->lock, flags);
+ mix_pool_bytes(buf, len);
+ credit_init_bits(entropy);
+
+ /*
+ * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
+ * we're not yet initialized.
+ */
+ if (!kthread_should_stop() && crng_ready())
+ schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
+/*
+ * Handle random seed passed by bootloader, and credit it if
+ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ */
+void __init add_bootloader_randomness(const void *buf, size_t len)
{
- _extract_crng(select_crng(), out);
+ mix_pool_bytes(buf, len);
+ if (trust_bootloader)
+ credit_init_bits(len * 8);
}
+struct fast_pool {
+ struct work_struct mix;
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+};
+
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+#ifdef CONFIG_64BIT
+#define FASTMIX_PERM SIPHASH_PERMUTATION
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+#else
+#define FASTMIX_PERM HSIPHASH_PERMUTATION
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+#endif
+};
+
/*
- * Use the leftover bytes from the CRNG block output (if there is
- * enough) to mutate the CRNG key to provide backtracking protection.
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+ * four-word SipHash state, while v represents a two-word input.
*/
-static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
- unsigned long flags;
- __u32 *s, *d;
- int i;
-
- used = round_up(used, sizeof(__u32));
- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
- extract_crng(tmp);
- used = 0;
- }
- spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
- d = &crng->state[4];
- for (i=0; i < 8; i++)
- *d++ ^= *s++;
- spin_unlock_irqrestore(&crng->lock, flags);
+ s[3] ^= v1;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v1;
+ s[3] ^= v2;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v2;
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+int __cold random_online_cpu(unsigned int cpu)
{
- _crng_backtrack_protect(select_crng(), tmp, used);
+ /*
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
+ */
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
}
+#endif
-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+static void mix_interrupt_randomness(struct work_struct *work)
{
- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
- int large_request = (nbytes > 256);
-
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current)) {
- if (ret == 0)
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
- }
-
- extract_crng(tmp);
- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
- }
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ /*
+ * The size of the copied stack pool is explicitly 2 longs so that we
+ * only ever ingest half of the siphash output each time, retaining
+ * the other half as the next "key" that carries over. The entropy is
+ * supposed to be sufficiently dispersed between bits so on average
+ * we don't wind up "losing" some.
+ */
+ unsigned long pool[2];
+ unsigned int count;
- nbytes -= i;
- buf += i;
- ret += i;
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ local_irq_enable();
+ return;
}
- crng_backtrack_protect(tmp, i);
- /* Wipe data just written to memory */
- memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
-}
+ /*
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view, before we reenable irqs again.
+ */
+ memcpy(pool, fast_pool->pool, sizeof(pool));
+ count = fast_pool->count;
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
+ mix_pool_bytes(pool, sizeof(pool));
+ credit_init_bits(max(1u, (count & U16_MAX) / 64));
-/*********************************************************************
- *
- * Entropy input management
- *
- *********************************************************************/
+ memzero_explicit(pool, sizeof(pool));
+}
-/* There is one of these per entropy source */
-struct timer_rand_state {
- cycles_t last_time;
- long last_delta, last_delta2;
-};
+void add_interrupt_randomness(int irq)
+{
+ enum { MIX_INFLIGHT = 1U << 31 };
+ unsigned long entropy = random_get_entropy();
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
+ fast_mix(fast_pool->pool, entropy,
+ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
+ new_count = ++fast_pool->count;
-/*
- * Add device- or boot-specific data to the input pool to help
- * initialize it.
- *
- * None of this adds any entropy; it is meant to avoid the problem of
- * the entropy pool having similar initial state across largely
- * identical devices.
- */
-void add_device_randomness(const void *buf, unsigned int size)
-{
- unsigned long time = random_get_entropy() ^ jiffies;
- unsigned long flags;
+ if (new_count & MIX_INFLIGHT)
+ return;
- if (!crng_ready() && size)
- crng_slow_load(buf, size);
+ if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
+ return;
- trace_add_device_randomness(size, _RET_IP_);
- spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size);
- _mix_pool_bytes(&input_pool, &time, sizeof(time));
- spin_unlock_irqrestore(&input_pool.lock, flags);
+ if (unlikely(!fast_pool->mix.func))
+ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+ fast_pool->count |= MIX_INFLIGHT;
+ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
-EXPORT_SYMBOL(add_device_randomness);
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ unsigned long last_time;
+ long last_delta, last_delta2;
+};
/*
* This function adds entropy to the entropy "pool" by using timing
- * delays. It uses the timer_rand_state structure to make an estimate
- * of how many bits of entropy this call has added to the pool.
- *
- * The number "num" is also added to the pool - it should somehow describe
- * the type of event which just happened. This is currently 0-255 for
- * keyboard scan codes, and 256 upwards for interrupts.
- *
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool. The
+ * value "num" is also added to the pool; it should somehow describe
+ * the type of event that just happened.
*/
-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- struct entropy_store *r;
- struct {
- long jiffies;
- unsigned cycles;
- unsigned num;
- } sample;
+ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
long delta, delta2, delta3;
+ unsigned int bits;
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
+ /*
+ * If we're in a hard IRQ, add_interrupt_randomness() will be called
+ * sometime after, so mix into the fast pool.
+ */
+ if (in_hardirq()) {
+ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
+ } else {
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(&num, sizeof(num));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+
+ if (crng_ready())
+ return;
/*
* Calculate number of bits of randomness we probably added.
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - READ_ONCE(state->last_time);
- WRITE_ONCE(state->last_time, sample.jiffies);
+ delta = now - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, now);
delta2 = delta - READ_ONCE(state->last_delta);
WRITE_ONCE(state->last_delta, delta);
@@ -1201,378 +1071,65 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
delta = delta3;
/*
- * delta is now minimum absolute delta.
- * Round down by 1 bit on general principles,
- * and limit entropy estimate to 12 bits.
+ * delta is now minimum absolute delta. Round down by 1 bit
+ * on general principles, and limit entropy estimate to 11 bits.
*/
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ bits = min(fls(delta >> 1), 11);
+
+ /*
+ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
+ * will run after this, which uses a different crediting scheme of 1 bit
+ * per every 64 interrupts. In order to let that function do accounting
+ * close to the one in this function, we credit a full 64/64 bit per bit,
+ * and then subtract one to account for the extra one added.
+ */
+ if (in_hardirq())
+ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
+ else
+ _credit_init_bits(bits);
}
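For example, an input event handled in hard IRQ context with an estimate of bits = 2 adds 2 * 64 - 1 = 127 to the per-CPU count here; the interrupt delivering the event then adds 1 more via add_interrupt_randomness(), for 128 in total, which mix_interrupt_randomness() later turns back into the same 2 credited bits when it divides the count by 64.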
-void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value)
+void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
static unsigned char last_value;
+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
- /* ignore autorepeat and the like */
+ /* Ignore autorepeat and the like. */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
-
-#ifdef ADD_INTERRUPT_BENCH
-static unsigned long avg_cycles, avg_deviation;
-
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
-
-static void add_interrupt_bench(cycles_t start)
-{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
-}
-#else
-#define add_interrupt_bench(x)
-#endif
-
-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
-{
- __u32 *ptr = (__u32 *) regs;
- unsigned int idx;
-
- if (regs == NULL)
- return 0;
- idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
- idx = 0;
- ptr += idx++;
- WRITE_ONCE(f->reg_idx, idx);
- return *ptr;
-}
-
-void add_interrupt_randomness(int irq, int irq_flags)
-{
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- __u64 ip;
-
- if (cycles == 0)
- cycles = get_reg(fast_pool, regs);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
-
- if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
- return;
- }
-
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
- return;
-
- r = &input_pool;
- if (!spin_trylock(&r->lock))
- return;
-
- fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
-
- fast_pool->count = 0;
-
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
-}
-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
-
#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
if (!disk || !disk->random)
return;
- /* first major is 1, so we get >= 0x200 here */
+ /* First major is 1, so we get >= 0x200 here. */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
-#endif
-
-/*********************************************************************
- *
- * Entropy extraction routines
- *
- *********************************************************************/
-/*
- * This function decides how many bytes to actually take from the
- * given pool, and also debits the entropy count accordingly.
- */
-static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
+void __cold rand_initialize_disk(struct gendisk *disk)
{
- int entropy_count, orig, have_bytes;
- size_t ibytes, nfrac;
-
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
-
- /* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- ibytes = nbytes;
- /* never pull more than available */
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- if (ibytes < min)
- ibytes = 0;
-
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: pool %s count %d\n",
- r->name, entropy_count);
- entropy_count = 0;
- }
- nfrac = ibytes << (ENTROPY_SHIFT + 3);
- if ((size_t) entropy_count > nfrac)
- entropy_count -= nfrac;
- else
- entropy_count = 0;
-
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
-
- return ibytes;
-}
-
-/*
- * This function does the actual extraction for extract_entropy.
- *
- * Note: we assume that .poolwords is a multiple of 16 words.
- */
-static void extract_buf(struct entropy_store *r, __u8 *out)
-{
- int i;
- union {
- __u32 w[5];
- unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA1_WORKSPACE_WORDS];
- unsigned long flags;
-
- /*
- * If we have an architectural hardware random number
- * generator, use it for SHA's initial vector
- */
- sha1_init(hash.w);
- for (i = 0; i < LONGS(20); i++) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- hash.l[i] = v;
- }
-
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
-
- /*
- * We mix the hash back into the pool to prevent backtracking
- * attacks (where the attacker knows the state of the pool
- * plus the current outputs, and attempts to find previous
- * outputs), unless the hash function can be inverted. By
- * mixing at least a SHA1 worth of hash data back, we make
- * brute-forcing the feedback as hard as brute-forcing the
- * hash.
- */
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
- spin_unlock_irqrestore(&r->lock, flags);
-
- memzero_explicit(workspace, sizeof(workspace));
+ struct timer_rand_state *state;
/*
- * In case the hash function has some recognizable output
- * pattern, we fold it in half. Thus, we always feed back
- * twice as much data as we output.
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
*/
- hash.w[0] ^= hash.w[3];
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
-}
-
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips)
-{
- ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- while (nbytes) {
- extract_buf(r, tmp);
-
- if (fips) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
- i = min_t(int, nbytes, EXTRACT_SIZE);
- memcpy(buf, tmp, i);
- nbytes -= i;
- buf += i;
- ret += i;
- }
-
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
-}
-
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
- */
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
-{
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!r->last_data_init) {
- r->last_data_init = 1;
- spin_unlock_irqrestore(&r->lock, flags);
- trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
- extract_buf(r, tmp);
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- }
- spin_unlock_irqrestore(&r->lock, flags);
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state) {
+ state->last_time = INITIAL_JIFFIES;
+ disk->random = state;
}
-
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- nbytes = account(r, nbytes, min, reserved);
-
- return _extract_entropy(r, buf, nbytes, fips_enabled);
}
-
-#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
- void **previous)
-{
-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- const bool print_once = false;
-#else
- static bool print_once __read_mostly;
#endif
- if (print_once ||
- crng_ready() ||
- (previous && (caller == READ_ONCE(*previous))))
- return;
- WRITE_ONCE(*previous, caller);
-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- print_once = true;
-#endif
- if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS "
- "with crng_init=%d\n", func_name, caller,
- crng_init);
-}
-
-/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. It does not rely on the hardware random
- * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch(). In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
- */
-static void _get_random_bytes(void *buf, int nbytes)
-{
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
-
- trace_get_random_bytes(nbytes, _RET_IP_);
-
- while (nbytes >= CHACHA_BLOCK_SIZE) {
- extract_crng(buf);
- buf += CHACHA_BLOCK_SIZE;
- nbytes -= CHACHA_BLOCK_SIZE;
- }
-
- if (nbytes > 0) {
- extract_crng(tmp);
- memcpy(buf, tmp, nbytes);
- crng_backtrack_protect(tmp, nbytes);
- } else
- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
- memzero_explicit(tmp, sizeof(tmp));
-}
-
-void get_random_bytes(void *buf, int nbytes)
-{
- static void *previous;
-
- warn_unseeded_randomness(&previous);
- _get_random_bytes(buf, nbytes);
-}
-EXPORT_SYMBOL(get_random_bytes);
-
-
/*
* Each time the timer fires, we expect that we got an unpredictable
* jump in the cycle counter. Even if the timer is running on another
@@ -1586,353 +1143,176 @@ EXPORT_SYMBOL(get_random_bytes);
*
* So the re-arming always happens in the entropy loop itself.
*/
-static void entropy_timer(struct timer_list *t)
+static void __cold entropy_timer(struct timer_list *t)
{
- credit_entropy_bits(&input_pool, 1);
+ credit_init_bits(1);
}
/*
* If we have an actual cycle counter, see if we can
* generate enough entropy with timing noise
*/
-static void try_to_generate_entropy(void)
+static void __cold try_to_generate_entropy(void)
{
struct {
- unsigned long now;
+ unsigned long entropy;
struct timer_list timer;
} stack;
- stack.now = random_get_entropy();
+ stack.entropy = random_get_entropy();
/* Slow counter - or none. Don't even bother */
- if (stack.now == random_get_entropy())
+ if (stack.entropy == random_get_entropy())
return;
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
- while (!crng_ready()) {
+ while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies+1);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mod_timer(&stack.timer, jiffies + 1);
+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
schedule();
- stack.now = random_get_entropy();
+ stack.entropy = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}
-/*
- * Wait for the urandom pool to be seeded and thus guaranteed to supply
- * cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
- *
- * Returns: 0 if the urandom pool has been seeded.
- * -ERESTARTSYS if the function was interrupted by a signal.
- */
-int wait_for_random_bytes(void)
-{
- if (likely(crng_ready()))
- return 0;
-
- do {
- int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
- if (ret)
- return ret > 0 ? 0 : ret;
- try_to_generate_entropy();
- } while (!crng_ready());
-
- return 0;
-}
-EXPORT_SYMBOL(wait_for_random_bytes);
-
-/*
- * Returns whether or not the urandom pool has been seeded and thus guaranteed
- * to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * u64,int,long} family of functions.
+/**********************************************************************
*
- * Returns: true if the urandom pool has been seeded.
- * false if the urandom pool has not been seeded.
- */
-bool rng_is_initialized(void)
-{
- return crng_ready();
-}
-EXPORT_SYMBOL(rng_is_initialized);
-
-/*
- * Add a callback function that will be invoked when the nonblocking
- * pool is initialised.
+ * Userspace reader/writer interfaces.
*
- * returns: 0 if callback is successfully added
- * -EALREADY if pool is already initialised (callback not called)
- * -ENOENT if module for callback is not alive
- */
-int add_random_ready_callback(struct random_ready_callback *rdy)
-{
- struct module *owner;
- unsigned long flags;
- int err = -EALREADY;
-
- if (crng_ready())
- return err;
-
- owner = rdy->owner;
- if (!try_module_get(owner))
- return -ENOENT;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (crng_ready())
- goto out;
-
- owner = NULL;
-
- list_add(&rdy->list, &random_ready_list);
- err = 0;
-
-out:
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-
- return err;
-}
-EXPORT_SYMBOL(add_random_ready_callback);
-
-/*
- * Delete a previously registered readiness callback function.
- */
-void del_random_ready_callback(struct random_ready_callback *rdy)
-{
- unsigned long flags;
- struct module *owner = NULL;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (!list_empty(&rdy->list)) {
- list_del_init(&rdy->list);
- owner = rdy->owner;
- }
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-}
-EXPORT_SYMBOL(del_random_ready_callback);
-
-/*
- * This function will use the architecture-specific hardware random
- * number generator if it is available. The arch-specific hw RNG will
- * almost certainly be faster than what we can do in software, but it
- * is impossible to verify that it is implemented securely (as
- * opposed to, say, the AES encryption of a sequence number using a
- * key known by the NSA). So it's useful if we need the speed, but
- * only if we're willing to trust the hardware manufacturer not to
- * have put in a back door.
+ * getrandom(2) is the primary modern interface into the RNG and should
+ * be used in preference to anything else.
*
- * Return number of bytes filled in.
- */
-int __must_check get_random_bytes_arch(void *buf, int nbytes)
-{
- int left = nbytes;
- char *p = buf;
-
- trace_get_random_bytes_arch(left, _RET_IP_);
- while (left) {
- unsigned long v;
- int chunk = min_t(int, left, sizeof(unsigned long));
-
- if (!arch_get_random_long(&v))
- break;
-
- memcpy(p, &v, chunk);
- p += chunk;
- left -= chunk;
- }
-
- return nbytes - left;
-}
-EXPORT_SYMBOL(get_random_bytes_arch);
-
-/*
- * init_std_data - initialize pool with system data
+ * Reading from /dev/random has the same functionality as calling
+ * getrandom(2) with flags=0. In earlier versions, however, it had
+ * vastly different semantics and should therefore be avoided, to
+ * prevent backwards compatibility issues.
*
- * @r: pool to initialize
+ * Reading from /dev/urandom has the same functionality as calling
+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
+ * waiting for the RNG to be ready, it should not be used.
*
- * This function clears the pool's entropy count and mixes some system
- * data into the pool to prepare it for use. The pool is not cleared
- * as that can only decrease the entropy in the pool.
- */
-static void __init init_std_data(struct entropy_store *r)
-{
- int i;
- ktime_t now = ktime_get_real();
- unsigned long rv;
-
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv));
- }
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
-}
+ * Writing to either /dev/random or /dev/urandom adds entropy to
+ * the input pool but does not credit it.
+ *
+ * Polling on /dev/random indicates when the RNG is initialized, on
+ * the read side, and when it wants new entropy, on the write side.
+ *
+ * Both /dev/random and /dev/urandom have the same set of ioctls for
+ * adding entropy, getting the entropy count, zeroing the count, and
+ * reseeding the crng.
+ *
+ **********************************************************************/
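For reference, the recommended userspace pattern described above is a plain getrandom(2) call with flags=0; a minimal sketch using the glibc wrapper:

	#include <stdio.h>
	#include <sys/random.h>

	int fill_key(unsigned char *key, size_t len)
	{
		/* Blocks only until the kernel RNG is initialized, never afterwards. */
		ssize_t n = getrandom(key, len, 0);

		if (n < 0 || (size_t)n != len) {
			perror("getrandom");
			return -1;
		}
		return 0;
	}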
-/*
- * Note that setup_arch() may call add_device_randomness()
- * long before we get here. This allows seeding of the pools
- * with some platform dependent data very early in the boot
- * process. But it limits our options here. We must use
- * statically allocated structures that already have all
- * initializations complete at compile time. We should also
- * take care not to overwrite the precious per platform data
- * we were given.
- */
-int __init rand_initialize(void)
+SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
- init_std_data(&input_pool);
- if (crng_need_final_init)
- crng_finalize_init(&primary_crng);
- crng_initialize_primary(&primary_crng);
- crng_global_init_time = jiffies;
- if (ratelimit_disable) {
- urandom_warning.interval = 0;
- unseeded_warning.interval = 0;
- }
- return 0;
-}
+ struct iov_iter iter;
+ struct iovec iov;
+ int ret;
-#ifdef CONFIG_BLOCK
-void rand_initialize_disk(struct gendisk *disk)
-{
- struct timer_rand_state *state;
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
/*
- * If kzalloc returns null, we just won't use that entropy
- * source.
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
*/
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state) {
- state->last_time = INITIAL_JIFFIES;
- disk->random = state;
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
+
+ if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
}
+
+ ret = import_single_range(READ, ubuf, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ return get_random_bytes_user(&iter);
}
-#endif
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
- int ret;
-
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
- ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
- return ret;
+ poll_wait(file, &crng_init_wait, wait);
+ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t write_pool_user(struct iov_iter *iter)
{
- unsigned long flags;
- static int maxwarn = 10;
+ u8 block[BLAKE2S_BLOCK_SIZE];
+ ssize_t ret = 0;
+ size_t copied;
- if (!crng_ready() && maxwarn > 0) {
- maxwarn--;
- if (__ratelimit(&urandom_warning))
- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
- current->comm, nbytes);
- spin_lock_irqsave(&primary_crng.lock, flags);
- crng_init_cnt = 0;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- }
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
- return urandom_read_nowarn(file, buf, nbytes, ppos);
-}
+ for (;;) {
+ copied = copy_from_iter(block, sizeof(block), iter);
+ ret += copied;
+ mix_pool_bytes(block, copied);
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
-{
- int ret;
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ }
- ret = wait_for_random_bytes();
- if (ret != 0)
- return ret;
- return urandom_read_nowarn(file, buf, nbytes, ppos);
+ memzero_explicit(block, sizeof(block));
+ return ret ? ret : -EFAULT;
}
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
+static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
- __poll_t mask;
-
- poll_wait(file, &crng_init_wait, wait);
- poll_wait(file, &random_write_wait, wait);
- mask = 0;
- if (crng_ready())
- mask |= EPOLLIN | EPOLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
- mask |= EPOLLOUT | EPOLLWRNORM;
- return mask;
+ return write_pool_user(iter);
}
-static int
-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
- size_t bytes;
- __u32 t, buf[16];
- const char __user *p = buffer;
-
- while (count > 0) {
- int b, i = 0;
-
- bytes = min(count, sizeof(buf));
- if (copy_from_user(&buf, p, bytes))
- return -EFAULT;
+ static int maxwarn = 10;
- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
- if (!arch_get_random_int(&t))
- break;
- buf[i] ^= t;
+ if (!crng_ready()) {
+ if (!ratelimit_disable && maxwarn <= 0)
+ ++urandom_warning.missed;
+ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
+ --maxwarn;
+ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
+ current->comm, iov_iter_count(iter));
}
-
- count -= bytes;
- p += bytes;
-
- mix_pool_bytes(r, buf, bytes);
- cond_resched();
}
- return 0;
+ return get_random_bytes_user(iter);
}
-static ssize_t random_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
- size_t ret;
+ int ret;
- ret = write_pool(&input_pool, buffer, count);
- if (ret)
+ ret = wait_for_random_bytes();
+ if (ret != 0)
return ret;
-
- return (ssize_t)count;
+ return get_random_bytes_user(iter);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
- int size, ent_count;
int __user *p = (int __user *)arg;
- int retval;
+ int ent_count;
switch (cmd) {
case RNDGETENTCNT:
- /* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
- if (put_user(ent_count, p))
+ /* Inherently racy, no point locking. */
+ if (put_user(input_pool.init_bits, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1940,41 +1320,48 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(&input_pool, ent_count);
- case RNDADDENTROPY:
+ if (ent_count < 0)
+ return -EINVAL;
+ credit_init_bits(ent_count);
+ return 0;
+ case RNDADDENTROPY: {
+ struct iov_iter iter;
+ struct iovec iov;
+ ssize_t ret;
+ int len;
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p++))
return -EFAULT;
if (ent_count < 0)
return -EINVAL;
- if (get_user(size, p++))
+ if (get_user(len, p++))
return -EFAULT;
- retval = write_pool(&input_pool, (const char __user *)p,
- size);
- if (retval < 0)
- return retval;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ ret = import_single_range(WRITE, p, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ ret = write_pool_user(&iter);
+ if (unlikely(ret < 0))
+ return ret;
+ /* Since we're crediting, enforce that it was all written into the pool. */
+ if (unlikely(ret != len))
+ return -EFAULT;
+ credit_init_bits(ent_count);
+ return 0;
+ }
case RNDZAPENTCNT:
case RNDCLEARPOOL:
- /*
- * Clear the entropy pool counters. We no longer clear
- * the entropy pool, as that's silly.
- */
+ /* No longer has any effect. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (crng_init < 2)
+ if (!crng_ready())
return -ENODATA;
- crng_reseed(&primary_crng, &input_pool);
- WRITE_ONCE(crng_global_init_time, jiffies - 1);
+ crng_reseed();
return 0;
default:
return -EINVAL;
@@ -1987,55 +1374,56 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
- .read = random_read,
- .write = random_write,
- .poll = random_poll,
+ .read_iter = random_read_iter,
+ .write_iter = random_write_iter,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
- .write = random_write,
+ .read_iter = urandom_read_iter,
+ .write_iter = random_write_iter,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
- unsigned int, flags)
-{
- int ret;
-
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
- return -EINVAL;
-
- /*
- * Requesting insecure and blocking randomness at the same time makes
- * no sense.
- */
- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
- return -EINVAL;
-
- if (count > INT_MAX)
- count = INT_MAX;
-
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
- if (flags & GRND_NONBLOCK)
- return -EAGAIN;
- ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
- }
- return urandom_read_nowarn(NULL, buf, count, NULL);
-}
/********************************************************************
*
- * Sysctl interface
+ * Sysctl interface.
+ *
+ * These are partly unused legacy knobs with dummy values to not break
+ * userspace and partly still useful things. They are usually accessible
+ * in /proc/sys/kernel/random/ and are as follows:
+ *
+ * - boot_id - a UUID representing the current boot.
+ *
+ * - uuid - a random UUID, different each time the file is read.
+ *
+ * - poolsize - the number of bits of entropy that the input pool can
+ * hold, tied to the POOL_BITS constant.
+ *
+ * - entropy_avail - the number of bits of entropy currently in the
+ * input pool. Always <= poolsize.
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+ * more entropy, tied to the POOL_READY_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
+ * It is writable to avoid breaking old userspaces, but writing
+ * to it does not change any behavior of the RNG.
*
********************************************************************/
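These files can be inspected from userspace like any other procfs entry; a small sketch that prints the current entropy_avail value (which reads as 256, i.e. POOL_BITS, once the RNG is initialized):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/proc/sys/kernel/random/entropy_avail", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("entropy_avail: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}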
@@ -2043,25 +1431,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h>
-static int min_write_thresh;
-static int max_write_thresh = INPUT_POOL_WORDS * 32;
-static int random_min_urandom_seed = 60;
-static char sysctl_bootid[16];
+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
+static int sysctl_poolsize = POOL_BITS;
+static u8 sysctl_bootid[UUID_SIZE];
/*
* This function is used to return both the bootid UUID, and random
- * UUID. The difference is in whether table->data is NULL; if it is,
+ * UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
- *
- * If the user accesses this via the proc interface, the UUID will be
- * returned as an ASCII string in the standard UUID format; if via the
- * sysctl system call, as 16 bytes of binary data.
*/
-static int proc_do_uuid(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table fake_table;
- unsigned char buf[64], tmp_uuid[16], *uuid;
+static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
+{
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+ char uuid_string[UUID_STRING_LEN + 1];
+ struct ctl_table fake_table = {
+ .data = uuid_string,
+ .maxlen = UUID_STRING_LEN
+ };
+
+ if (write)
+ return -EPERM;
uuid = table->data;
if (!uuid) {
@@ -2076,32 +1467,17 @@ static int proc_do_uuid(struct ctl_table *table, int write,
spin_unlock(&bootid_spinlock);
}
- sprintf(buf, "%pU", uuid);
-
- fake_table.data = buf;
- fake_table.maxlen = sizeof(buf);
-
- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}
-/*
- * Return entropy available scaled to integral bits
- */
-static int proc_do_entropy(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+/* The same as proc_dointvec, but writes don't change anything. */
+static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- int entropy_count;
-
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
-
- fake_table.data = &entropy_count;
- fake_table.maxlen = sizeof(entropy_count);
-
- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
{
@@ -2113,222 +1489,36 @@ struct ctl_table random_table[] = {
},
{
.procname = "entropy_avail",
+ .data = &input_pool.init_bits,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_do_entropy,
- .data = &input_pool.entropy_count,
+ .proc_handler = proc_dointvec,
},
{
.procname = "write_wakeup_threshold",
- .data = &random_write_wakeup_bits,
+ .data = &sysctl_random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_write_thresh,
- .extra2 = &max_write_thresh,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "urandom_min_reseed_secs",
- .data = &random_min_urandom_seed,
+ .data = &sysctl_random_min_urandom_seed,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "boot_id",
.data = &sysctl_bootid,
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
{
.procname = "uuid",
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
-#ifdef ADD_INTERRUPT_BENCH
- {
- .procname = "add_interrupt_avg_cycles",
- .data = &avg_cycles,
- .maxlen = sizeof(avg_cycles),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
- {
- .procname = "add_interrupt_avg_deviation",
- .data = &avg_deviation,
- .maxlen = sizeof(avg_deviation),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
-#endif
{ }
};
-#endif /* CONFIG_SYSCTL */
-
-struct batched_entropy {
- union {
- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
- };
- unsigned int position;
- spinlock_t batch_lock;
-};
-
-/*
- * Get a random word for internal kernel use only. The quality of the random
- * number is as good as /dev/urandom, but there is no backtrack protection, with
- * the goal of being quite fast and not depleting entropy. In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once at any
- * point prior.
- */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
-};
-
-u64 get_random_u64(void)
-{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u64);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
- batch->position = 0;
- }
- ret = batch->entropy_u64[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
-};
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u32);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
- batch->position = 0;
- }
- ret = batch->entropy_u32[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
-
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
- int cpu;
- unsigned long flags;
-
- for_each_possible_cpu (cpu) {
- struct batched_entropy *batched_entropy;
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
- batched_entropy->position = 0;
- spin_unlock(&batched_entropy->batch_lock);
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
- spin_lock(&batched_entropy->batch_lock);
- batched_entropy->position = 0;
- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
- }
-}
-
-/**
- * randomize_page - Generate a random, page aligned address
- * @start: The smallest acceptable address the caller will take.
- * @range: The size of the area, starting at @start, within which the
- * random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned. We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range). On error,
- * @start is returned.
- */
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
-{
- if (!PAGE_ALIGNED(start)) {
- range -= PAGE_ALIGN(start) - start;
- start = PAGE_ALIGN(start);
- }
-
- if (start > ULONG_MAX - range)
- range = ULONG_MAX - start;
-
- range >>= PAGE_SHIFT;
-
- if (range == 0)
- return start;
-
- return start + (get_random_long() % range << PAGE_SHIFT);
-}
-
-/* Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
- */
-void add_hwgenerator_randomness(const char *buffer, size_t count,
- size_t entropy)
-{
- struct entropy_store *poolp = &input_pool;
-
- if (unlikely(crng_init == 0)) {
- size_t ret = crng_fast_load(buffer, count);
- count -= ret;
- buffer += ret;
- if (!count || crng_init == 0)
- return;
- }
-
- /* Suspend writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait,
- !system_wq || kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
- mix_pool_bytes(poolp, buffer, count);
- credit_entropy_bits(poolp, entropy);
-}
-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-
-/* Handle random seed passed by bootloader.
- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
- * it would be regarded as device data.
- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
- */
-void add_bootloader_randomness(const void *buf, unsigned int size)
-{
- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
- add_hwgenerator_randomness(buf, size, size * 8);
- else
- add_device_randomness(buf, size);
-}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+#endif /* CONFIG_SYSCTL */
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index a25815a6f625..de92065394be 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
if (!rc) {
out = (struct tpm2_get_cap_out *)
&buf.data[TPM_HEADER_SIZE];
- *value = be32_to_cpu(out->value);
+ /*
+	 * To prevent some systems from failing to boot, the Infineon TPM 2.0
+	 * returns SUCCESS on TPM2_Startup in field upgrade mode. Also, the
+	 * TPM2_GetCapability command returns a zero-length list in field
+	 * upgrade mode.
+ */
+ if (be32_to_cpu(out->property_cnt) > 0)
+ *value = be32_to_cpu(out->value);
+ else
+ rc = -ENODATA;
}
tpm_buf_destroy(&buf);
return rc;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 3af4c07a9342..d3989b257f42 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
ibmvtpm->rtce_buf != NULL,
HZ)) {
+ rc = -ENODEV;
dev_err(dev, "CRQ response timed out\n");
goto init_irq_cleanup;
}
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index c89278103703..e2ab6a329732 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -754,8 +754,8 @@ static int tpm_cr50_i2c_remove(struct i2c_client *client)
struct device *dev = &client->dev;
if (!chip) {
- dev_err(dev, "Could not get client data at remove\n");
- return -ENODEV;
+ dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n");
+ return 0;
}
tpm_chip_unregister(chip);
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index dc3551796e5e..39bcbfd908b4 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref)
if (xdev->workq)
destroy_workqueue(xdev->workq);
+ usb_put_dev(xdev->udev);
kfree(xdev->channels); /* Argument may be NULL, and that's fine */
kfree(xdev);
}
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index b656d25a9767..fe772baeb15f 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -106,6 +106,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;
+
+ if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
+ return;
+
tmp_diff = abs(req->rate - tmp_rate);
if (*best_diff < 0 || *best_diff >= tmp_diff) {
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index aa80334196ff..140b9297ed83 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -975,7 +975,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
- hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
+ hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0);
hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 6144447f86c6..62238dca9a53 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -271,6 +271,7 @@ struct tegra_dfll {
struct clk *ref_clk;
struct clk *i2c_clk;
struct clk *dfll_clk;
+ struct reset_control *dfll_rst;
struct reset_control *dvco_rst;
unsigned long ref_rate;
unsigned long i2c_clk_rate;
@@ -1464,6 +1465,7 @@ static int dfll_init(struct tegra_dfll *td)
return -EINVAL;
}
+ reset_control_deassert(td->dfll_rst);
reset_control_deassert(td->dvco_rst);
ret = clk_prepare(td->ref_clk);
@@ -1509,6 +1511,7 @@ di_err1:
clk_unprepare(td->ref_clk);
reset_control_assert(td->dvco_rst);
+ reset_control_assert(td->dfll_rst);
return ret;
}
@@ -1530,6 +1533,7 @@ int tegra_dfll_suspend(struct device *dev)
}
reset_control_assert(td->dvco_rst);
+ reset_control_assert(td->dfll_rst);
return 0;
}
@@ -1548,6 +1552,7 @@ int tegra_dfll_resume(struct device *dev)
{
struct tegra_dfll *td = dev_get_drvdata(dev);
+ reset_control_deassert(td->dfll_rst);
reset_control_deassert(td->dvco_rst);
pm_runtime_get_sync(td->dev);
@@ -1951,6 +1956,12 @@ int tegra_dfll_register(struct platform_device *pdev,
td->soc = soc;
+ td->dfll_rst = devm_reset_control_get_optional(td->dev, "dfll");
+ if (IS_ERR(td->dfll_rst)) {
+ dev_err(td->dev, "couldn't get dfll reset\n");
+ return PTR_ERR(td->dfll_rst);
+ }
+
td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
if (IS_ERR(td->dvco_rst)) {
dev_err(td->dev, "couldn't get dvco reset\n");
@@ -2087,6 +2098,7 @@ struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev)
clk_unprepare(td->i2c_clk);
reset_control_assert(td->dvco_rst);
+ reset_control_assert(td->dfll_rst);
return td->soc;
}
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ff188ab68496..bb47610bbd1c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -565,4 +565,3 @@ void __init hv_init_clocksource(void)
hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
-EXPORT_SYMBOL_GPL(hv_init_clocksource);
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index 56c0cc32d0ac..d514b44e67dd 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
}
rps->irq = irq_of_parse_and_map(np, 0);
- if (rps->irq < 0) {
+ if (!rps->irq) {
ret = -EINVAL;
goto err_iomap;
}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c51c5ed15aa7..0e7748df4be3 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -32,7 +32,7 @@ static int riscv_clock_next_event(unsigned long delta,
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 401d592e85f5..e6a87f4af2b5 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -259,6 +259,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
+ if (initialized) {
+ pr_debug("%pOF: skipping further SP804 timer device\n", np);
+ return 0;
+ }
+
base = of_iomap(np, 0);
if (!base)
return -ENXIO;
@@ -270,11 +275,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
writel(0, timer1_base + timer->ctrl);
writel(0, timer2_base + timer->ctrl);
- if (initialized || !of_device_is_available(np)) {
- ret = -EINVAL;
- goto err;
- }
-
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4b00a9ea611a..9a1d146b7ebb 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -685,7 +685,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
if (!devpriv->usb_rx_buf)
return -ENOMEM;
- size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
+ size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index eeac6d809229..cddf7e13c232 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -28,6 +28,7 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
+#include <linux/units.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);
@@ -1701,6 +1702,16 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
return new_freq;
if (policy->cur != new_freq) {
+ /*
+ * For some platforms, the frequency returned by hardware may be
+ * slightly different from what is provided in the frequency
+ * table, for example hardware may return 499 MHz instead of 500
+ * MHz. In such cases it is better to avoid getting into
+ * unnecessary frequency updates.
+ */
+ if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
+ return policy->cur;
+
cpufreq_out_of_sync(policy, new_freq);
if (update)
schedule_work(&policy->update);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 866163883b48..bfe240c726e3 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -44,6 +44,8 @@ struct mtk_cpu_dvfs_info {
bool need_voltage_tracking;
};
+static struct platform_device *cpufreq_pdev;
+
static LIST_HEAD(dvfs_info_list);
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
@@ -547,7 +549,6 @@ static int __init mtk_cpufreq_driver_init(void)
{
struct device_node *np;
const struct of_device_id *match;
- struct platform_device *pdev;
int err;
np = of_find_node_by_path("/");
@@ -571,16 +572,23 @@ static int __init mtk_cpufreq_driver_init(void)
* and the device registration codes are put here to handle defer
* probing.
*/
- pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ cpufreq_pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
+ if (IS_ERR(cpufreq_pdev)) {
pr_err("failed to register mtk-cpufreq platform device\n");
platform_driver_unregister(&mtk_cpufreq_platdrv);
- return PTR_ERR(pdev);
+ return PTR_ERR(cpufreq_pdev);
}
return 0;
}
-device_initcall(mtk_cpufreq_driver_init);
+module_init(mtk_cpufreq_driver_init)
+
+static void __exit mtk_cpufreq_driver_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&mtk_cpufreq_platdrv);
+}
+module_exit(mtk_cpufreq_driver_exit)
MODULE_DESCRIPTION("MediaTek CPUFreq driver");
MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b51b5df08450..540105ca0781 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -23,6 +23,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/syscore_ops.h>
#include <asm/cpuidle.h>
@@ -131,6 +132,49 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
return 0;
}
+static void psci_idle_syscore_switch(bool suspend)
+{
+ bool cleared = false;
+ struct device *dev;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev;
+
+ if (dev && suspend) {
+ dev_pm_genpd_suspend(dev);
+ } else if (dev) {
+ dev_pm_genpd_resume(dev);
+
+ /* Account for userspace having offlined a CPU. */
+ if (pm_runtime_status_suspended(dev))
+ pm_runtime_set_active(dev);
+
+			/* Clear the domain state to start fresh. */
+ if (!cleared) {
+ psci_set_domain_state(0);
+ cleared = true;
+ }
+ }
+ }
+}
+
+static int psci_idle_syscore_suspend(void)
+{
+ psci_idle_syscore_switch(true);
+ return 0;
+}
+
+static void psci_idle_syscore_resume(void)
+{
+ psci_idle_syscore_switch(false);
+}
+
+static struct syscore_ops psci_idle_syscore_ops = {
+ .suspend = psci_idle_syscore_suspend,
+ .resume = psci_idle_syscore_resume,
+};
+
static void psci_idle_init_cpuhp(void)
{
int err;
@@ -138,6 +182,8 @@ static void psci_idle_init_cpuhp(void)
if (!psci_cpuidle_use_cpuhp)
return;
+ register_syscore_ops(&psci_idle_syscore_ops);
+
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 554e400d41ca..70e2e6e37389 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -93,6 +93,68 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
return err;
}
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sg = areq->src;
+ unsigned int todo, offset;
+ unsigned int len = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+ int i = 0;
+ u32 a;
+ int err;
+
+ rctx->ivlen = ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+ ivsize, 0);
+ }
+
+	/* we need to copy all IVs from the source in case DMA is bidirectional */
+ while (sg && len) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
+ if (i == 0)
+ memcpy(sf->iv[0], areq->iv, ivsize);
+ a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, a)) {
+ memzero_explicit(sf->iv[i], ivsize);
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
+ goto dma_iv_error;
+ }
+ rctx->p_iv[i] = a;
+		/* we only need to set up all the other IVs when decrypting */
+ if (rctx->op_dir & SS_ENCRYPTION)
+ return 0;
+ todo = min(len, sg_dma_len(sg));
+ len -= todo;
+ i++;
+ if (i < MAX_SG) {
+ offset = sg->length - ivsize;
+ scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+ }
+ rctx->niv = i;
+ sg = sg_next(sg);
+ }
+
+ return 0;
+dma_iv_error:
+ i--;
+ while (i >= 0) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ }
+ return err;
+}
+
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -101,9 +163,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
- void *backup_iv = NULL;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
@@ -134,30 +196,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
- rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!rctx->biv) {
- err = -ENOMEM;
+ err = sun8i_ss_setup_ivs(areq);
+ if (err)
goto theend_key;
- }
- if (rctx->op_dir & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
- offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(backup_iv, areq->src, offset,
- ivsize, 0);
- }
- memcpy(rctx->biv, areq->iv, ivsize);
- rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ss->dev, rctx->p_iv)) {
- dev_err(ss->dev, "Cannot DMA MAP IV\n");
- err = -ENOMEM;
- goto theend_iv;
- }
}
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
@@ -243,21 +284,19 @@ theend_sgs:
}
theend_iv:
- if (rctx->p_iv)
- dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
- DMA_TO_DEVICE);
-
if (areq->iv && ivsize > 0) {
- if (rctx->biv) {
- offset = areq->cryptlen - ivsize;
- if (rctx->op_dir & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
- } else {
- scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
- ivsize, 0);
- }
- kfree(rctx->biv);
+ for (i = 0; i < rctx->niv; i++) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ }
+
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, sf->biv, ivsize);
+ memzero_explicit(sf->biv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
}
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 319fe3279a71..657530578643 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -66,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
const char *name)
{
int flow = rctx->flow;
+ unsigned int ivlen = rctx->ivlen;
u32 v = SS_START;
int i;
@@ -104,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
mutex_lock(&ss->mlock);
writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
- if (i == 0) {
- if (rctx->p_iv)
- writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
- } else {
- if (rctx->biv) {
- if (rctx->op_dir == SS_ENCRYPTION)
- writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ if (ivlen) {
+ if (rctx->op_dir == SS_ENCRYPTION) {
+ if (i == 0)
+ writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
else
- writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
+ } else {
+ writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
}
}
@@ -464,7 +464,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
*/
static int allocate_flows(struct sun8i_ss_dev *ss)
{
- int i, err;
+ int i, j, err;
ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
GFP_KERNEL);
@@ -474,6 +474,18 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
for (i = 0; i < MAXFLOW; i++) {
init_completion(&ss->flows[i].complete);
+ ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].biv)
+ goto error_engine;
+
+ for (j = 0; j < MAX_SG; j++) {
+ ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].iv[j])
+ goto error_engine;
+ }
+
ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
if (!ss->flows[i].engine) {
dev_err(ss->dev, "Cannot allocate engine\n");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 1a71ed49d233..ca4f280af35d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -380,13 +380,21 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
}
len = areq->nbytes;
- for_each_sg(areq->src, sg, nr_sgs, i) {
+ sg = areq->src;
+ i = 0;
+ while (len > 0 && sg) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
rctx->t_src[i].addr = sg_dma_address(sg);
todo = min(len, sg_dma_len(sg));
rctx->t_src[i].len = todo / 4;
len -= todo;
rctx->t_dst[i].addr = addr_res;
rctx->t_dst[i].len = digestsize / 4;
+ sg = sg_next(sg);
+ i++;
}
if (len > 0) {
dev_err(ss->dev, "remaining len %d\n", len);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 28188685b910..57ada8653855 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -121,11 +121,15 @@ struct sginfo {
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
* @stat_req: number of request done by this flow
+ * @iv: list of IVs to use for each step
+ * @biv: buffer which contains the backed-up IV
*/
struct sun8i_ss_flow {
struct crypto_engine *engine;
struct completion complete;
int status;
+ u8 *iv[MAX_SG];
+ u8 *biv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
#endif
@@ -164,28 +168,28 @@ struct sun8i_ss_dev {
* @t_src: list of mapped SGs with their size
* @t_dst: list of mapped SGs with their size
* @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
+ * @p_iv: DMA address of the IVs
+ * @niv: Number of IVs DMA mapped
* @method: current algorithm for this request
* @op_mode: op_mode for this request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of biv
+ * @ivlen: size of IVs
* @keylen: keylen for this request
- * @biv: buffer which contain the IV
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
struct sginfo t_src[MAX_SG];
struct sginfo t_dst[MAX_SG];
u32 p_key;
- u32 p_iv;
+ u32 p_iv[MAX_SG];
+ int niv;
u32 method;
u32 op_mode;
u32 op_dir;
int flow;
unsigned int ivlen;
unsigned int keylen;
- void *biv;
struct skcipher_request fallback_req; // keep at the end
};
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 11e0278c8631..6140e4927322 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
req_ctx->mlli_params.mlli_dma_addr);
}
- dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
if (src != dst) {
- dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ } else {
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
}
}
@@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
int rc = 0;
u32 mapped_nents = 0;
+ int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
mlli_params->curr_pool = NULL;
@@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
/* Map the src SGL */
- rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (rc)
goto cipher_exit;
@@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
} else {
/* Map the dst sg */
- rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
&req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents);
if (rc)
@@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
else
size_for_map -= authsize;
- rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
&areq_ctx->dst.mapped_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
@@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
size_to_map += authsize;
}
- rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, req->src, size_to_map,
+ (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
&areq_ctx->src.mapped_nents,
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b739d3b873dc..c6f2fa753b7c 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -624,7 +624,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 32a036ada5d0..f418817c0f43 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -827,7 +827,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
goto err_out;
vas_init_rx_win_attr(&rxattr, coproc->ct);
- rxattr.rx_fifo = (void *)rx_fifo;
+ rxattr.rx_fifo = rx_fifo;
rxattr.rx_fifo_size = fifo_size;
rxattr.lnotify_lpid = lpid;
rxattr.lnotify_pid = pid;
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 9e560c7d4163..0ba62b286a85 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -161,21 +161,33 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
u32 legfuses;
u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
if ((straps | fuses) & ADF_POWERGATE_PKE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ if ((straps | fuses) & ADF_POWERGATE_DC)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 756b0ddfac5e..2aaf02ccbb3a 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -111,6 +111,7 @@ do { \
(ADF_ARB_REG_SLOT * (index)), value)
/* Power gating */
+#define ADF_POWERGATE_DC BIT(23)
#define ADF_POWERGATE_PKE BIT(24)
/* WDT timers
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 0a9ce365a544..c2c73ee279b2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -86,17 +86,26 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
return capabilities;
}
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 11f30fd48c14..031b5f701a0a 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
+ break;
}
} while (currsize < max);
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index be1bf39a317d..90a920e7f664 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(crc->dev);
return ret;
+ }
spin_lock(&crc_list.lock);
list_del(&crc->list);
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 293857ebfd75..538e8dc74f40 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -477,6 +477,8 @@ static int rk3399_dmcfreq_remove(struct platform_device *pdev)
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
+ devfreq_event_disable_edev(dmcfreq->edev);
+
/*
* Before remove the opp table we need to unregister the opp notifier.
*/
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 15373afb356c..91bdcc1441fb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -468,6 +468,7 @@ static inline int is_dma_buf_file(struct file *file)
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -477,6 +478,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
+ /*
+	 * The ->i_ino acquired from get_next_ino() is not unique and thus
+	 * not suitable for use as the dentry name by dmabuf stats.
+	 * Override ->i_ino with a unique, dmabuf-specific value.
+ */
+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index b9b2b4a4124e..033df43db0ce 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -369,10 +369,16 @@ int idxd_cdev_register(void)
rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
ictx[i].name);
if (rc)
- return rc;
+ goto err_free_chrdev_region;
}
return 0;
+
+err_free_chrdev_region:
+ for (i--; i >= 0; i--)
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+
+ return rc;
}
void idxd_cdev_remove(void)
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index c39e9483206a..29af898f3c24 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -78,6 +78,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
}
static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+ 0, 0, 0, desc->compl_dma, desc_flags);
+ desc->txd.flags = flags;
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags)
{
@@ -181,10 +202,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels);
dma->dev = dev;
+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
+ dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index f17a9ffcd00d..21a7bdc88970 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -40,7 +40,6 @@
STM32_MDMA_SHIFT(mask))
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
-#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
@@ -196,7 +195,7 @@
#define STM32_MDMA_MAX_BUF_LEN 128
#define STM32_MDMA_MAX_BLOCK_LEN 65536
-#define STM32_MDMA_MAX_CHANNELS 63
+#define STM32_MDMA_MAX_CHANNELS 32
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
@@ -1345,26 +1344,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
struct stm32_mdma_device *dmadev = devid;
- struct stm32_mdma_chan *chan = devid;
+ struct stm32_mdma_chan *chan;
u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
- if (status) {
- id = __ffs(status);
- } else {
- status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
- if (!status) {
- dev_dbg(mdma2dev(dmadev), "spurious it\n");
- return IRQ_NONE;
- }
- id = __ffs(status);
- /*
- * As GISR0 provides status for channel id from 0 to 31,
- * so GISR1 provides status for channel id from 32 to 62
- */
- id += 32;
+ if (!status) {
+ dev_dbg(mdma2dev(dmadev), "spurious it\n");
+ return IRQ_NONE;
}
+ id = __ffs(status);
chan = &dmadev->chan[id];
if (!chan) {
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 97f02f8eb03a..5257bdbf77fb 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -232,7 +232,7 @@ struct zynqmp_dma_chan {
bool is_dmacoherent;
struct tasklet_struct tasklet;
bool idle;
- u32 desc_size;
+ size_t desc_size;
bool err;
u32 bus_width;
u32 src_burst_len;
@@ -489,7 +489,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
}
chan->desc_pool_v = dma_alloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+ ZYNQMP_DMA_NUM_DESCS),
&chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index b8a7d9594afd..1fa5ca57e9ec 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -489,7 +489,7 @@ static int dmc520_edac_probe(struct platform_device *pdev)
dev = &pdev->dev;
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
- irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name);
+ irq = platform_get_irq_byname_optional(pdev, dmc520_irq_configs[idx].name);
irqs[idx] = irq;
masks[idx] = dmc520_irq_configs[idx].mask;
if (irq >= 0) {
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index fdb31954cf2b..8073bc7d3e61 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -375,8 +375,8 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
put_device(&adev->dev);
- if (!info->id_extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(info->id_extcon))
+ return PTR_ERR(info->id_extcon);
dev_info(dev, "controlling USB role\n");
} else {
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 5b9a3cf8df26..2a7874108df8 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -194,6 +194,13 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
return 0;
}
+static void ptn5150_work_sync_and_put(void *data)
+{
+ struct ptn5150_info *info = data;
+
+ cancel_work_sync(&info->irq_work);
+}
+
static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
@@ -284,6 +291,10 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c)
if (ret)
return -EINVAL;
+ ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
+ if (ret)
+ return ret;
+
/*
* Update current extcon state if for example OTG connection was there
* before the probe
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e7a9561a826d..f305503ec27e 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -863,6 +863,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability);
* @extcon_name: the extcon name provided with extcon_dev_register()
*
* Return the pointer of extcon device if success or ERR_PTR(err) if fail.
+ * NOTE: This function returns -EPROBE_DEFER so it may only be called from
+ * probe() functions.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -876,7 +878,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
if (!strcmp(sd->name, extcon_name))
goto out;
}
- sd = NULL;
+ sd = ERR_PTR(-EPROBE_DEFER);
out:
mutex_unlock(&extcon_dev_list_lock);
return sd;
@@ -1230,19 +1232,14 @@ int extcon_dev_register(struct extcon_dev *edev)
edev->dev.type = &edev->extcon_dev_type;
}
- ret = device_register(&edev->dev);
- if (ret) {
- put_device(&edev->dev);
- goto err_dev;
- }
-
spin_lock_init(&edev->lock);
- edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
- sizeof(*edev->nh), GFP_KERNEL);
- if (!edev->nh) {
- ret = -ENOMEM;
- device_unregister(&edev->dev);
- goto err_dev;
+ if (edev->max_supported) {
+ edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
+ GFP_KERNEL);
+ if (!edev->nh) {
+ ret = -ENOMEM;
+ goto err_alloc_nh;
+ }
}
for (index = 0; index < edev->max_supported; index++)
@@ -1253,6 +1250,12 @@ int extcon_dev_register(struct extcon_dev *edev)
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
+ ret = device_register(&edev->dev);
+ if (ret) {
+ put_device(&edev->dev);
+ goto err_dev;
+ }
+
mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);
@@ -1261,6 +1264,9 @@ int extcon_dev_register(struct extcon_dev *edev)
err_dev:
if (edev->max_supported)
+ kfree(edev->nh);
+err_alloc_nh:
+ if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:
if (edev->max_supported && edev->mutually_exclusive) {
@@ -1320,6 +1326,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
+ kfree(edev->nh);
}
put_device(&edev->dev);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index c9fb56afbcb4..814d3bf32489 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -556,7 +556,7 @@ static int ffa_partition_info_get(const char *uuid_str,
return -ENODEV;
}
- count = ffa_partition_probe(&uuid_null, &pbuf);
+ count = ffa_partition_probe(&uuid, &pbuf);
if (count <= 0)
return -ENOENT;
@@ -645,8 +645,6 @@ static void ffa_setup_partitions(void)
__func__, tpbuf->id);
continue;
}
-
- ffa_dev_set_drvdata(ffa_dev, drv_info);
}
kfree(pbuf);
}
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index f5219334fd3a..3fe172c03c24 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -197,7 +197,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
break;
loop_num_ret = le32_to_cpu(*num_ret);
- if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
+ if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) {
dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
break;
}
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8b8127fa8955..4a93fb490cb4 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -603,7 +603,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
"%d-%d", dh->type, entry->instance);
if (*ret) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return;
}
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 53c7e3f8cfde..7dd0ac1a0cfc 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -941,17 +941,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
- size_t size = 0;
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
- size = pmem->size;
- break;
+ gen_pool_free(chan->ctrl->genpool,
+ (unsigned long)kaddr, pmem->size);
+ pmem->vaddr = NULL;
+ list_del(&pmem->node);
+ return;
}
- gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
- pmem->vaddr = NULL;
- list_del(&pmem->node);
+ list_del(&svc_data_mem);
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index f98fa33e1679..e981e7a46fc1 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -653,10 +653,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
gpio->clks[1].id = "db";
err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
gpio->clks);
- if (err) {
- dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(gpio->dev, err,
+ "Cannot get APB/Debounce clocks\n");
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
if (err) {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index ad8822da7c27..1448dc874dfc 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long flags;
unsigned int on, off;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX + 1ULL)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0cee7a40854a..d4a90aef618e 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1120,20 +1120,21 @@ static int pca953x_regcache_sync(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
+ u8 regaddr;
/*
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1141,16 +1142,18 @@ static int pca953x_regcache_sync(struct device *dev)
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
- ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
- PCAL953X_IN_LATCH + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
- PCAL953X_INT_MASK + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip));
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 24155c038f6d..22b8f0aa80f1 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/regmap.h>
#include "../pinctrl/core.h"
@@ -691,7 +692,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
struct device_node *pctlnp = of_get_parent(np);
struct pinctrl_dev *pctldev = NULL;
struct rockchip_pin_bank *bank = NULL;
- struct rockchip_pin_output_deferred *cfg;
+ struct rockchip_pin_deferred *cfg;
static int gpio;
int id, ret;
@@ -732,15 +733,22 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
return ret;
}
- while (!list_empty(&bank->deferred_output)) {
- cfg = list_first_entry(&bank->deferred_output,
- struct rockchip_pin_output_deferred, head);
+ while (!list_empty(&bank->deferred_pins)) {
+ cfg = list_first_entry(&bank->deferred_pins,
+ struct rockchip_pin_deferred, head);
list_del(&cfg->head);
- ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
- if (ret)
- dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
-
+ switch (cfg->param) {
+ case PIN_CONFIG_OUTPUT:
+ ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+ if (ret)
+ dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
+ cfg->arg);
+ break;
+ default:
+ dev_warn(dev, "unknown deferred config param %d\n", cfg->param);
+ break;
+ }
kfree(cfg);
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a5b34c248767..66e434f59f60 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -933,6 +933,11 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!np)
return 0;
+ if (!of_property_read_bool(np, "gpio-ranges") &&
+ chip->of_gpio_ranges_fallback) {
+ return chip->of_gpio_ranges_fallback(chip, np);
+ }
+
group_names = of_find_property(np, group_names_propname, NULL);
for (;; index++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7e73ac6fb21d..2eebefd26fa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1411,9 +1411,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0e12315fa0cb..98ac53ee6bb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1046,6 +1046,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
}
/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if the GPU should be reset, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+}
+
+/**
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device_pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ab36cce59d2e..21c02f817a84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1828,9 +1828,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
return -EINVAL;
}
- /* delete kgd_mem from kfd_bo_list to avoid re-validating
- * this BO in BO's restoring after eviction.
- */
mutex_lock(&mem->process_info->lock);
ret = amdgpu_bo_reserve(bo, true);
@@ -1853,7 +1850,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
amdgpu_amdkfd_remove_eviction_fence(
bo, mem->process_info->eviction_fence);
- list_del_init(&mem->validate_list.head);
if (size)
*size = amdgpu_bo_size(bo);
@@ -2399,12 +2395,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Attach new eviction fence to all BOs */
+ /* Attach new eviction fence to all BOs except pinned ones */
list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
+ validate_list.head) {
+ if (mem->bo->tbo.pin_count)
+ continue;
+
amdgpu_bo_fence(mem->bo,
&process_info->eviction_fence->base, true);
-
+ }
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index aa823f154199..2fd4d8ad7e40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -115,7 +115,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
int ret;
if (cs->in.num_chunks == 0)
- return 0;
+ return -EINVAL;
chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2bd7b9fe6005..f65b4b233ffb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1949,6 +1949,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0, 0, 0}
@@ -2259,7 +2260,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- if (!adev->in_s0ix)
+ if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 86e2090bbd6e..57e9932d8a04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -314,7 +314,39 @@ static int psp_sw_init(void *handle)
}
}
+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+ amdgpu_sriov_vf(adev) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr,
+ &psp->fence_buf);
+ if (ret)
+ goto failed1;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+ if (ret)
+ goto failed2;
+
return 0;
+
+failed2:
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+failed1:
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+ return ret;
}
static int psp_sw_fini(void *handle)
@@ -344,6 +376,13 @@ static int psp_sw_fini(void *handle)
kfree(cmd);
cmd = NULL;
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+
return 0;
}
@@ -2580,51 +2619,18 @@ static int psp_load_fw(struct amdgpu_device *adev)
struct psp_context *psp = &adev->psp;
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
- psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
- goto skip_memalloc;
- }
-
- if (amdgpu_sriov_vf(adev)) {
- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
+ /* should not destroy ring, only stop */
+ psp_ring_stop(psp, PSP_RING_TYPE__KM);
} else {
- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
- }
-
- if (ret)
- goto failed;
-
- ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->fence_buf_bo,
- &psp->fence_buf_mc_addr,
- &psp->fence_buf);
- if (ret)
- goto failed;
-
- ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
- (void **)&psp->cmd_buf_mem);
- if (ret)
- goto failed;
+ memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
- memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
-
- ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
- if (ret) {
- DRM_ERROR("PSP ring init failed!\n");
- goto failed;
+ ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
+ if (ret) {
+ DRM_ERROR("PSP ring init failed!\n");
+ goto failed;
+ }
}
-skip_memalloc:
ret = psp_hw_start(psp);
if (ret)
goto failed;
@@ -2730,13 +2736,6 @@ static int psp_hw_fini(void *handle)
psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
- amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
- (void **)&psp->cmd_buf_mem);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index abd8469380e5..0ed0736d515a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -723,8 +723,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
{
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
- amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
&adev->firmware.fw_buf_ptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 01efda4398e5..9cbed9a8f1c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs yc_video_codecs_decode = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e37948c15769..9014f71d52dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -770,8 +770,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ "lower_32_bits(ring->wptr << 2) == 0x%08x "
+ "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 50bf3b71bc93..0f75864365d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -400,8 +400,8 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ "lower_32_bits(ring->wptr << 2) == 0x%08x "
+ "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
@@ -782,9 +782,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr) << 2);
+ lower_32_bits(ring->wptr << 2));
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr) << 2);
+ upper_32_bits(ring->wptr << 2));
}
doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index e32efcfb0c8b..f643b977b5f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -287,8 +287,8 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ "lower_32_bits(ring->wptr << 2) == 0x%08x "
+ "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
@@ -660,8 +660,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
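Note (not part of the patch): the three SDMA hunks above all make the same change — the 64-bit ring write pointer is shifted into byte units before being split into 32-bit halves, instead of after. A minimal standalone sketch of why the ordering matters; lower_32_bits()/upper_32_bits() are re-declared here so it builds as a plain userspace program.

/* Why the shift must happen before truncation when converting the 64-bit
 * write pointer to byte units.
 */
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))
#define upper_32_bits(n) ((uint32_t)(((n) >> 32) & 0xffffffffULL))

int main(void)
{
	uint64_t wptr = 0x40000000ULL;  /* shifting by 2 carries into bit 32 */

	/* Old form: truncate first, then shift - the carry is lost. */
	unsigned int lo_old = lower_32_bits(wptr) << 2;
	unsigned int hi_old = upper_32_bits(wptr) << 2;

	/* Fixed form: shift the full 64-bit value, then split. */
	unsigned int lo_new = lower_32_bits(wptr << 2);
	unsigned int hi_new = upper_32_bits(wptr << 2);

	printf("old %08x:%08x  new %08x:%08x\n", hi_old, lo_old, hi_new, lo_new);
	return 0;   /* prints: old 00000000:00000000  new 00000001:00000000 */
}

Once the write pointer crosses 0x40000000, the old expression silently drops the carry into the upper doorbell word.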
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 830809b694dd..74e6f613be02 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2181,6 +2181,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
if (range->event == MMU_NOTIFY_RELEASE)
return true;
+ if (!mmget_not_zero(mni->mm))
+ return true;
start = mni->interval_tree.start;
last = mni->interval_tree.last;
@@ -2207,6 +2209,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
}
svm_range_unlock(prange);
+ mmput(mni->mm);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ec75613618b1..d2aecf7bf66b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -989,6 +989,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
@@ -2268,9 +2294,7 @@ static int dm_resume(void *handle)
amdgpu_dm_outbox_init(adev);
/* Before powering on DC we need to re-initialize DMUB. */
- r = dm_dmub_hw_init(adev);
- if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ dm_dmub_hw_resume(adev);
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -2417,7 +2441,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
- u32 max_cll, min_cll, max, min, q, r;
+ u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
@@ -2447,7 +2471,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps[i];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 /*||
@@ -2475,8 +2499,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
* The results of the above expressions can be verified at
* pre_computed_values.
*/
- q = max_cll >> 5;
- r = max_cll % 32;
+ q = max_avg >> 5;
+ r = max_avg % 32;
max = (1 << q) * pre_computed_values[r];
// min luminance: maxLum * (CV/255)^2 / 100
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 2c7eb982eabc..054823d12403 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1013,9 +1013,12 @@ static bool get_pixel_clk_frequency_100hz(
* not be programmed equal to DPREFCLK
*/
modulo_hz = REG_READ(MODULO[inst]);
- *pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
- modulo_hz);
+ if (modulo_hz)
+ *pixel_clk_khz = div_u64((uint64_t)clock_hz*
+ clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ modulo_hz);
+ else
+ *pixel_clk_khz = 0;
} else {
/* NOTE: There is agreement with VBIOS here that MODULO is
* programmed equal to DPREFCLK, in which case PHASE will be
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index b0892443fbd5..c7c27a605f15 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -168,9 +168,7 @@ void enc31_hw_init(struct link_encoder *enc)
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
- AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
-
- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index a5ef9d5e7685..b60ab3cc0f11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -957,6 +957,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.optc = false,
}
},
+ .disable_z10 = true,
.optimize_edp_link_rate = true,
.enable_sw_cntl_psr = true,
};
@@ -1293,12 +1294,6 @@ static struct stream_encoder *dcn31_stream_encoder_create(
if (!enc1 || !vpg || !afmt)
return NULL;
- if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
- ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
- if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
- eng_id = eng_id + 3; // For B0 only. C->F, D->G.
- }
-
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
index bcae42cef374..6ba4c2ae69a6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
@@ -1609,19 +1609,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
- u8 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].clk >= 0) /* XXX */
- break;
- }
-
- if (i >= table->count)
- i = table->count - 1;
-
- return i;
+ return 0;
}
static void kv_update_acp_boot_level(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index 81f82aa05ec2..66fc63f1f1c1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -7247,17 +7247,15 @@ static int si_parse_power_table(struct amdgpu_device *adev)
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
+ for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,
@@ -7279,8 +7277,8 @@ static int si_parse_power_table(struct amdgpu_device *adev)
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ adev->pm.dpm.num_ps++;
}
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
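Note (not part of the patch): the si_parse_power_table() change above keeps num_ps in step with how many entries were actually parsed, so a mid-loop allocation failure leaves an accurate count for the caller's cleanup instead of freeing the array in place. A small self-contained sketch of that counting pattern, with hypothetical names rather than the driver's real structures:

#include <stdlib.h>

struct state { void *priv; };

struct table {
	struct state *ps;
	unsigned int num_ps;
};

static int parse_states(struct table *t, unsigned int n)
{
	unsigned int i;

	t->ps = calloc(n, sizeof(*t->ps));
	if (!t->ps)
		return -1;

	for (t->num_ps = 0, i = 0; i < n; i++) {
		void *priv = malloc(32);

		if (!priv)
			return -1;      /* num_ps already reflects what exists */
		t->ps[i].priv = priv;
		t->num_ps++;            /* count only fully-initialized entries */
	}
	return 0;
}

static void free_states(struct table *t)
{
	unsigned int i;

	for (i = 0; i < t->num_ps; i++)
		free(t->ps[i].priv);
	free(t->ps);
	t->ps = NULL;
	t->num_ps = 0;
}

int main(void)
{
	struct table t = { 0 };

	/* Even if parse_states() fails partway through, num_ps tells
	 * free_states() exactly how many entries need their priv freed. */
	parse_states(&t, 4);
	free_states(&t);
	return 0;
}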
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 87b055466a33..e6c93396434f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -772,7 +772,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
goto failed;
}
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
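Note (not part of the patch): the smu_v11_0 hunk above (and the matching smu_v13_0 hunk later in this diff) replaces a raw bitmap_copy() into a u32 array with bitmap_to_arr32(), which splits each unsigned long into explicit low/high 32-bit words instead of relying on the in-memory layout. A standalone sketch of the difference, assuming a 64-bit unsigned long:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Rough equivalent of bitmap_to_arr32(): emit the low 32 bits of each
 * unsigned long first, then the high 32 bits, regardless of endianness. */
static void to_arr32(uint32_t *out, const unsigned long *bitmap, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits / 64; i++) {
		out[2 * i]     = (uint32_t)(bitmap[i] & 0xffffffffUL);
		out[2 * i + 1] = (uint32_t)(bitmap[i] >> 32);
	}
}

int main(void)
{
	unsigned long allowed[1] = { 0x1122334455667788UL };   /* 64 feature bits */
	uint32_t mask_cast[2], mask_arr[2];

	/* The old bitmap_copy()-through-a-cast approach: reinterpret memory. */
	memcpy(mask_cast, allowed, sizeof(mask_cast));
	to_arr32(mask_arr, allowed, 64);

	/* On little-endian both agree; on big-endian mask_cast[0] would hold
	 * 0x11223344 (the high half), so the wrong word gets sent as the low
	 * feature mask. to_arr32() always yields { 0x55667788, 0x11223344 }. */
	printf("cast: %08x %08x  arr32: %08x %08x\n",
	       (unsigned int)mask_cast[0], (unsigned int)mask_cast[1],
	       (unsigned int)mask_arr[0], (unsigned int)mask_arr[1]);
	return 0;
}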
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 145f13b8c977..138466081875 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1127,6 +1127,39 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
return size;
}
+static void renoir_get_ss_power_percent(SmuMetrics_t *metrics,
+ uint32_t *apu_percent, uint32_t *dgpu_percent)
+{
+ uint32_t apu_boost = 0;
+ uint32_t dgpu_boost = 0;
+ uint16_t apu_limit = 0;
+ uint16_t dgpu_limit = 0;
+ uint16_t apu_power = 0;
+ uint16_t dgpu_power = 0;
+
+ apu_power = metrics->ApuPower;
+ apu_limit = metrics->StapmOriginalLimit;
+ if (apu_power > apu_limit && apu_limit != 0)
+ apu_boost = ((apu_power - apu_limit) * 100) / apu_limit;
+ apu_boost = (apu_boost > 100) ? 100 : apu_boost;
+
+ dgpu_power = metrics->dGpuPower;
+ if (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit)
+ dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOriginalLimit;
+ if (dgpu_power > dgpu_limit && dgpu_limit != 0)
+ dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit;
+ dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost;
+
+ if (dgpu_boost >= apu_boost)
+ apu_boost = 0;
+ else
+ dgpu_boost = 0;
+
+ *apu_percent = apu_boost;
+ *dgpu_percent = dgpu_boost;
+}
+
+
static int renoir_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
@@ -1135,6 +1168,9 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
+ uint32_t apu_percent = 0;
+ uint32_t dgpu_percent = 0;
+
mutex_lock(&smu->metrics_lock);
@@ -1183,26 +1219,18 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->Voltage[1];
break;
case METRICS_SS_APU_SHARE:
- /* return the percentage of APU power with respect to APU's power limit.
- * percentage is reported, this isn't boost value. Smartshift power
- * boost/shift is only when the percentage is more than 100.
+ /* return the percentage of APU power boost
+ * with respect to APU's power limit.
*/
- if (metrics->StapmOriginalLimit > 0)
- *value = (metrics->ApuPower * 100) / metrics->StapmOriginalLimit;
- else
- *value = 0;
+ renoir_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
+ *value = apu_percent;
break;
case METRICS_SS_DGPU_SHARE:
- /* return the percentage of dGPU power with respect to dGPU's power limit.
- * percentage is reported, this isn't boost value. Smartshift power
- * boost/shift is only when the percentage is more than 100.
+ /* return the percentage of dGPU power boost
+ * with respect to dGPU's power limit.
*/
- if ((metrics->dGpuPower > 0) &&
- (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit))
- *value = (metrics->dGpuPower * 100) /
- (metrics->StapmCurrentLimit - metrics->StapmOriginalLimit);
- else
- *value = 0;
+ renoir_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
+ *value = dgpu_percent;
break;
default:
*value = UINT_MAX;
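Note (not part of the patch): the renoir SmartShift change above reports a boost percentage only when power exceeds the STAPM limit, and only for whichever of APU/dGPU is boosting harder. A tiny worked example of the same arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the new helper: boost is the power above the limit,
 * expressed as a percentage of the limit and clamped to 100. */
static uint32_t boost_percent(uint16_t power, uint16_t limit)
{
	uint32_t boost = 0;

	if (power > limit && limit != 0)
		boost = ((power - limit) * 100) / limit;
	return boost > 100 ? 100 : boost;
}

int main(void)
{
	/* APU drawing 18 against an original limit of 15 -> 20% boost;
	 * the helper then zeroes whichever of APU/dGPU boosts less. */
	printf("apu boost = %u%%\n", (unsigned int)boost_percent(18, 15));
	return 0;
}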
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index c9cfeb094750..d0c6b864d00a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1627,6 +1627,7 @@ static const struct throttling_logging_label {
uint32_t feature_mask;
const char *label;
} logging_label[] = {
+ {(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 8d4aa16b2ae7..6211570fb64f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -721,7 +721,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 0e1a843608e4..33bd5430c6de 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -305,6 +305,42 @@ static int yellow_carp_mode2_reset(struct smu_context *smu)
return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2);
}
+
+static void yellow_carp_get_ss_power_percent(SmuMetrics_t *metrics,
+ uint32_t *apu_percent, uint32_t *dgpu_percent)
+{
+ uint32_t apu_boost = 0;
+ uint32_t dgpu_boost = 0;
+ uint16_t apu_limit = 0;
+ uint16_t dgpu_limit = 0;
+ uint16_t apu_power = 0;
+ uint16_t dgpu_power = 0;
+
+ /* APU and dGPU power values are reported in milli Watts
+ * and STAPM power limits are in Watts */
+ apu_power = metrics->ApuPower/1000;
+ apu_limit = metrics->StapmOpnLimit;
+ if (apu_power > apu_limit && apu_limit != 0)
+ apu_boost = ((apu_power - apu_limit) * 100) / apu_limit;
+ apu_boost = (apu_boost > 100) ? 100 : apu_boost;
+
+ dgpu_power = metrics->dGpuPower/1000;
+ if (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)
+ dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOpnLimit;
+ if (dgpu_power > dgpu_limit && dgpu_limit != 0)
+ dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit;
+ dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost;
+
+ if (dgpu_boost >= apu_boost)
+ apu_boost = 0;
+ else
+ dgpu_boost = 0;
+
+ *apu_percent = apu_boost;
+ *dgpu_percent = dgpu_boost;
+
+}
+
static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
@@ -313,6 +349,8 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
+ uint32_t apu_percent = 0;
+ uint32_t dgpu_percent = 0;
mutex_lock(&smu->metrics_lock);
@@ -365,26 +403,18 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->Voltage[1];
break;
case METRICS_SS_APU_SHARE:
- /* return the percentage of APU power with respect to APU's power limit.
- * percentage is reported, this isn't boost value. Smartshift power
- * boost/shift is only when the percentage is more than 100.
+ /* return the percentage of APU power boost
+ * with respect to APU's power limit.
*/
- if (metrics->StapmOpnLimit > 0)
- *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
- else
- *value = 0;
+ yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
+ *value = apu_percent;
break;
case METRICS_SS_DGPU_SHARE:
- /* return the percentage of dGPU power with respect to dGPU's power limit.
- * percentage is reported, this isn't boost value. Smartshift power
- * boost/shift is only when the percentage is more than 100.
+ /* return the percentage of dGPU power boost
+ * with respect to dGPU's power limit.
*/
- if ((metrics->dGpuPower > 0) &&
- (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
- *value = (metrics->dGpuPower * 100) /
- (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
- else
- *value = 0;
+ yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
+ *value = dgpu_percent;
break;
default:
*value = UINT_MAX;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index d63d83800a8a..517b94c3bcaf 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -265,6 +265,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
layer->layer_type, &n_formats);
+ if (!formats) {
+ kfree(kplane);
+ return -ENOMEM;
+ }
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
@@ -275,8 +279,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
komeda_put_fourcc_list(formats);
- if (err)
- goto cleanup;
+ if (err) {
+ kfree(kplane);
+ return err;
+ }
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 55890334385d..081beb102150 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -487,7 +487,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
malidp_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index d5c98f79d58d..08ed0d08d03b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -474,7 +474,10 @@ static void ast_set_color_reg(struct ast_private *ast,
static void ast_set_crtthd_reg(struct ast_private *ast)
{
/* Set Threshold */
- if (ast->chip == AST2300 || ast->chip == AST2400 ||
+ if (ast->chip == AST2600) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
+ } else if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index fe28b869b9b7..66737d15a436 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1408,6 +1408,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return 0;
err_unregister_cec:
+ cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index b7d2e4449cfa..873cf6882bd3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1269,6 +1269,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
}
static
+struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
+ struct drm_atomic_state *state)
+{
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_old_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static
struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
struct drm_atomic_state *state)
{
@@ -1448,14 +1467,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
{
struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_crtc *crtc;
+ struct drm_crtc *old_crtc, *new_crtc;
+ struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
- crtc = analogix_dp_get_new_crtc(dp, old_state);
- if (!crtc)
+ new_crtc = analogix_dp_get_new_crtc(dp, old_state);
+ if (!new_crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
if (!new_crtc_state)
goto out;
@@ -1464,6 +1485,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
return;
out:
+ old_crtc = analogix_dp_get_old_crtc(dp, old_state);
+ if (old_crtc) {
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+ old_crtc);
+
+ /* When moving from PSR to fully disabled, exit PSR first. */
+ if (old_crtc_state && old_crtc_state->self_refresh_active) {
+ ret = analogix_dp_disable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to disable psr (%d)\n", ret);
+ }
+ }
+
analogix_dp_bridge_disable(bridge);
}
@@ -1632,8 +1666,19 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct analogix_dp_device *dp = to_dp(aux);
+ int ret;
+
+ pm_runtime_get_sync(dp->dev);
- return analogix_dp_transfer(dp, msg);
+ ret = analogix_dp_detect_hpd(dp);
+ if (ret)
+ goto out;
+
+ ret = analogix_dp_transfer(dp, msg);
+out:
+ pm_runtime_put(dp->dev);
+
+ return ret;
}
struct analogix_dp_device *
@@ -1698,8 +1743,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dp->reg_base))
- return ERR_CAST(dp->reg_base);
+ if (IS_ERR(dp->reg_base)) {
+ ret = PTR_ERR(dp->reg_base);
+ goto err_disable_clk;
+ }
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1711,7 +1758,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (IS_ERR(dp->hpd_gpiod)) {
dev_err(dev, "error getting HDP GPIO: %ld\n",
PTR_ERR(dp->hpd_gpiod));
- return ERR_CAST(dp->hpd_gpiod);
+ ret = PTR_ERR(dp->hpd_gpiod);
+ goto err_disable_clk;
}
if (dp->hpd_gpiod) {
@@ -1731,7 +1779,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- return ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_disable_clk;
}
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1740,11 +1789,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- return ERR_PTR(ret);
+ goto err_disable_clk;
}
disable_irq(dp->irq);
return dp;
+
+err_disable_clk:
+ clk_disable_unprepare(dp->clock);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(analogix_dp_probe);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index a6151db95586..d7eedf35e841 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -14,8 +14,19 @@
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
-#include <video/mipi_display.h>
-
+#define VENDOR_ID 0x00
+#define DEVICE_ID_H 0x01
+#define DEVICE_ID_L 0x02
+#define VERSION_ID 0x03
+#define FIRMWARE_VERSION 0x08
+#define CONFIG_FINISH 0x09
+#define PD_CTRL(n) (0x0a + ((n) & 0x3)) /* 0..3 */
+#define RST_CTRL(n) (0x0e + ((n) & 0x1)) /* 0..1 */
+#define SYS_CTRL(n) (0x10 + ((n) & 0x7)) /* 0..4 */
+#define RGB_DRV(n) (0x18 + ((n) & 0x3)) /* 0..3 */
+#define RGB_DLY(n) (0x1c + ((n) & 0x1)) /* 0..1 */
+#define RGB_TEST_CTRL 0x1e
+#define ATE_PLL_EN 0x1f
#define HACTIVE_LI 0x20
#define VACTIVE_LI 0x21
#define VACTIVE_HACTIVE_HI 0x22
@@ -23,9 +34,101 @@
#define HSYNC_LI 0x24
#define HBP_LI 0x25
#define HFP_HSW_HBP_HI 0x26
+#define HFP_HSW_HBP_HI_HFP(n) (((n) & 0x300) >> 4)
+#define HFP_HSW_HBP_HI_HS(n) (((n) & 0x300) >> 6)
+#define HFP_HSW_HBP_HI_HBP(n) (((n) & 0x300) >> 8)
#define VFP 0x27
#define VSYNC 0x28
#define VBP 0x29
+#define BIST_POL 0x2a
+#define BIST_POL_BIST_MODE(n) (((n) & 0xf) << 4)
+#define BIST_POL_BIST_GEN BIT(3)
+#define BIST_POL_HSYNC_POL BIT(2)
+#define BIST_POL_VSYNC_POL BIT(1)
+#define BIST_POL_DE_POL BIT(0)
+#define BIST_RED 0x2b
+#define BIST_GREEN 0x2c
+#define BIST_BLUE 0x2d
+#define BIST_CHESS_X 0x2e
+#define BIST_CHESS_Y 0x2f
+#define BIST_CHESS_XY_H 0x30
+#define BIST_FRAME_TIME_L 0x31
+#define BIST_FRAME_TIME_H 0x32
+#define FIFO_MAX_ADDR_LOW 0x33
+#define SYNC_EVENT_DLY 0x34
+#define HSW_MIN 0x35
+#define HFP_MIN 0x36
+#define LOGIC_RST_NUM 0x37
+#define OSC_CTRL(n) (0x48 + ((n) & 0x7)) /* 0..5 */
+#define BG_CTRL 0x4e
+#define LDO_PLL 0x4f
+#define PLL_CTRL(n) (0x50 + ((n) & 0xf)) /* 0..15 */
+#define PLL_CTRL_6_EXTERNAL 0x90
+#define PLL_CTRL_6_MIPI_CLK 0x92
+#define PLL_CTRL_6_INTERNAL 0x93
+#define PLL_REM(n) (0x60 + ((n) & 0x3)) /* 0..2 */
+#define PLL_DIV(n) (0x63 + ((n) & 0x3)) /* 0..2 */
+#define PLL_FRAC(n) (0x66 + ((n) & 0x3)) /* 0..2 */
+#define PLL_INT(n) (0x69 + ((n) & 0x1)) /* 0..1 */
+#define PLL_REF_DIV 0x6b
+#define PLL_REF_DIV_P(n) ((n) & 0xf)
+#define PLL_REF_DIV_Pe BIT(4)
+#define PLL_REF_DIV_S(n) (((n) & 0x7) << 5)
+#define PLL_SSC_P(n) (0x6c + ((n) & 0x3)) /* 0..2 */
+#define PLL_SSC_STEP(n) (0x6f + ((n) & 0x3)) /* 0..2 */
+#define PLL_SSC_OFFSET(n) (0x72 + ((n) & 0x3)) /* 0..3 */
+#define GPIO_OEN 0x79
+#define MIPI_CFG_PW 0x7a
+#define MIPI_CFG_PW_CONFIG_DSI 0xc1
+#define MIPI_CFG_PW_CONFIG_I2C 0x3e
+#define GPIO_SEL(n) (0x7b + ((n) & 0x1)) /* 0..1 */
+#define IRQ_SEL 0x7d
+#define DBG_SEL 0x7e
+#define DBG_SIGNAL 0x7f
+#define MIPI_ERR_VECTOR_L 0x80
+#define MIPI_ERR_VECTOR_H 0x81
+#define MIPI_ERR_VECTOR_EN_L 0x82
+#define MIPI_ERR_VECTOR_EN_H 0x83
+#define MIPI_MAX_SIZE_L 0x84
+#define MIPI_MAX_SIZE_H 0x85
+#define DSI_CTRL 0x86
+#define DSI_CTRL_UNKNOWN 0x28
+#define DSI_CTRL_DSI_LANES(n) ((n) & 0x3)
+#define MIPI_PN_SWAP 0x87
+#define MIPI_PN_SWAP_CLK BIT(4)
+#define MIPI_PN_SWAP_D(n) BIT((n) & 0x3)
+#define MIPI_SOT_SYNC_BIT_(n) (0x88 + ((n) & 0x1)) /* 0..1 */
+#define MIPI_ULPS_CTRL 0x8a
+#define MIPI_CLK_CHK_VAR 0x8e
+#define MIPI_CLK_CHK_INI 0x8f
+#define MIPI_T_TERM_EN 0x90
+#define MIPI_T_HS_SETTLE 0x91
+#define MIPI_T_TA_SURE_PRE 0x92
+#define MIPI_T_LPX_SET 0x94
+#define MIPI_T_CLK_MISS 0x95
+#define MIPI_INIT_TIME_L 0x96
+#define MIPI_INIT_TIME_H 0x97
+#define MIPI_T_CLK_TERM_EN 0x99
+#define MIPI_T_CLK_SETTLE 0x9a
+#define MIPI_TO_HS_RX_L 0x9e
+#define MIPI_TO_HS_RX_H 0x9f
+#define MIPI_PHY_(n) (0xa0 + ((n) & 0x7)) /* 0..5 */
+#define MIPI_PD_RX 0xb0
+#define MIPI_PD_TERM 0xb1
+#define MIPI_PD_HSRX 0xb2
+#define MIPI_PD_LPTX 0xb3
+#define MIPI_PD_LPRX 0xb4
+#define MIPI_PD_CK_LANE 0xb5
+#define MIPI_FORCE_0 0xb6
+#define MIPI_RST_CTRL 0xb7
+#define MIPI_RST_NUM 0xb8
+#define MIPI_DBG_SET_(n) (0xc0 + ((n) & 0xf)) /* 0..9 */
+#define MIPI_DBG_SEL 0xe0
+#define MIPI_DBG_DATA 0xe1
+#define MIPI_ATE_TEST_SEL 0xe2
+#define MIPI_ATE_STATUS_(n) (0xe3 + ((n) & 0x1)) /* 0..1 */
+#define MIPI_ATE_STATUS_1 0xe4
+#define ICN6211_MAX_REGISTER MIPI_ATE_STATUS(1)
struct chipone {
struct device *dev;
@@ -65,14 +168,15 @@ static void chipone_enable(struct drm_bridge *bridge)
{
struct chipone *icn = bridge_to_chipone(bridge);
struct drm_display_mode *mode = bridge_to_mode(bridge);
+ u16 hfp, hbp, hsync;
- ICN6211_DSI(icn, 0x7a, 0xc1);
+ ICN6211_DSI(icn, MIPI_CFG_PW, MIPI_CFG_PW_CONFIG_DSI);
ICN6211_DSI(icn, HACTIVE_LI, mode->hdisplay & 0xff);
ICN6211_DSI(icn, VACTIVE_LI, mode->vdisplay & 0xff);
- /**
+ /*
* lsb nibble: 2nd nibble of hdisplay
* msb nibble: 2nd nibble of vdisplay
*/
@@ -80,13 +184,18 @@ static void chipone_enable(struct drm_bridge *bridge)
((mode->hdisplay >> 8) & 0xf) |
(((mode->vdisplay >> 8) & 0xf) << 4));
- ICN6211_DSI(icn, HFP_LI, mode->hsync_start - mode->hdisplay);
-
- ICN6211_DSI(icn, HSYNC_LI, mode->hsync_end - mode->hsync_start);
-
- ICN6211_DSI(icn, HBP_LI, mode->htotal - mode->hsync_end);
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsync = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
- ICN6211_DSI(icn, HFP_HSW_HBP_HI, 0x00);
+ ICN6211_DSI(icn, HFP_LI, hfp & 0xff);
+ ICN6211_DSI(icn, HSYNC_LI, hsync & 0xff);
+ ICN6211_DSI(icn, HBP_LI, hbp & 0xff);
+ /* Top two bits of Horizontal Front porch/Sync/Back porch */
+ ICN6211_DSI(icn, HFP_HSW_HBP_HI,
+ HFP_HSW_HBP_HI_HFP(hfp) |
+ HFP_HSW_HBP_HI_HS(hsync) |
+ HFP_HSW_HBP_HI_HBP(hbp));
ICN6211_DSI(icn, VFP, mode->vsync_start - mode->vdisplay);
@@ -95,21 +204,21 @@ static void chipone_enable(struct drm_bridge *bridge)
ICN6211_DSI(icn, VBP, mode->vtotal - mode->vsync_end);
/* dsi specific sequence */
- ICN6211_DSI(icn, MIPI_DCS_SET_TEAR_OFF, 0x80);
- ICN6211_DSI(icn, MIPI_DCS_SET_ADDRESS_MODE, 0x28);
- ICN6211_DSI(icn, 0xb5, 0xa0);
- ICN6211_DSI(icn, 0x5c, 0xff);
- ICN6211_DSI(icn, MIPI_DCS_SET_COLUMN_ADDRESS, 0x01);
- ICN6211_DSI(icn, MIPI_DCS_GET_POWER_SAVE, 0x92);
- ICN6211_DSI(icn, 0x6b, 0x71);
- ICN6211_DSI(icn, 0x69, 0x2b);
- ICN6211_DSI(icn, MIPI_DCS_ENTER_SLEEP_MODE, 0x40);
- ICN6211_DSI(icn, MIPI_DCS_EXIT_SLEEP_MODE, 0x98);
+ ICN6211_DSI(icn, SYNC_EVENT_DLY, 0x80);
+ ICN6211_DSI(icn, HFP_MIN, hfp & 0xff);
+ ICN6211_DSI(icn, MIPI_PD_CK_LANE, 0xa0);
+ ICN6211_DSI(icn, PLL_CTRL(12), 0xff);
+ ICN6211_DSI(icn, BIST_POL, BIST_POL_DE_POL);
+ ICN6211_DSI(icn, PLL_CTRL(6), PLL_CTRL_6_MIPI_CLK);
+ ICN6211_DSI(icn, PLL_REF_DIV, 0x71);
+ ICN6211_DSI(icn, PLL_INT(0), 0x2b);
+ ICN6211_DSI(icn, SYS_CTRL(0), 0x40);
+ ICN6211_DSI(icn, SYS_CTRL(1), 0x98);
/* icn6211 specific sequence */
- ICN6211_DSI(icn, 0xb6, 0x20);
- ICN6211_DSI(icn, 0x51, 0x20);
- ICN6211_DSI(icn, 0x09, 0x10);
+ ICN6211_DSI(icn, MIPI_FORCE_0, 0x20);
+ ICN6211_DSI(icn, PLL_CTRL(1), 0x20);
+ ICN6211_DSI(icn, CONFIG_FINISH, 0x10);
usleep_range(10000, 11000);
}
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 06b59b422c69..64912b770086 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -227,7 +227,7 @@ static const struct regmap_range_cfg it66121_regmap_banks[] = {
.selector_mask = 0x1,
.selector_shift = 0,
.window_start = 0x00,
- .window_len = 0x130,
+ .window_len = 0x100,
},
};
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index a32f70bc68ea..c901c0e1a3b0 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -608,10 +608,14 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ctx->host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
- return -EINVAL;
- if (!ctx->host_node)
- return -ENODEV;
+ if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) {
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+ if (!ctx->host_node) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -638,16 +642,22 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
if (ret < 0)
- return ret;
+ goto err_put_node;
if (panel) {
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ if (IS_ERR(panel_bridge)) {
+ ret = PTR_ERR(panel_bridge);
+ goto err_put_node;
+ }
}
ctx->panel_bridge = panel_bridge;
return 0;
+
+err_put_node:
+ of_node_put(ctx->host_node);
+ return ret;
}
static int sn65dsi83_probe(struct i2c_client *client,
@@ -680,8 +690,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
return ret;
ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
- if (IS_ERR(ctx->regmap))
- return PTR_ERR(ctx->regmap);
+ if (IS_ERR(ctx->regmap)) {
+ ret = PTR_ERR(ctx->regmap);
+ goto err_put_node;
+ }
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
@@ -691,6 +703,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
drm_bridge_add(&ctx->bridge);
return 0;
+
+err_put_node:
+ of_node_put(ctx->host_node);
+ return ret;
}
static int sn65dsi83_remove(struct i2c_client *client)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ff2bc9a11801..aa09a19fae37 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -996,9 +996,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
return drm_atomic_crtc_effectively_active(old_state);
/*
- * We need to run through the crtc_funcs->disable() function if the CRTC
- * is currently on, if it's transitioning to self refresh mode, or if
- * it's in self refresh mode and needs to be fully disabled.
+ * We need to disable bridge(s) and CRTC if we're transitioning out of
+ * self-refresh and changing CRTCs at the same time, because the
+ * bridge tracks self-refresh status via CRTC state.
+ */
+ if (old_state->self_refresh_active &&
+ old_state->crtc != new_state->crtc)
+ return true;
+
+ /*
+ * We also need to run through the crtc_funcs->disable() function if
+ * the CRTC is currently on, if it's transitioning to self refresh
+ * mode, or if it's in self refresh mode and needs to be fully
+ * disabled.
*/
return old_state->active ||
(old_state->self_refresh_active && !new_state->active) ||
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 791379816837..4f20137ef21d 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -369,8 +369,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc);
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
- if (bridge_connector->bridge_hpd)
+ if (bridge_connector->bridge_hpd) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
+ drm_bridge_connector_enable_hpd(connector);
+ }
else if (bridge_connector->bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 86d13d6bc463..b3e3babe18c0 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4834,6 +4834,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
+ kfree(mst_edid);
}
/**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ee6f44f9a81c..6ab048ba8021 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1994,9 +1994,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
connector_bad_edid(connector, edid, edid[0x7e] + 1);
- edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
- edid[0x7e] = valid_extensions;
-
new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
GFP_KERNEL);
if (!new)
@@ -2013,6 +2010,9 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
base += EDID_LENGTH;
}
+ new[EDID_LENGTH - 1] += new[0x7e] - valid_extensions;
+ new[0x7e] = valid_extensions;
+
kfree(edid);
edid = new;
}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 82afb854141b..fd0bf90fb4c2 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -249,6 +249,13 @@ static int __drm_universal_plane_init(struct drm_device *dev,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ /*
+ * First driver to need more than 64 formats needs to fix this. Each
+ * format is encoded as a bit and the current code only supports a u64.
+ */
+ if (WARN_ON(format_count > 64))
+ return -EINVAL;
+
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
@@ -270,13 +277,6 @@ static int __drm_universal_plane_init(struct drm_device *dev,
return -ENOMEM;
}
- /*
- * First driver to need more than 64 formats needs to fix this. Each
- * format is encoded as a bit and the current code only supports a u64.
- */
- if (WARN_ON(format_count > 64))
- return -EINVAL;
-
if (format_modifiers) {
const uint64_t *temp_modifiers = format_modifiers;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 9fb1a2aadbcb..aabb997a74eb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -286,6 +286,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
mutex_lock(&context->lock);
+ /* Bail if the mapping has been reaped by another thread */
+ if (!mapping->context) {
+ mutex_unlock(&context->lock);
+ return;
+ }
+
/* If the vram node is on the mm, unmap and remove the node */
if (mapping->vram_node.mm == &context->mm)
etnaviv_iommu_remove_mapping(context, mapping);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 359606429316..6a578078e02f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -536,14 +536,15 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
- struct drm_crtc *crtc = NULL;
+ struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
if (gma_crtc->pipe == pipe)
- break;
+ return crtc;
}
- return crtc;
+ return NULL;
}
int gma_connector_clones(struct drm_device *dev, int type_mask)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 73076737add7..0e04d4dd1c13 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -375,6 +375,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
}
}
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+ const u32 *mmioaddr, u32 mmio_count,
+ int header_ver, u8 dmc_id)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ u32 start_range, end_range;
+ int i;
+
+ if (dmc_id >= DMC_FW_MAX) {
+ drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+ return false;
+ }
+
+ if (header_ver == 1) {
+ start_range = DMC_MMIO_START_RANGE;
+ end_range = DMC_MMIO_END_RANGE;
+ } else if (dmc_id == DMC_FW_MAIN) {
+ start_range = TGL_MAIN_MMIO_START;
+ end_range = TGL_MAIN_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 13) {
+ start_range = ADLP_PIPE_MMIO_START;
+ end_range = ADLP_PIPE_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ start_range = TGL_PIPE_MMIO_START(dmc_id);
+ end_range = TGL_PIPE_MMIO_END(dmc_id);
+ } else {
+ drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+ return false;
+ }
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+ return false;
+ }
+
+ return true;
+}
+
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
@@ -444,6 +482,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+ dmc_header->header_ver, dmc_id)) {
+ drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
+ return 0;
+ }
+
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index c2a2cd1f84dc..0a88088a11e8 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -121,9 +121,25 @@ struct i2c_adapter_lookup {
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9
-static enum port intel_dsi_seq_port_to_port(u8 port)
+static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
+ u8 seq_port)
{
- return port ? PORT_C : PORT_A;
+ /*
+ * If single link DSI is being used on any port, the VBT sequence block
+ * send packet apparently always has 0 for the port. Just use the port
+ * we have configured, and ignore the sequence block port.
+ */
+ if (hweight8(intel_dsi->ports) == 1)
+ return ffs(intel_dsi->ports) - 1;
+
+ if (seq_port) {
+ if (intel_dsi->ports & PORT_B)
+ return PORT_B;
+ else if (intel_dsi->ports & PORT_C)
+ return PORT_C;
+ }
+
+ return PORT_A;
}
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
@@ -145,15 +161,10 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
- /* For DSI single link on Port A & C, the seq_port value which is
- * parsed from Sequence Block#53 of VBT has been set to 0
- * Now, read/write of packets for the DSI single link on Port A and
- * Port C will based on the DVO port from VBT block 2.
- */
- if (intel_dsi->ports == (1 << PORT_C))
- port = PORT_C;
- else
- port = intel_dsi_seq_port_to_port(seq_port);
+ port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
+
+ if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
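Note (not part of the patch): the new intel_dsi_seq_port_to_port() above relies on the fact that for single-link DSI exactly one bit is set in intel_dsi->ports, so the port index can be recovered directly from the mask. A standalone sketch of that mapping; the enum values are stand-ins for the i915 definitions:

#include <stdio.h>
#include <strings.h>    /* ffs() */

enum port { PORT_A, PORT_B, PORT_C };   /* stand-ins for the i915 enum */

int main(void)
{
	unsigned int ports = 1u << PORT_C;  /* single-link DSI on port C */

	/* hweight8(ports) == 1 in the driver; popcount plays that role here. */
	if (__builtin_popcount(ports) == 1)
		printf("port index = %d\n", ffs(ports) - 1);  /* prints 2, i.e. PORT_C */
	return 0;
}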
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 08c20869d7e9..f7f49b69830f 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -376,21 +376,6 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
return -EINVAL;
}
- /*
- * The port numbering and mapping here is bizarre. The now-obsolete
- * swsci spec supports ports numbered [0..4]. Port E is handled as a
- * special case, but port F and beyond are not. The functionality is
- * supposed to be obsolete for new platforms. Just bail out if the port
- * number is out of bounds after mapping.
- */
- if (port > 4) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
- intel_encoder->base.base.id, intel_encoder->base.name,
- port_name(intel_encoder->port), port);
- return -EINVAL;
- }
-
if (!enable)
parm |= 4 << 8;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2f01b8c0284c..f3c8f87d25ae 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4008,8 +4008,8 @@ addr_err:
return ERR_PTR(err);
}
-static ssize_t show_dynamic_id(struct device *dev,
- struct device_attribute *attr,
+static ssize_t show_dynamic_id(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
struct i915_oa_config *oa_config =
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index aa14354a5120..f682c7a6474d 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -55,7 +55,7 @@ struct i915_oa_config {
struct attribute_group sysfs_metric;
struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
+ struct kobj_attribute sysfs_metric_id;
struct kref ref;
struct rcu_head rcu;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bb64e7baa1cc..3c70aa5229e5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7818,6 +7818,22 @@ enum {
/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
#define DMC_MMIO_START_RANGE 0x80000
#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+ _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+ _TGL_PIPEB_MMIO_END)
+
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cdf0e9c6fd73..313c0000a814 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -445,7 +445,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
- ssize_t ret;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
@@ -457,8 +464,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
const char *str = "No error state collected\n";
size_t len = strlen(str);
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
}
return ret;
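Note (not part of the patch): the error_state_read() change above guards the "No error state collected" fallback so that a read with an offset at or past the end of the string returns 0 instead of computing a length from len - off after the string has been exhausted. A userspace model of that length handling:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_str(char *buf, size_t count, long off)
{
	const char *str = "No error state collected\n";
	size_t len = strlen(str);
	ssize_t ret = 0;

	if (off >= 0 && (size_t)off < len) {
		size_t n = len - (size_t)off;

		ret = count < n ? count : n;    /* min_t(size_t, count, len - off) */
		memcpy(buf, str + off, ret);
	}
	return ret;                             /* 0 once off >= len: reader sees EOF */
}

int main(void)
{
	char buf[64];

	printf("%zd %zd\n", read_str(buf, sizeof(buf), 0),
	       read_str(buf, sizeof(buf), 100));
	return 0;
}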
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 57c1dda76b94..1a27e4833adf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2863,7 +2863,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[8])
+ u16 wm[])
{
struct intel_uncore *uncore = &dev_priv->uncore;
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index adcc381be200..cf35c2e10097 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
@@ -70,7 +70,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index e9cef5c0c8f7..cdfa648910b2 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -85,7 +85,7 @@ static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
u32 tmp = readl(cec->regs + offset) & ~mask;
tmp |= val & mask;
- writel(val, cec->regs + offset);
+ writel(tmp, cec->regs + offset);
}
void mtk_cec_set_hpd_event(struct device *dev,
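Note (not part of the patch): the one-line mtk_cec_mask() fix above writes back the merged value instead of the raw argument, so bits outside the mask survive the update. A standalone model of the read-modify-write helper, using a plain variable in place of the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg = 0xffff0000;       /* pretend register with other fields set */

static void update_bits(uint32_t val, uint32_t mask)
{
	uint32_t tmp = reg & ~mask;     /* keep everything outside the field */

	tmp |= val & mask;              /* merge in the new field value */
	reg = tmp;                      /* the bug wrote 'val' here, wiping 0xffff0000 */
}

int main(void)
{
	update_bits(0x5, 0xf);
	printf("reg = %08x\n", (unsigned int)reg);  /* 0xffff0005 with the fix */
	return 0;
}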
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 4554e2de1430..e61cd67b978f 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -819,8 +819,8 @@ static const struct mtk_dpi_conf mt8192_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
- .output_fmts = mt8173_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8173_output_fmts),
+ .output_fmts = mt8183_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
};
static int mtk_dpi_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 904535eda0c4..91b09cda8a9c 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -19,7 +19,7 @@ msm-y := \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
- hdmi/hdmi_connector.o \
+ hdmi/hdmi_hpd.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9b41e2f82fc2..c0dec5b919d4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1872,6 +1872,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
BUG_ON(!node);
ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 6bde3e234ec8..5f236395677e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -168,6 +168,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
+ * @dp: msm_dp pointer, for DP encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -206,6 +207,8 @@ struct dpu_encoder_virt {
struct msm_display_topology topology;
u32 idle_timeout;
+
+ struct msm_dp *dp;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -1000,8 +1003,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
- msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS)
+ msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode);
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
@@ -1182,9 +1185,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- ret = msm_dp_display_enable(priv->dp,
- drm_enc);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ ret = msm_dp_display_enable(dpu_enc->dp, drm_enc);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
ret);
@@ -1224,8 +1226,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- if (msm_dp_display_pre_disable(priv->dp, drm_enc))
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
}
@@ -1253,8 +1255,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- if (msm_dp_display_disable(priv->dp, drm_enc))
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ if (msm_dp_display_disable(dpu_enc->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
}
@@ -2170,7 +2172,8 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
-
+ else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
+ dpu_enc->dp = priv->dp;
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 116e2b5b1a90..284f5610dc35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -148,6 +148,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
active_hctl = (active_h_end << 16) | active_h_start;
display_hctl = active_hctl;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 93d916858d5a..8b7693883e7c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -728,8 +728,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ }
}
}
@@ -999,7 +1001,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms_parse_data_bus_icc_path(dpu_kms);
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
+ if (rc < 0)
+ goto error;
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1184,7 +1188,7 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
priv->kms = &dpu_kms->base;
- return ret;
+ return 0;
}
static void dpu_unbind(struct device *dev, struct device *master, void *data)
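dpu_kms_hw_init() now uses pm_runtime_resume_and_get(), which, unlike pm_runtime_get_sync(), returns a negative errno and drops the usage count itself when the resume fails, so the caller can simply bail out. A small, hypothetical sketch of that calling convention (function name and register read are illustrative only):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

static int example_read_hw_rev(struct device *dev, void __iomem *mmio, u32 *rev)
{
	int rc;

	/* on failure the usage count is already balanced; just return */
	rc = pm_runtime_resume_and_get(dev);
	if (rc < 0)
		return rc;

	*rev = readl_relaxed(mmio);

	pm_runtime_put_sync(dev);
	return 0;
}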
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index bb7d066618e6..0e02e252ff89 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -612,9 +612,15 @@ static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
if (ret)
return ret;
- mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
if (old_r_mixer) {
- mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
if (!need_right_mixer)
pipeline->r_mixer = NULL;
}
@@ -983,8 +989,10 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
- if (ret)
+ if (ret) {
+ drm_gem_object_put(cursor_bo);
return -EINVAL;
+ }
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index b3b42672b2d4..a2b276ae9673 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -598,9 +598,9 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (irq < 0) {
- ret = irq;
- DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
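Several hunks in this patch (mdp5_kms, dp_display, hdmi) replace "irq < 0" checks with "!irq": irq_of_parse_and_map() returns an unsigned virq and reports failure as 0, never as a negative errno. Hypothetical sketch:

#include <linux/of.h>
#include <linux/of_irq.h>

static int example_get_irq(struct device_node *np)
{
	unsigned int irq;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)	/* 0 means "no mapping found", not an errno */
		return -EINVAL;

	return irq;
}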
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 954db683ae44..2536def2a000 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -116,21 +116,28 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
return 0;
}
-void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
- struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
+ struct mdp5_hw_mixer_state *new_state;
if (!mixer)
- return;
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
}
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 43c9ba43ce18..545ee223b9d7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -30,7 +30,7 @@ void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
-void mdp5_mixer_release(struct drm_atomic_state *s,
- struct mdp5_hw_mixer *mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ba6695963aa6..a4f5cb90f3e8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -119,18 +119,23 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
return 0;
}
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *state = mdp5_get_global_state(s);
- struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+ struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
- return;
+ return 0;
+
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from plane %s", hwpipe->name,
new_state->hwpipe_to_plane[hwpipe->idx]->name);
@@ -141,6 +146,8 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
}
new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
}
void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index 9b26d0761bd4..cca67938cab2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -37,7 +37,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe);
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 50e854207c70..c0d947bce9e9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -297,12 +297,24 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
mdp5_state->r_hwpipe = NULL;
- mdp5_pipe_release(state->state, old_hwpipe);
- mdp5_pipe_release(state->state, old_right_hwpipe);
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
}
} else {
- mdp5_pipe_release(state->state, mdp5_state->hwpipe);
- mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 4af281d97493..1992347537e6 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1515,7 +1515,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
@@ -1682,8 +1682,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
- ctrl->link->phy_params.p_level = 0;
- ctrl->link->phy_params.v_level = 0;
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
@@ -1805,12 +1803,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
}
}
- if (!dp_ctrl_channel_eq_ok(ctrl))
- dp_ctrl_link_retrain(ctrl);
-
- /* stop txing train pattern to end link training */
- dp_ctrl_clear_training_pattern(ctrl);
-
ret = dp_ctrl_enable_stream_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
@@ -1822,6 +1814,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
return 0;
}
+ if (!dp_ctrl_channel_eq_ok(ctrl))
+ dp_ctrl_link_retrain(ctrl);
+
+ /* stop txing train pattern to end link training */
+ dp_ctrl_clear_training_pattern(ctrl);
+
/*
* Set up transfer unit values and set controller state to send
* video.
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d5198b435638..7b624191abf1 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -110,6 +110,7 @@ struct dp_display_private {
u32 hpd_state;
u32 event_pndx;
u32 event_gndx;
+ struct task_struct *ev_tsk;
struct dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
@@ -193,6 +194,8 @@ void dp_display_signal_audio_complete(struct msm_dp *dp_display)
complete_all(&dp->audio_comp);
}
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
+
static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
@@ -230,9 +233,18 @@ static int dp_display_bind(struct device *dev, struct device *master,
}
rc = dp_register_audio_driver(dev, dp->audio);
- if (rc)
+ if (rc) {
DRM_ERROR("Audio registration Dp failed\n");
+ goto end;
+ }
+ rc = dp_hpd_event_thread_start(dp);
+ if (rc) {
+ DRM_ERROR("Event thread create failed\n");
+ goto end;
+ }
+
+ return 0;
end:
return rc;
}
@@ -247,6 +259,11 @@ static void dp_display_unbind(struct device *dev, struct device *master,
dp = container_of(g_dp_display,
struct dp_display_private, dp_display);
+ /* disable all HPD interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+
+ kthread_stop(dp->ev_tsk);
+
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
priv->dp = NULL;
@@ -1055,12 +1072,17 @@ static int hpd_event_thread(void *data)
while (1) {
if (timeout_mode) {
wait_event_timeout(dp_priv->event_q,
- (dp_priv->event_pndx == dp_priv->event_gndx),
- EVENT_TIMEOUT);
+ (dp_priv->event_pndx == dp_priv->event_gndx) ||
+ kthread_should_stop(), EVENT_TIMEOUT);
} else {
wait_event_interruptible(dp_priv->event_q,
- (dp_priv->event_pndx != dp_priv->event_gndx));
+ (dp_priv->event_pndx != dp_priv->event_gndx) ||
+ kthread_should_stop());
}
+
+ if (kthread_should_stop())
+ break;
+
spin_lock_irqsave(&dp_priv->event_lock, flag);
todo = &dp_priv->event_list[dp_priv->event_gndx];
if (todo->delay) {
@@ -1130,12 +1152,17 @@ static int hpd_event_thread(void *data)
return 0;
}
-static void dp_hpd_event_setup(struct dp_display_private *dp_priv)
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
{
- init_waitqueue_head(&dp_priv->event_q);
- spin_lock_init(&dp_priv->event_lock);
+ /* set event q to empty */
+ dp_priv->event_gndx = 0;
+ dp_priv->event_pndx = 0;
- kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ if (IS_ERR(dp_priv->ev_tsk))
+ return PTR_ERR(dp_priv->ev_tsk);
+
+ return 0;
}
static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
@@ -1194,10 +1221,9 @@ int dp_display_request_irq(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
- if (dp->irq < 0) {
- rc = dp->irq;
- DRM_ERROR("failed to get irq: %d\n", rc);
- return rc;
+ if (!dp->irq) {
+ DRM_ERROR("failed to get irq\n");
+ return -EINVAL;
}
rc = devm_request_irq(&dp->pdev->dev, dp->irq,
@@ -1236,8 +1262,11 @@ static int dp_display_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ /* setup event q */
mutex_init(&dp->event_mutex);
g_dp_display = &dp->dp_display;
+ init_waitqueue_head(&dp->event_q);
+ spin_lock_init(&dp->event_lock);
/* Store DP audio handle inside DP display */
g_dp_display->dp_audio = dp->audio;
@@ -1414,8 +1443,6 @@ void msm_dp_irq_postinstall(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp_hpd_event_setup(dp);
-
dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
}
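The dp_display changes make the HPD event thread a managed kthread: kthread_run() failure is reported via ERR_PTR(), the wait conditions also wake on kthread_should_stop(), and unbind stops the thread with kthread_stop(). A self-contained, hypothetical sketch of that lifecycle (struct example_ctx and its fields are invented for illustration):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct example_ctx {
	struct task_struct *task;
	wait_queue_head_t event_q;
	bool pending;
};

static int example_event_thread(void *data)
{
	struct example_ctx *ctx = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(ctx->event_q,
					 ctx->pending || kthread_should_stop());
		if (kthread_should_stop())
			break;
		ctx->pending = false;	/* ...handle one queued event... */
	}
	return 0;
}

static int example_start(struct example_ctx *ctx)
{
	init_waitqueue_head(&ctx->event_q);
	ctx->task = kthread_run(example_event_thread, ctx, "example_hpd");
	return IS_ERR(ctx->task) ? PTR_ERR(ctx->task) : 0;
}

/* teardown: kthread_stop(ctx->task) wakes the wait and joins the thread */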
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index eea679a52e86..eb60ce125a1f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1375,10 +1375,10 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
dsi_get_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host, msg);
- if (!len) {
+ if (len < 0) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
- return -EINVAL;
+ return len;
}
/* for video mode, do not send cmds more than
@@ -1397,10 +1397,14 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
}
ret = dsi_cmd_dma_tx(msm_host, len);
- if (ret < len) {
- pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
- __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
- return -ECOMM;
+ if (ret < 0) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
+ return ret;
+ } else if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
+ return -EIO;
}
return len;
@@ -2135,9 +2139,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
}
ret = dsi_cmds2buf_tx(msm_host, msg);
- if (ret < msg->tx_len) {
+ if (ret < 0) {
pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
return ret;
+ } else if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+ return -ECOMM;
}
/*
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 1c1e9861b93f..6d3abcdc57bf 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -1064,6 +1064,6 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
- .io_start = { 0xc994400, 0xc996000 },
+ .io_start = { 0xc994400, 0xc996400 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 94f948ef279d..23fb88b53324 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,6 +8,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <drm/drm_bridge_connector.h>
+
#include <sound/hdmi-codec.h>
#include "hdmi.h"
@@ -41,7 +43,7 @@ static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
struct hdmi *hdmi = dev_id;
/* Process HPD: */
- msm_hdmi_connector_irq(hdmi->connector);
+ msm_hdmi_hpd_irq(hdmi->bridge);
/* Process DDC: */
msm_hdmi_i2c_irq(hdmi->i2c);
@@ -142,6 +144,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
/* HDCP needs physical address of hdmi register */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
config->mmio_name);
+ if (!res) {
+ ret = -EINVAL;
+ goto fail;
+ }
hdmi->mmio_phy_addr = res->start;
hdmi->qfprom_mmio = msm_ioremap(pdev,
@@ -302,7 +308,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- hdmi->connector = msm_hdmi_connector_init(hdmi);
+ hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
@@ -310,10 +316,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
+ drm_connector_attach_encoder(hdmi->connector, hdmi->encoder);
+
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
+ if (!hdmi->irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
goto fail;
}
@@ -326,7 +334,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->connector);
+ drm_bridge_connector_enable_hpd(hdmi->connector);
+
+ ret = msm_hdmi_hpd_enable(hdmi->bridge);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0b84f0abee1..8d2706bec3b9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -114,6 +114,13 @@ struct hdmi_platform_config {
struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
};
+struct hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+ struct work_struct hpd_work;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
@@ -230,13 +237,11 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
-/*
- * hdmi connector:
- */
-
-void msm_hdmi_connector_irq(struct drm_connector *connector);
-struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
-int msm_hdmi_hpd_enable(struct drm_connector *connector);
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge);
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 6e380db9287b..efcfdd70a02e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -5,17 +5,16 @@
*/
#include <linux/delay.h>
+#include <drm/drm_bridge_connector.h>
+#include "msm_kms.h"
#include "hdmi.h"
-struct hdmi_bridge {
- struct drm_bridge base;
- struct hdmi *hdmi;
-};
-#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
-
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+
+ msm_hdmi_hpd_disable(hdmi_bridge);
}
static void msm_hdmi_power_on(struct drm_bridge *bridge)
@@ -259,14 +258,76 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
}
+static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+
+ return edid;
+}
+
+static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct msm_drm_private *priv = bridge->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_bridge->hdmi->encoder);
+
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.pre_enable = msm_hdmi_bridge_pre_enable,
.enable = msm_hdmi_bridge_enable,
.disable = msm_hdmi_bridge_disable,
.post_disable = msm_hdmi_bridge_post_disable,
.mode_set = msm_hdmi_bridge_mode_set,
+ .mode_valid = msm_hdmi_bridge_mode_valid,
+ .get_edid = msm_hdmi_bridge_get_edid,
+ .detect = msm_hdmi_bridge_detect,
};
+static void
+msm_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct hdmi_bridge *hdmi_bridge =
+ container_of(work, struct hdmi_bridge, hpd_work);
+ struct drm_bridge *bridge = &hdmi_bridge->base;
+
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+}
/* initialize bridge */
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
@@ -283,11 +344,17 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
}
hdmi_bridge->hdmi = hdmi;
+ INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
+ bridge->ddc = hdmi->i2c;
+ bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->ops = DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto fail;
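The HDMI rework hands connector duties to the bridge: it advertises DRM_BRIDGE_OP_HPD, DRM_BRIDGE_OP_DETECT and DRM_BRIDGE_OP_EDID, attaches with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and the driver then creates the connector with drm_bridge_connector_init(). A hypothetical outline of that wiring; the function name and argument list are invented for illustration:

#include <linux/err.h>
#include <linux/i2c.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>

static int example_modeset_init(struct drm_device *drm,
				struct drm_encoder *encoder,
				struct drm_bridge *bridge,
				struct i2c_adapter *ddc)
{
	struct drm_connector *connector;
	int ret;

	bridge->ddc = ddc;
	bridge->type = DRM_MODE_CONNECTOR_HDMIA;
	bridge->ops = DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT |
		      DRM_BRIDGE_OP_EDID;

	/* the bridge creates no connector; the driver builds one itself */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}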
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 58707a1f3878..c3a236bb952c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -11,13 +11,6 @@
#include "msm_kms.h"
#include "hdmi.h"
-struct hdmi_connector {
- struct drm_connector base;
- struct hdmi *hdmi;
- struct work_struct hpd_work;
-};
-#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
-
static void msm_hdmi_phy_reset(struct hdmi *hdmi)
{
unsigned int val;
@@ -139,10 +132,10 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-int msm_hdmi_hpd_enable(struct drm_connector *connector)
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
@@ -202,9 +195,9 @@ fail:
return ret;
}
-static void hdp_disable(struct hdmi_connector *hdmi_connector)
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
{
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int i, ret = 0;
@@ -233,19 +226,10 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
}
}
-static void
-msm_hdmi_hotplug_work(struct work_struct *work)
-{
- struct hdmi_connector *hdmi_connector =
- container_of(work, struct hdmi_connector, hpd_work);
- struct drm_connector *connector = &hdmi_connector->base;
- drm_helper_hpd_irq_event(connector->dev);
-}
-
-void msm_hdmi_connector_irq(struct drm_connector *connector)
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
/* Process HPD: */
@@ -268,7 +252,7 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- queue_work(hdmi->workq, &hdmi_connector->hpd_work);
+ queue_work(hdmi->workq, &hdmi_bridge->hpd_work);
}
}
@@ -299,11 +283,11 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
connector_status_disconnected;
}
-static enum drm_connector_status hdmi_connector_detect(
- struct drm_connector *connector, bool force)
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
enum drm_connector_status stat_gpio, stat_reg;
@@ -337,115 +321,3 @@ static enum drm_connector_status hdmi_connector_detect(
return stat_gpio;
}
-
-static void hdmi_connector_destroy(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-
- hdp_disable(hdmi_connector);
-
- drm_connector_cleanup(connector);
-
- kfree(hdmi_connector);
-}
-
-static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
- struct edid *edid;
- uint32_t hdmi_ctrl;
- int ret = 0;
-
- hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
-
- edid = drm_get_edid(connector, hdmi->i2c);
-
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return ret;
-}
-
-static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_connector->hdmi->encoder);
-
- /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
- * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
- * instead):
- */
- if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], actual);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return 0;
-}
-
-static const struct drm_connector_funcs hdmi_connector_funcs = {
- .detect = hdmi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
- .get_modes = msm_hdmi_connector_get_modes,
- .mode_valid = msm_hdmi_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
-{
- struct drm_connector *connector = NULL;
- struct hdmi_connector *hdmi_connector;
-
- hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
- if (!hdmi_connector)
- return ERR_PTR(-ENOMEM);
-
- hdmi_connector->hdmi = hdmi;
- INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
-
- connector = &hdmi_connector->base;
-
- drm_connector_init_with_ddc(hdmi->dev, connector,
- &hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->i2c);
- drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, hdmi->encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index bbf999c66517..9712582886aa 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -236,6 +237,8 @@ static int msm_irq_postinstall(struct drm_device *dev)
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
int ret;
if (irq == IRQ_NOTCONNECTED)
@@ -247,6 +250,8 @@ static int msm_irq_install(struct drm_device *dev, unsigned int irq)
if (ret)
return ret;
+ kms->irq_requested = true;
+
ret = msm_irq_postinstall(dev);
if (ret) {
free_irq(irq, dev);
@@ -262,7 +267,8 @@ static void msm_irq_uninstall(struct drm_device *dev)
struct msm_kms *kms = priv->kms;
kms->funcs->irq_uninstall(kms);
- free_irq(kms->irq, dev);
+ if (kms->irq_requested)
+ free_irq(kms->irq, dev);
}
struct msm_vblank_work {
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index fc94e061d6a7..8a2d94bd5df2 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -17,7 +17,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
- return NULL;
+ return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index de2bc3467bb5..afa30e2ba1f1 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -149,6 +149,7 @@ struct msm_kms {
/* irq number to be passed on to msm_irq_install */
int irq;
+ bool irq_requested;
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 3d82b3c67dec..93f8f4f64578 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -160,14 +160,14 @@ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
static inline struct drm_encoder *
nv50_head_atom_get_encoder(struct nv50_head_atom *atom)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
/* We only ever have a single encoder */
drm_for_each_encoder_mask(encoder, atom->state.crtc->dev,
atom->state.encoder_mask)
- break;
+ return encoder;
- return encoder;
+ return NULL;
}
#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 66f32d965c72..5624a716e11c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -411,9 +411,18 @@ void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_outp_atom *outp_atom;
- struct nouveau_encoder *outp =
- nv50_real_outp(nv50_head_atom_get_encoder(armh));
- struct drm_encoder *encoder = &outp->base.base;
+ struct nouveau_encoder *outp;
+ struct drm_encoder *encoder, *enc;
+
+ enc = nv50_head_atom_get_encoder(armh);
+ if (!enc)
+ continue;
+
+ outp = nv50_real_outp(enc);
+ if (!outp)
+ continue;
+
+ encoder = &outp->base.base;
if (!asyh->clr.crc)
continue;
@@ -464,8 +473,16 @@ void nv50_crc_atomic_set(struct nv50_head *head,
struct drm_device *dev = crtc->dev;
struct nv50_crc *crc = &head->crc;
const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
- struct nouveau_encoder *outp =
- nv50_real_outp(nv50_head_atom_get_encoder(asyh));
+ struct nouveau_encoder *outp;
+ struct drm_encoder *encoder;
+
+ encoder = nv50_head_atom_get_encoder(asyh);
+ if (!encoder)
+ return;
+
+ outp = nv50_real_outp(encoder);
+ if (!outp)
+ return;
func->set_src(head, outp->or,
nv50_crc_source_type(outp, asyh->crc.src),
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 1665738948fb..96113c8bee8c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -62,4 +62,6 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG, info, f, ##a)
#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE, info, f, ##a)
#define nvkm_spam(s,f,a...) nvkm_printk((s), SPAM, dbg, f, ##a)
+
+#define nvkm_error_ratelimited(s,f,a...) nvkm_printk((s), ERROR, err_ratelimited, f, ##a)
#endif
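nvkm_error_ratelimited() follows the kernel's standard approach for interrupt-path diagnostics: route them through the *_ratelimited printk helpers so a flood of MMIO faults cannot drown the log. A generic, hypothetical equivalent using the core device helpers:

#include <linux/device.h>
#include <linux/types.h>

static void example_report_fault(struct device *dev, u32 addr, u32 data)
{
	/* allows a short burst per interval, then drops further messages */
	dev_err_ratelimited(dev, "MMIO fault at %06x (data %08x)\n",
			    addr, data);
}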
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index 53a6651ac225..80b5aaceeaad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -35,13 +35,13 @@ gf100_bus_intr(struct nvkm_bus *bus)
u32 addr = nvkm_rd32(device, 0x009084);
u32 data = nvkm_rd32(device, 0x009088);
- nvkm_error(subdev,
- "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
- (addr & 0x00000002) ? "write" : "read", data,
- (addr & 0x00fffffc),
- (stat & 0x00000002) ? "!ENGINE " : "",
- (stat & 0x00000004) ? "PRIVRING " : "",
- (stat & 0x00000008) ? "TIMEOUT " : "");
+ nvkm_error_ratelimited(subdev,
+ "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc),
+ (stat & 0x00000002) ? "!ENGINE " : "",
+ (stat & 0x00000004) ? "PRIVRING " : "",
+ (stat & 0x00000008) ? "TIMEOUT " : "");
nvkm_wr32(device, 0x009084, 0x00000000);
nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index ad8da523bb22..c75e463f3501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -45,9 +45,9 @@ nv31_bus_intr(struct nvkm_bus *bus)
u32 addr = nvkm_rd32(device, 0x009084);
u32 data = nvkm_rd32(device, 0x009088);
- nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
- (addr & 0x00000002) ? "write" : "read", data,
- (addr & 0x00fffffc));
+ nvkm_error_ratelimited(subdev, "MMIO %s of %08x FAULT at %06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
stat &= ~0x00000008;
nvkm_wr32(device, 0x001100, 0x00000008);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 3a1e45adeedc..2055d0b100d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -60,9 +60,9 @@ nv50_bus_intr(struct nvkm_bus *bus)
u32 addr = nvkm_rd32(device, 0x009084);
u32 data = nvkm_rd32(device, 0x009088);
- nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
- (addr & 0x00000002) ? "write" : "read", data,
- (addr & 0x00fffffc));
+ nvkm_error_ratelimited(subdev, "MMIO %s of %08x FAULT at %06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
stat &= ~0x00000008;
nvkm_wr32(device, 0x001100, 0x00000008);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 57199be082fd..c2b5cc5f97ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -135,10 +135,10 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
- break;
+ return cstate;
}
- return cstate;
+ return NULL;
}
static struct nvkm_cstate *
@@ -169,6 +169,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
if (!list_empty(&pstate->list)) {
cstate = nvkm_cstate_get(clk, pstate, cstatei);
cstate = nvkm_cstate_find_best(clk, pstate, cstate);
+ if (!cstate)
+ return -EINVAL;
} else {
cstate = &pstate->base;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 4f78bbf63f33..bbbccdcd3a90 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -862,7 +862,7 @@ static const struct drm_display_mode ampire_am_1280800n3tzqw_t00h_mode = {
static const struct panel_desc ampire_am_1280800n3tzqw_t00h = {
.modes = &ampire_am_1280800n3tzqw_t00h_mode,
.num_modes = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
@@ -2522,6 +2522,7 @@ static const struct panel_desc innolux_g070y2_l01 = {
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1546abcadacf..d157bb9072e8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a25b98b7f5bd..8b4287d40379 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -2116,10 +2116,10 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
vop_win_init(vop);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vop->len = resource_size(res);
vop->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ vop->len = resource_size(res);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 195de30eb90c..9d235b60b428 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -528,8 +528,8 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct drm_connector_list_iter iter;
struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_bridge *bridge = NULL;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_bridge *bridge = NULL, *br_iter;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
@@ -538,15 +538,19 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
int ret;
/* get encoder from crtc */
- drm_for_each_encoder(encoder, ddev)
- if (encoder->crtc == crtc)
+ drm_for_each_encoder(en_iter, ddev)
+ if (en_iter->crtc == crtc) {
+ encoder = en_iter;
break;
+ }
if (encoder) {
/* get bridge from encoder */
- list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
- if (bridge->encoder == encoder)
+ list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
+ if (br_iter->encoder == encoder) {
+ bridge = br_iter;
break;
+ }
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 7594cf6e186e..3b86d002ef62 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -60,11 +60,13 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
int tilcdc_add_component_encoder(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = NULL, *iter;
- list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
- if (encoder->possible_crtcs & (1 << priv->crtc->index))
+ list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
+ if (iter->possible_crtcs & (1 << priv->crtc->index)) {
+ encoder = iter;
break;
+ }
if (!encoder) {
dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
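The nouveau atom.h, stm/ltdc and tilcdc hunks all apply the same hardening: the list-iterator variable is not a valid entry once the loop finishes without a match, so the match is copied into a separate result pointer inside the loop and only that pointer is used afterwards. A hypothetical sketch of the pattern:

#include <linux/list.h>

struct example_item {
	struct list_head head;
	int id;
};

static struct example_item *example_find(struct list_head *items, int id)
{
	struct example_item *found = NULL, *iter;

	list_for_each_entry(iter, items, head) {
		if (iter->id == id) {
			found = iter;	/* remember the match explicitly */
			break;
		}
	}

	return found;	/* NULL when nothing matched */
}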
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 0288ef063513..f6a88abccc7d 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -25,11 +25,12 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
{
unsigned int i;
u32 mask;
- u8 ncounters = perfmon->ncounters;
+ u8 ncounters;
if (WARN_ON_ONCE(!perfmon || v3d->active_perfmon))
return;
+ ncounters = perfmon->ncounters;
mask = GENMASK(ncounters - 1, 0);
for (i = 0; i < ncounters; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 3e61184e194c..88dbb282d15c 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -123,7 +123,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
*vpos /= 2;
/* Use hpos to correct for field offset in interlaced mode. */
- if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2)
+ if (vc4_hvs_get_fifo_frame_count(dev, vc4_crtc_state->assigned_channel) % 2)
*hpos += mode->crtc_htotal / 2;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 4b550ebd9572..94c178738fc1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -933,6 +933,7 @@ void vc4_irq_reset(struct drm_device *dev);
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
+u8 vc4_hvs_get_fifo_frame_count(struct drm_device *dev, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 604933e20e6a..9d88bfb50c9b 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -197,6 +197,29 @@ static void vc4_hvs_update_gamma_lut(struct drm_crtc *crtc)
vc4_hvs_lut_load(crtc);
}
+u8 vc4_hvs_get_fifo_frame_count(struct drm_device *dev, unsigned int fifo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u8 field = 0;
+
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER_DISPSTAT2_FRCNT2);
+ break;
+ }
+
+ return field;
+}
+
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -582,6 +605,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
+ u32 reg;
hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -653,6 +677,26 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
vc4->hvs = hvs;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
+ HVS_WRITE(SCALER_DISPECTRL,
+ reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL,
+ reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
+ HVS_WRITE(SCALER_DISPEOLN,
+ reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
+ HVS_WRITE(SCALER_DISPDITHER,
+ reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));
+
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
@@ -660,10 +704,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
- * be unused.
- */
- dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
SCALER_DISPCTRL_SLVWREIRQ |
SCALER_DISPCTRL_SLVRDEIRQ |
@@ -677,7 +717,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DSPEISLUR(1) |
SCALER_DISPCTRL_DSPEISLUR(2) |
SCALER_DISPCTRL_SCLEIRQ);
- dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 489f921ef44d..8ac2f088106a 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -379,8 +379,6 @@
# define SCALER_DISPSTATX_MODE_EOF 3
# define SCALER_DISPSTATX_FULL BIT(29)
# define SCALER_DISPSTATX_EMPTY BIT(28)
-# define SCALER_DISPSTATX_FRAME_COUNT_MASK VC4_MASK(17, 12)
-# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT 12
# define SCALER_DISPSTATX_LINE_MASK VC4_MASK(11, 0)
# define SCALER_DISPSTATX_LINE_SHIFT 0
@@ -403,9 +401,15 @@
(x) * (SCALER_DISPBKGND1 - \
SCALER_DISPBKGND0))
#define SCALER_DISPSTAT1 0x00000058
+# define SCALER_DISPSTAT1_FRCNT0_MASK VC4_MASK(23, 18)
+# define SCALER_DISPSTAT1_FRCNT0_SHIFT 18
+# define SCALER_DISPSTAT1_FRCNT1_MASK VC4_MASK(17, 12)
+# define SCALER_DISPSTAT1_FRCNT1_SHIFT 12
+
#define SCALER_DISPSTATX(x) (SCALER_DISPSTAT0 + \
(x) * (SCALER_DISPSTAT1 - \
SCALER_DISPSTAT0))
+
#define SCALER_DISPBASE1 0x0000005c
#define SCALER_DISPBASEX(x) (SCALER_DISPBASE0 + \
(x) * (SCALER_DISPBASE1 - \
@@ -415,7 +419,11 @@
(x) * (SCALER_DISPCTRL1 - \
SCALER_DISPCTRL0))
#define SCALER_DISPBKGND2 0x00000064
+
#define SCALER_DISPSTAT2 0x00000068
+# define SCALER_DISPSTAT2_FRCNT2_MASK VC4_MASK(17, 12)
+# define SCALER_DISPSTAT2_FRCNT2_SHIFT 12
+
#define SCALER_DISPBASE2 0x0000006c
#define SCALER_DISPALPHA2 0x00000070
#define SCALER_GAMADDR 0x00000078
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 9809ca3e2945..82beb8c159f2 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -298,12 +298,18 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
return;
- ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ ctrl = TXP_GO | TXP_EI |
VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
+ else
+ /*
+ * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
+ * hardware will force the output padding to be 0xff.
+ */
+ ctrl |= TXP_ALPHA_INVERT;
gem = drm_fb_cma_get_gem_obj(fb, 0);
TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a6caebd4a0dd..ef1f19083cd3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -179,6 +179,8 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
DRM_DEBUG("add mode: %dx%d\n", width, height);
mode = drm_cvt_mode(connector->dev, width, height, 60,
false, false, false);
+ if (!mode)
+ return count;
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
count++;
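Both the radeon and virtio-gpu hunks add NULL checks on drm_mode_duplicate()/drm_cvt_mode(), since mode allocation can fail. A hypothetical sketch of the get_modes-style usage (function name and resolution handling are illustrative):

#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>

static int example_add_mode(struct drm_connector *connector, int w, int h)
{
	struct drm_display_mode *mode;

	mode = drm_cvt_mode(connector->dev, w, h, 60, false, false, false);
	if (!mode)	/* allocation can fail; do not dereference */
		return 0;

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);
	return 1;	/* one mode added */
}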
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 50c64e7813be..171e90c4b9f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -916,6 +916,15 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg(&dev_priv->drm,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
+
/* Surface must be marked as a scanout. */
if (unlikely(!surface->metadata.scanout))
return -EINVAL;
@@ -1229,20 +1238,13 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
return -EINVAL;
}
- /* Limited framebuffer color depth support for screen objects */
- if (dev_priv->active_display_unit == vmw_du_screen_object) {
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_RGB565:
- break;
- default:
- DRM_ERROR("Invalid pixel format: %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg(&dev_priv->drm,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ return -EINVAL;
}
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index bbc809f7bd8a..8c8ee87fd3ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -248,7 +248,6 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8d1e869cc196..34ab08369e04 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -862,22 +862,21 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv;
-
dev_priv = container_of(bdev, struct vmw_private, bdev);
mutex_lock(&dev_priv->binding_mutex);
- dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
- if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
- mutex_unlock(&dev_priv->binding_mutex);
- return;
- }
-
/* If BO is being moved from MOB to system memory */
if (new_mem->mem_type == TTM_PL_SYSTEM &&
old_mem->mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
+ dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
+ if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
+ mutex_unlock(&dev_priv->binding_mutex);
+ return;
+ }
+
(void) vmw_query_readback_all(dx_query_mob);
mutex_unlock(&dev_priv->binding_mutex);
@@ -891,7 +890,6 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
(void) ttm_bo_wait(bo, false, false);
} else
mutex_unlock(&dev_priv->binding_mutex);
-
}
/**
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 840fd075c56f..6284db50ec9b 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -226,6 +226,17 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
dev_dbg(dev, "sid 0x%x status 0x%x\n",
cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
}
+ if (privdata->mp2_ops->discovery_status &&
+ privdata->mp2_ops->discovery_status(privdata) == 0) {
+ amd_sfh_hid_client_deinit(privdata);
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ devm_kfree(dev, cl_data->feature_report[i]);
+ devm_kfree(dev, in_data->input_report[i]);
+ devm_kfree(dev, cl_data->report_descr[i]);
+ }
+ dev_warn(dev, "Failed to discover, sensors not enabled\n");
+ return -EOPNOTSUPP;
+ }
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
return 0;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
index 5ad1e7acd294..a4bda2ac713e 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -139,10 +139,10 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data)
hid->driver_data = hid_data;
cli_data->hid_sensor_hubs[cur_hid_dev] = hid;
- hid->bus = BUS_AMD_AMDTP;
+ hid->bus = BUS_AMD_SFH;
hid->vendor = AMD_SFH_HID_VENDOR;
hid->product = AMD_SFH_HID_PRODUCT;
- snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-amdtp",
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-amdsfh",
hid->vendor, hid->product);
rc = hid_add_device(hid);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index ae2ac9191ba7..741cff350589 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -10,7 +10,7 @@
#define AMDSFH_HID_H
#define MAX_HID_DEVICES 5
-#define BUS_AMD_AMDTP 0x20
+#define BUS_AMD_SFH 0x20
#define AMD_SFH_HID_VENDOR 0x1022
#define AMD_SFH_HID_PRODUCT 0x0001
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 561bb27f42b1..ae8f1f2536e9 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -126,6 +126,12 @@ static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
return 0;
}
+static int amd_sfh_dis_sts_v2(struct amd_mp2_dev *privdata)
+{
+ return (readl(privdata->mmio + AMD_P2C_MSG(1)) &
+ SENSOR_DISCOVERY_STATUS_MASK) >> SENSOR_DISCOVERY_STATUS_SHIFT;
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -241,6 +247,7 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.response = amd_sfh_wait_response_v2,
.clear_intr = amd_sfh_clear_intr_v2,
.init_intr = amd_sfh_irq_init_v2,
+ .discovery_status = amd_sfh_dis_sts_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 00fc083dc123..2d3203d3daeb 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -38,6 +38,9 @@
#define AMD_SFH_IDLE_LOOP 200
+#define SENSOR_DISCOVERY_STATUS_MASK GENMASK(5, 3)
+#define SENSOR_DISCOVERY_STATUS_SHIFT 3
+
/* SFH Command register */
union sfh_cmd_base {
u32 ul;
@@ -142,5 +145,6 @@ struct amd_mp2_ops {
int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
void (*clear_intr)(struct amd_mp2_dev *privdata);
int (*init_intr)(struct amd_mp2_dev *privdata);
+ int (*discovery_status)(struct amd_mp2_dev *privdata);
};
#endif
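The new discovery_status callback pulls a field out of a P2C register with a GENMASK()-built mask and an explicit shift. The same extraction could also be written with FIELD_GET() from <linux/bitfield.h>; a hypothetical sketch, with the register layout reduced to an illustrative 3-bit field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_DISCOVERY_STATUS	GENMASK(5, 3)	/* illustrative field */

static int example_discovery_status(void __iomem *mmio)
{
	u32 reg = readl(mmio);

	/* FIELD_GET() masks and shifts the field down in one step */
	return FIELD_GET(EXAMPLE_DISCOVERY_STATUS, reg);
}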
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index 74ad8bf98bfd..e8c5e3ac9fff 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -347,6 +347,12 @@ static int bigben_probe(struct hid_device *hid,
bigben->report = list_entry(report_list->next,
struct hid_report, list);
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ error = -ENODEV;
+ goto error_hw_stop;
+ }
+
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
set_bit(FF_RUMBLE, hidinput->input->ffbit);
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 3091355d48df..8e4a5528e25d 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -188,7 +188,6 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
- input_free_device(input);
return ret;
}
@@ -200,7 +199,6 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
hid_err(hdev, "Failed to register elan input device: %d\n",
ret);
input_mt_destroy_slots(input);
- input_free_device(input);
return ret;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 645a5f566d23..42b5b050b72d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -753,6 +753,7 @@
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index c2c66ceca132..7d82f8d426bb 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -366,7 +366,7 @@ static const struct hidled_config hidled_configs[] = {
.type = DREAM_CHEEKY,
.name = "Dream Cheeky Webmail Notifier",
.short_name = "dream_cheeky",
- .max_brightness = 31,
+ .max_brightness = 63,
.num_leds = 1,
.report_size = 9,
.report_type = RAW_REQUEST,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e1afddb7b33d..f382444dc2db 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2032,6 +2032,12 @@ static const struct hid_device_id mt_devices[] = {
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ /* Lenovo X12 TAB Gen 1 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X12_TAB) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
@@ -2176,6 +2182,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_GOOGLE,
HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
+ USB_DEVICE_ID_GOOGLE_WHISKERS) },
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index f3761c73b074..6b967bb38690 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -1221,7 +1221,9 @@ u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
/*
* Cannot return an ID of 0, which is reserved for an unsolicited
- * message from Hyper-V.
+ * message from Hyper-V; Hyper-V does not acknowledge (respond to)
+ * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of
+ * 0 sent by the guest.
*/
return current_id + 1;
}
@@ -1246,7 +1248,7 @@ u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id)
/* Hyper-V can send an unsolicited message with ID of 0 */
if (!trans_id)
- return trans_id;
+ return VMBUS_RQST_ERROR;
spin_lock_irqsave(&rqstor->req_lock, flags);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ce76fc382799..07003019263a 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -637,6 +637,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
if (newchannel->offermsg.offer.sub_channel_index == 0) {
mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
/*
* Don't call free_channel(), because newchannel->kobj
* is not initialized yet.
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index aea8125a4db8..50d9113f5402 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1381,7 +1381,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(vmbus_interrupt, 0);
+ add_interrupt_randomness(vmbus_interrupt);
}
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 5f8f824d997f..63b616ce3a6e 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2309,6 +2309,21 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
int page, ret;
/*
+ * Figure out if PEC is enabled before accessing any other register.
+ * Make sure PEC is disabled, will be enabled later if needed.
+ */
+ client->flags &= ~I2C_CLIENT_PEC;
+
+ /* Enable PEC if the controller and bus supports it */
+ if (!(data->flags & PMBUS_NO_CAPABILITY)) {
+ ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
+ client->flags |= I2C_CLIENT_PEC;
+ }
+ }
+
+ /*
* Some PMBus chips don't support PMBUS_STATUS_WORD, so try
* to use PMBUS_STATUS_BYTE instead if that is the case.
* Bail out if both registers are not supported.
@@ -2326,19 +2341,6 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->has_status_word = true;
}
- /* Make sure PEC is disabled, will be enabled later if needed */
- client->flags &= ~I2C_CLIENT_PEC;
-
- /* Enable PEC if the controller and bus supports it */
- if (!(data->flags & PMBUS_NO_CAPABILITY)) {
- ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
- if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) {
- client->flags |= I2C_CLIENT_PEC;
- }
- }
- }
-
/*
* Check if the chip is write protected. If it is, we can not clear
* faults, and we should not try it. Also, in that case, writes into
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 8a18c71df37a..a6bdf4cf80ca 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1382,7 +1382,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (conn->child_dev) {
+ if (conn->child_dev && conn->child_dev->has_conns_grp) {
ret = coresight_make_links(csdev, conn,
conn->child_dev);
if (ret)
@@ -1574,6 +1574,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
+ bool registered = false;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
@@ -1594,7 +1595,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
- goto err_free_csdev;
+ kfree(csdev);
+ goto err_out;
}
csdev->refcnt = refcnts;
@@ -1619,6 +1621,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
+ /*
+ * Make sure the device registration and the connection fixup
+ * are synchronised, so that we don't see uninitialised devices
+ * on the coresight bus while trying to resolve the connections.
+ */
+ mutex_lock(&coresight_mutex);
+
ret = device_register(&csdev->dev);
if (ret) {
put_device(&csdev->dev);
@@ -1626,7 +1635,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
* All resources are free'd explicitly via
* coresight_device_release(), triggered from put_device().
*/
- goto err_out;
+ goto out_unlock;
}
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -1641,11 +1650,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
* from put_device(), which is in turn called from
* function device_unregister().
*/
- goto err_out;
+ goto out_unlock;
}
}
-
- mutex_lock(&coresight_mutex);
+ /* Device is now registered */
+ registered = true;
ret = coresight_create_conns_sysfs_group(csdev);
if (!ret)
@@ -1655,16 +1664,18 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
if (!ret && cti_assoc_ops && cti_assoc_ops->add)
cti_assoc_ops->add(csdev);
+out_unlock:
mutex_unlock(&coresight_mutex);
- if (ret) {
+ /* Success */
+ if (!ret)
+ return csdev;
+
+ /* Unregister the device if needed */
+ if (registered) {
coresight_unregister(csdev);
return ERR_PTR(ret);
}
- return csdev;
-
-err_free_csdev:
- kfree(csdev);
err_out:
/* Cleanup the connection information */
coresight_release_platform_data(NULL, desc->pdata);
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 00de46565bc4..c60442970c2a 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -380,9 +380,10 @@ static int debug_notifier_call(struct notifier_block *self,
int cpu;
struct debug_drvdata *drvdata;
- mutex_lock(&debug_lock);
+ /* Bail out if we can't acquire the mutex or the functionality is off */
+ if (!mutex_trylock(&debug_lock))
+ return NOTIFY_DONE;
- /* Bail out if the functionality is disabled */
if (!debug_enable)
goto skip_dump;
@@ -401,7 +402,7 @@ static int debug_notifier_call(struct notifier_block *self,
skip_dump:
mutex_unlock(&debug_lock);
- return 0;
+ return NOTIFY_DONE;
}
static struct notifier_block debug_notifier = {
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index b0eae94909f4..c0c35785a0dc 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -656,6 +656,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
bool is_read;
+ u8 *dma_buf = NULL;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
@@ -703,7 +704,17 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
dev->msg = m_start;
dev->recv_len_abort = false;
+ if (dev->use_dma) {
+ dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
+ if (!dma_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev->buf = dma_buf;
+ }
+
ret = at91_do_twi_transfer(dev);
+ i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);
ret = (ret < 0) ? ret : num;
out:
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 805c77143a0f..b4c1ad19cdae 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -760,7 +760,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
struct i2c_adapter *adap)
{
- unsigned long time_left;
+ unsigned long time_left, msg_timeout;
u32 reg;
id->p_msg = msg;
@@ -785,8 +785,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
else
cdns_i2c_msend(id);
+ /* Minimal time to execute this message */
+ msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
+ /* Plus some wiggle room */
+ msg_timeout += msecs_to_jiffies(500);
+
+ if (msg_timeout < adap->timeout)
+ msg_timeout = adap->timeout;
+
/* Wait for the signal of completion */
- time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
+ time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
if (time_left == 0) {
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index bf2a4920638a..a1100e37626e 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -477,9 +477,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
{
int ret;
- if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
-
if (prepare) {
/* Optional interface clock */
ret = clk_prepare_enable(dev->pclk);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 21113665ddea..718bebe4fb87 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -262,8 +262,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
}
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (!i2c_dw_prepare_clk(dev, true)) {
+ dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto exit_reset;
+ }
+
+ ret = i2c_dw_prepare_clk(dev, true);
+ if (ret)
+ goto exit_reset;
+
+ if (dev->clk) {
u64 clk_khz;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index a6187cbec2c9..483428c5e30b 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -82,6 +82,7 @@
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
+#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
/* Hardware Descriptor Constants - Control Field */
#define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
@@ -175,6 +176,8 @@ struct ismt_priv {
u8 head; /* ring buffer head pointer */
struct completion cmp; /* interrupt completion */
u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
+ dma_addr_t log_dma;
+ u32 *log;
};
static const struct pci_device_id ismt_ids[] = {
@@ -411,6 +414,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
memset(desc, 0, sizeof(struct ismt_desc));
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+ /* Always clear the log entries */
+ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
+
/* Initialize common control bits */
if (likely(pci_dev_msi_enabled(priv->pci_dev)))
desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
@@ -522,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
case I2C_SMBUS_BLOCK_PROC_CALL:
dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
dma_size = I2C_SMBUS_BLOCK_MAX;
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
desc->wr_len_cmd = data->block[0] + 1;
@@ -708,6 +717,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
/* initialize the Master Descriptor Base Address (MDBA) */
writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
+
/* initialize the Master Control Register (MCTRL) */
writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
@@ -795,6 +806,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
priv->head = 0;
init_completion(&priv->cmp);
+ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
+ ISMT_LOG_ENTRIES * sizeof(u32),
+ &priv->log_dma, GFP_KERNEL);
+ if (!priv->log)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 45fe4a7fe0c0..901f0fb04fee 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_disable_clk;
}
adap = &i2c->adap;
@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
if (ret < 0)
- return ret;
+ goto err_disable_clk;
dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2c->clk);
+
return ret;
}
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 2ad166355ec9..d9ac62c1ac25 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -359,14 +359,14 @@ static int npcm_i2c_get_SCL(struct i2c_adapter *_adap)
{
struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
- return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+ return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
}
static int npcm_i2c_get_SDA(struct i2c_adapter *_adap)
{
struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
- return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+ return !!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
}
static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus)
@@ -563,6 +563,15 @@ static inline void npcm_i2c_nack(struct npcm_i2c *bus)
iowrite8(val, bus->reg + NPCM_I2CCTL1);
}
+static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ /* Clear NEGACK, STASTR and BER bits */
+ val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
+ iowrite8(val, bus->reg + NPCM_I2CST);
+}
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable)
{
@@ -642,8 +651,8 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
iowrite8(0xFF, bus->reg + NPCM_I2CST);
- /* Clear EOB bit */
- iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3);
+ /* Clear and disable EOB */
+ npcm_i2c_eob_int(bus, false);
/* Clear all fifo bits: */
iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
@@ -655,6 +664,9 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
}
#endif
+ /* clear status bits for spurious interrupts */
+ npcm_i2c_clear_master_status(bus);
+
bus->state = I2C_IDLE;
}
@@ -815,15 +827,6 @@ static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo)
}
}
-static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
-{
- u8 val;
-
- /* Clear NEGACK, STASTR and BER bits */
- val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
- iowrite8(val, bus->reg + NPCM_I2CST);
-}
-
static void npcm_i2c_master_abort(struct npcm_i2c *bus)
{
/* Only current master is allowed to issue a stop condition */
@@ -1231,7 +1234,16 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus)
ret = IRQ_HANDLED;
} /* SDAST */
- return ret;
+ /*
+ * if irq is not one of the above, make sure EOB is disabled and all
+ * status bits are cleared.
+ */
+ if (ret == IRQ_NONE) {
+ npcm_i2c_eob_int(bus, false);
+ npcm_i2c_clear_master_status(bus);
+ }
+
+ return IRQ_HANDLED;
}
static int npcm_i2c_reg_slave(struct i2c_client *client)
@@ -1467,6 +1479,9 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
npcm_i2c_eob_int(bus, false);
npcm_i2c_master_stop(bus);
+ /* Clear SDA Status bit (by reading dummy byte) */
+ npcm_i2c_rd_byte(bus);
+
/*
* The bus is released from stall only after the SW clears
* NEGACK bit. Then a Stop condition is sent.
@@ -1474,6 +1489,8 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
npcm_i2c_clear_master_status(bus);
readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val,
!(val & NPCM_I2CCST_BUSY), 10, 200);
+ /* verify no status bits are still set after bus is released */
+ npcm_i2c_clear_master_status(bus);
}
bus->state = I2C_IDLE;
@@ -1672,10 +1689,10 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap)
int iter = 27;
if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) {
- dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck",
- bus->num);
+ dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck",
+ bus->num, bus->dest_addr);
npcm_i2c_reset(bus);
- return status;
+ return 0;
}
npcm_i2c_int_enable(bus, false);
@@ -1909,6 +1926,7 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ)
return -EINVAL;
+ npcm_i2c_int_enable(bus, false);
npcm_i2c_disable(bus);
/* Configure FIFO mode : */
@@ -1937,10 +1955,17 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS;
iowrite8(val, bus->reg + NPCM_I2CCTL1);
- npcm_i2c_int_enable(bus, true);
-
npcm_i2c_reset(bus);
+ /* check HW is OK: SDA and SCL should be high at this point. */
+ if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
+ dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
+ dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
+
+ npcm_i2c_int_enable(bus, true);
return 0;
}
@@ -1988,10 +2013,14 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
if (bus->slave) {
bus->master_or_slave = I2C_SLAVE;
- return npcm_i2c_int_slave_handler(bus);
+ if (npcm_i2c_int_slave_handler(bus))
+ return IRQ_HANDLED;
}
#endif
- return IRQ_NONE;
+ /* clear status bits for spurious interrupts */
+ npcm_i2c_clear_master_status(bus);
+
+ return IRQ_HANDLED;
}
static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
@@ -2047,8 +2076,7 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
u16 nwrite, nread;
u8 *write_data, *read_data;
u8 slave_addr;
- int timeout;
- int ret = 0;
+ unsigned long timeout;
bool read_block = false;
bool read_PEC = false;
u8 bus_busy;
@@ -2099,13 +2127,13 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
* 9: bits per transaction (including the ack/nack)
*/
timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
- timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
+ timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
return -EINVAL;
}
- time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1;
+ time_left = jiffies + timeout + 1;
do {
/*
* we must clear slave address immediately when the bus is not
@@ -2138,12 +2166,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
bus->read_block_use = read_block;
reinit_completion(&bus->cmd_complete);
- if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
- write_data, read_data, read_PEC,
- read_block))
- ret = -EBUSY;
- if (ret != -EBUSY) {
+ npcm_i2c_int_enable(bus, true);
+
+ if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ write_data, read_data, read_PEC,
+ read_block)) {
time_left = wait_for_completion_timeout(&bus->cmd_complete,
timeout);
@@ -2157,26 +2185,31 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
}
- ret = bus->cmd_err;
/* if there was BER, check if need to recover the bus: */
if (bus->cmd_err == -EAGAIN)
- ret = i2c_recover_bus(adap);
+ bus->cmd_err = i2c_recover_bus(adap);
/*
* After any type of error, check if LAST bit is still set,
* due to a HW issue.
* It cannot be cleared without resetting the module.
*/
- if (bus->cmd_err &&
- (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
+ else if (bus->cmd_err &&
+ (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
npcm_i2c_reset(bus);
+ /* after any xfer, successful or not, stall and EOB must be disabled */
+ npcm_i2c_stall_after_start(bus, false);
+ npcm_i2c_eob_int(bus, false);
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* reenable slave if it was enabled */
if (bus->slave)
iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN,
bus->reg + NPCM_I2CADDR1);
+#else
+ npcm_i2c_int_enable(bus, false);
#endif
return bus->cmd_err;
}
@@ -2269,7 +2302,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
adap = &bus->adap;
adap->owner = THIS_MODULE;
adap->retries = 3;
- adap->timeout = HZ;
+ adap->timeout = msecs_to_jiffies(35);
adap->algo = &npcm_i2c_algo;
adap->quirks = &npcm_i2c_quirks;
adap->algo_data = bus;
@@ -2336,8 +2369,7 @@ static struct platform_driver npcm_i2c_bus_driver = {
static int __init npcm_i2c_init(void)
{
npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
- platform_driver_register(&npcm_i2c_bus_driver);
- return 0;
+ return platform_driver_register(&npcm_i2c_bus_driver);
}
module_init(npcm_i2c_init);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 8c1b31ed0c42..ac8e7d60672a 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -77,6 +77,7 @@
/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
+#define SB800_PIIX4_SMB_MAP_SIZE 2
#define KERNCZ_IMC_IDX 0x3e
#define KERNCZ_IMC_DATA 0x3f
@@ -97,6 +98,9 @@
#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
+#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
+#define SB800_PIIX4_FCH_PM_SIZE 8
+
/* insmod parameters */
/* If force is set to anything different from 0, we forcibly enable the
@@ -155,6 +159,12 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
};
static const char *piix4_aux_port_name_sb800 = " port 1";
+struct sb800_mmio_cfg {
+ void __iomem *addr;
+ struct resource *res;
+ bool use_mmio;
+};
+
struct i2c_piix4_adapdata {
unsigned short smba;
@@ -162,8 +172,75 @@ struct i2c_piix4_adapdata {
bool sb800_main;
bool notify_imc;
u8 port; /* Port number, shifted */
+ struct sb800_mmio_cfg mmio_cfg;
};
+static int piix4_sb800_region_request(struct device *dev,
+ struct sb800_mmio_cfg *mmio_cfg)
+{
+ if (mmio_cfg->use_mmio) {
+ struct resource *res;
+ void __iomem *addr;
+
+ res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+ SB800_PIIX4_FCH_PM_SIZE,
+ "sb800_piix4_smb");
+ if (!res) {
+ dev_err(dev,
+ "SMBus base address memory region 0x%x already in use.\n",
+ SB800_PIIX4_FCH_PM_ADDR);
+ return -EBUSY;
+ }
+
+ addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
+ SB800_PIIX4_FCH_PM_SIZE);
+ if (!addr) {
+ release_resource(res);
+ dev_err(dev, "SMBus base address mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ mmio_cfg->res = res;
+ mmio_cfg->addr = addr;
+
+ return 0;
+ }
+
+ if (!request_muxed_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE,
+ "sb800_piix4_smb")) {
+ dev_err(dev,
+ "SMBus base address index region 0x%x already in use.\n",
+ SB800_PIIX4_SMB_IDX);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void piix4_sb800_region_release(struct device *dev,
+ struct sb800_mmio_cfg *mmio_cfg)
+{
+ if (mmio_cfg->use_mmio) {
+ iounmap(mmio_cfg->addr);
+ release_resource(mmio_cfg->res);
+ return;
+ }
+
+ release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE);
+}
+
+static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev)
+{
+ /*
+ * cd6h/cd7h port I/O accesses can be disabled on AMD processors
+ * w/ SMBus PCI revision ID 0x51 or greater. MMIO is supported on
+ * the same processors and is the recommended access method.
+ */
+ return (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+ PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+ PIIX4_dev->revision >= 0x51);
+}
+
static int piix4_setup(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
@@ -263,12 +340,61 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
+static int piix4_setup_sb800_smba(struct pci_dev *PIIX4_dev,
+ u8 smb_en,
+ u8 aux,
+ u8 *smb_en_status,
+ unsigned short *piix4_smba)
+{
+ struct sb800_mmio_cfg mmio_cfg;
+ u8 smba_en_lo;
+ u8 smba_en_hi;
+ int retval;
+
+ mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
+ retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
+ if (retval)
+ return retval;
+
+ if (mmio_cfg.use_mmio) {
+ smba_en_lo = ioread8(mmio_cfg.addr);
+ smba_en_hi = ioread8(mmio_cfg.addr + 1);
+ } else {
+ outb_p(smb_en, SB800_PIIX4_SMB_IDX);
+ smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
+ smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ }
+
+ piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
+
+ if (!smb_en) {
+ *smb_en_status = smba_en_lo & 0x10;
+ *piix4_smba = smba_en_hi << 8;
+ if (aux)
+ *piix4_smba |= 0x20;
+ } else {
+ *smb_en_status = smba_en_lo & 0x01;
+ *piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+ }
+
+ if (!*smb_en_status) {
+ dev_err(&PIIX4_dev->dev,
+ "SMBus Host Controller not enabled!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id, u8 aux)
{
unsigned short piix4_smba;
- u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status, port_sel;
+ u8 smb_en, smb_en_status, port_sel;
u8 i2ccfg, i2ccfg_offset = 0x10;
+ struct sb800_mmio_cfg mmio_cfg;
+ int retval;
/* SB800 and later SMBus does not support forcing address */
if (force || force_addr) {
@@ -290,35 +416,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
else
smb_en = (aux) ? 0x28 : 0x2c;
- if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb")) {
- dev_err(&PIIX4_dev->dev,
- "SMB base address index region 0x%x already in use.\n",
- SB800_PIIX4_SMB_IDX);
- return -EBUSY;
- }
-
- outb_p(smb_en, SB800_PIIX4_SMB_IDX);
- smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
- outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
- smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ retval = piix4_setup_sb800_smba(PIIX4_dev, smb_en, aux, &smb_en_status,
+ &piix4_smba);
- release_region(SB800_PIIX4_SMB_IDX, 2);
-
- if (!smb_en) {
- smb_en_status = smba_en_lo & 0x10;
- piix4_smba = smba_en_hi << 8;
- if (aux)
- piix4_smba |= 0x20;
- } else {
- smb_en_status = smba_en_lo & 0x01;
- piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
- }
-
- if (!smb_en_status) {
- dev_err(&PIIX4_dev->dev,
- "SMBus Host Controller not enabled!\n");
- return -ENODEV;
- }
+ if (retval)
+ return retval;
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
@@ -371,10 +473,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
}
} else {
- if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
- "sb800_piix4_smb")) {
+ mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
+ retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
+ if (retval) {
release_region(piix4_smba, SMBIOSIZE);
- return -EBUSY;
+ return retval;
}
outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -384,7 +487,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
SB800_PIIX4_PORT_IDX;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
- release_region(SB800_PIIX4_SMB_IDX, 2);
+ piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
}
dev_info(&PIIX4_dev->dev,
@@ -662,6 +765,29 @@ static void piix4_imc_wakeup(void)
release_region(KERNCZ_IMC_IDX, 2);
}
+static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
+{
+ u8 smba_en_lo, val;
+
+ if (mmio_cfg->use_mmio) {
+ smba_en_lo = ioread8(mmio_cfg->addr + piix4_port_sel_sb800);
+ val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
+ if (smba_en_lo != val)
+ iowrite8(val, mmio_cfg->addr + piix4_port_sel_sb800);
+
+ return (smba_en_lo & piix4_port_mask_sb800);
+ }
+
+ outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
+ smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+
+ val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
+ if (smba_en_lo != val)
+ outb_p(val, SB800_PIIX4_SMB_IDX + 1);
+
+ return (smba_en_lo & piix4_port_mask_sb800);
+}
+
/*
* Handles access to multiple SMBus ports on the SB800.
* The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -678,12 +804,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
unsigned short piix4_smba = adapdata->smba;
int retries = MAX_TIMEOUT;
int smbslvcnt;
- u8 smba_en_lo;
- u8 port;
+ u8 prev_port;
int retval;
- if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb"))
- return -EBUSY;
+ retval = piix4_sb800_region_request(&adap->dev, &adapdata->mmio_cfg);
+ if (retval)
+ return retval;
/* Request the SMBUS semaphore, avoid conflicts with the IMC */
smbslvcnt = inb_p(SMBSLVCNT);
@@ -738,18 +864,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
}
}
- outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
- smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
-
- port = adapdata->port;
- if ((smba_en_lo & piix4_port_mask_sb800) != port)
- outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
- SB800_PIIX4_SMB_IDX + 1);
+ prev_port = piix4_sb800_port_sel(adapdata->port, &adapdata->mmio_cfg);
retval = piix4_access(adap, addr, flags, read_write,
command, size, data);
- outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
+ piix4_sb800_port_sel(prev_port, &adapdata->mmio_cfg);
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
@@ -758,7 +878,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
piix4_imc_wakeup();
release:
- release_region(SB800_PIIX4_SMB_IDX, 2);
+ piix4_sb800_region_release(&adap->dev, &adapdata->mmio_cfg);
return retval;
}
@@ -836,6 +956,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
return -ENOMEM;
}
+ adapdata->mmio_cfg.use_mmio = piix4_sb800_use_mmio(dev);
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
adapdata->port = port << piix4_port_shift_sb800;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index bff9913c37b8..2c016f0299fc 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1070,8 +1070,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = rcar_i2c_clock_calculate(priv);
- if (ret < 0)
- goto out_pm_put;
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto out_pm_disable;
+ }
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
@@ -1100,19 +1102,19 @@ static int rcar_i2c_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto out_pm_disable;
+ goto out_pm_put;
priv->irq = ret;
ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
- goto out_pm_disable;
+ goto out_pm_put;
}
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
if (ret < 0)
- goto out_pm_disable;
+ goto out_pm_put;
if (priv->flags & ID_P_HOST_NOTIFY) {
priv->host_notify_client = i2c_new_slave_host_notify_device(adap);
@@ -1129,7 +1131,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
out_del_device:
i2c_del_adapter(&priv->adap);
out_pm_put:
- pm_runtime_put(dev);
+ if (priv->flags & ID_P_PM_BLOCKED)
+ pm_runtime_put(dev);
out_pm_disable:
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 12c90aa0900e..a77cd86fe75e 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
i2c->adap.dev.parent = dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c->adap.dev.fwnode = dev->fwnode;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"Cavium ThunderX i2c adapter at %s", dev_name(dev));
i2c_set_adapdata(&i2c->adap, i2c);
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 18c154afbd7a..101f2da2811b 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -188,7 +188,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
.sign = 'u',
.realbits = 24,
.storagebits = 32,
- .shift = 8,
.endianness = IIO_BE,
},
};
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index 00098caf6d9e..cfe003cc4f0b 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -36,8 +36,8 @@
/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
-#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
-#define SC27XX_ADC_SCALE_SHIFT 8
+#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
+#define SC27XX_ADC_SCALE_SHIFT 9
/* Bits definitions for SC27XX_ADC_INT_EN registers */
#define SC27XX_ADC_IRQ_EN BIT(0)
@@ -103,14 +103,14 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
100, 341,
};
-static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
- 4200, 856,
- 3600, 733,
+static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
+ 4200, 850,
+ 3600, 728,
};
-static const struct sc27xx_adc_linear_graph small_scale_graph_calib = {
- 1000, 833,
- 100, 80,
+static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
+ 1000, 838,
+ 100, 84,
};
static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
@@ -130,11 +130,11 @@ static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
size_t len;
if (big_scale) {
- calib_graph = &big_scale_graph_calib;
+ calib_graph = &sc2731_big_scale_graph_calib;
graph = &big_scale_graph;
cell_name = "big_scale_calib";
} else {
- calib_graph = &small_scale_graph_calib;
+ calib_graph = &sc2731_small_scale_graph_calib;
graph = &small_scale_graph;
cell_name = "small_scale_calib";
}
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index fba659bfdb40..64305d9fa560 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -61,7 +61,7 @@ struct stmpe_adc {
static int stmpe_read_voltage(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
STMPE_ADC_CH(info->channel));
mutex_unlock(&info->lock);
@@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
static int stmpe_read_temp(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
mutex_unlock(&info->lock);
return -ETIMEDOUT;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 0bbb090b108c..aff981551617 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -71,16 +71,18 @@ st_sensors_match_odr_error:
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
{
- int err;
+ int err = 0;
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ mutex_lock(&sdata->odr_lock);
+
if (!sdata->sensor_settings->odr.mask)
- return 0;
+ goto unlock_mutex;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
- goto st_sensors_match_odr_error;
+ goto unlock_mutex;
if ((sdata->sensor_settings->odr.addr ==
sdata->sensor_settings->pw.addr) &&
@@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
if (err >= 0)
sdata->odr = odr_out.hz;
-st_sensors_match_odr_error:
+unlock_mutex:
+ mutex_unlock(&sdata->odr_lock);
+
return err;
}
EXPORT_SYMBOL(st_sensors_set_odr);
@@ -365,6 +369,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *of_pdata;
int err = 0;
+ mutex_init(&sdata->odr_lock);
+
/* If OF/DT pdata exists, it will take precedence of anything else */
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (IS_ERR(of_pdata))
@@ -558,18 +564,24 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
err = -EBUSY;
goto out;
} else {
+ mutex_lock(&sdata->odr_lock);
err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&sdata->odr_lock);
goto out;
+ }
msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
err = st_sensors_read_axis_data(indio_dev, ch, val);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&sdata->odr_lock);
goto out;
+ }
*val = *val >> ch->scan_type.shift;
err = st_sensors_set_enable(indio_dev, false);
+ mutex_unlock(&sdata->odr_lock);
}
out:
mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index c0b7ef900735..c24f609c2ade 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
*/
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
- if (!swd) {
- ret = -ENOMEM;
- goto error_kzalloc;
- }
+ if (!swd)
+ return ERR_PTR(-ENOMEM);
+
/*
* Allocate an IIO device.
*
@@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
indio_dev = iio_device_alloc(parent, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
- goto error_ret;
+ goto error_free_swd;
}
st = iio_priv(indio_dev);
@@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
* indio_dev->name = spi_get_device_id(spi)->name;
*/
indio_dev->name = kstrdup(name, GFP_KERNEL);
+ if (!indio_dev->name) {
+ ret = -ENOMEM;
+ goto error_free_device;
+ }
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
ret = iio_simple_dummy_events_register(indio_dev);
if (ret < 0)
- goto error_free_device;
+ goto error_free_name;
ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
@@ -649,11 +652,12 @@ error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
iio_simple_dummy_events_unregister(indio_dev);
+error_free_name:
+ kfree(indio_dev->name);
error_free_device:
iio_device_free(indio_dev);
-error_ret:
+error_free_swd:
kfree(swd);
-error_kzalloc:
return ERR_PTR(ret);
}
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index cf38144b6f95..13a87d3e3544 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
u16 tries = 20;
u8 buffer[12];
int ret;
+ unsigned long time_left;
ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
if (ret < 0)
@@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
if (data->client->irq) {
reinit_completion(&data->completion);
- ret = wait_for_completion_timeout(&data->completion, HZ/10);
- if (ret < 0)
- return ret;
- else if (ret == 0)
+ time_left = wait_for_completion_timeout(&data->completion, HZ/10);
+ if (time_left == 0)
return -ETIMEDOUT;
vl53l0x_clear_irq(data);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1783a6ea5427..3ebdd42fec36 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -265,6 +265,8 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
unsigned long dim = from->nr_segs;
int idx;
+ if (!HFI1_CAP_IS_KSET(SDMA))
+ return -EINVAL;
idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
if (!cq || !pq) {
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index ec2a45c5cf57..7facc04cc36c 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -488,7 +488,7 @@ void set_link_ipg(struct hfi1_pportdata *ppd)
u16 shift, mult;
u64 src;
u32 current_egress_rate; /* Mbits /sec */
- u32 max_pkt_time;
+ u64 max_pkt_time;
/*
* max_pkt_time is the maximum packet egress time in units
* of the fabric clock period 1/(805 MHz).
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index f07d328689d3..a95b654f5254 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1288,11 +1288,13 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
kvfree(sde->tx_ring);
sde->tx_ring = NULL;
}
- spin_lock_irq(&dd->sde_map_lock);
- sdma_map_free(rcu_access_pointer(dd->sdma_map));
- RCU_INIT_POINTER(dd->sdma_map, NULL);
- spin_unlock_irq(&dd->sde_map_lock);
- synchronize_rcu();
+ if (rcu_access_pointer(dd->sdma_map)) {
+ spin_lock_irq(&dd->sde_map_lock);
+ sdma_map_free(rcu_access_pointer(dd->sdma_map));
+ RCU_INIT_POINTER(dd->sdma_map, NULL);
+ spin_unlock_irq(&dd->sde_map_lock);
+ synchronize_rcu();
+ }
kfree(dd->per_sdma);
dd->per_sdma = NULL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9467c39e3d28..c94991356a2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -559,6 +559,11 @@ struct hns_roce_cmd_context {
u16 busy;
};
+enum hns_roce_cmdq_state {
+ HNS_ROCE_CMDQ_STATE_NORMAL,
+ HNS_ROCE_CMDQ_STATE_FATAL_ERR,
+};
+
struct hns_roce_cmdq {
struct dma_pool *pool;
struct semaphore poll_sem;
@@ -578,6 +583,7 @@ struct hns_roce_cmdq {
* close device, switch into poll mode(non event mode)
*/
u8 use_events;
+ enum hns_roce_cmdq_state state;
};
struct hns_roce_cmd_mailbox {
@@ -753,7 +759,6 @@ struct hns_roce_caps {
u32 num_pi_qps;
u32 reserved_qps;
int num_qpc_timer;
- int num_cqc_timer;
int num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 96fe73ba689c..bf01210c56c2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1273,6 +1273,16 @@ static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
return tail == priv->cmq.csq.head;
}
+static void update_cmdq_status(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+
+ if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
+ handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
+ hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
+}
+
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
@@ -1326,6 +1336,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
csq->head, tail);
csq->head = tail;
+ update_cmdq_status(hr_dev);
+
ret = -EAGAIN;
}
@@ -1340,6 +1352,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
bool busy;
int ret;
+ if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
+ return -EIO;
+
if (!v2_chk_mbox_is_avail(hr_dev, &busy))
return busy ? -EBUSY : 0;
@@ -1536,6 +1551,9 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
int i;
+ if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
+ return;
+
for (i = hr_dev->func_num - 1; i >= 0; i--) {
__hns_roce_function_clear(hr_dev, i);
if (i != 0)
@@ -1955,7 +1973,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
- caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
+ caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
@@ -2249,7 +2267,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
- caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
@@ -2818,6 +2835,9 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
mb_st = (struct hns_roce_mbox_status *)desc.data;
end = msecs_to_jiffies(timeout) + jiffies;
while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
+ return -EIO;
+
status = 0;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
true);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 35c61da7ba15..df4501e77fd1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -51,7 +51,7 @@
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
-#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 1f2209de8812..13c8195b5c3a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -663,7 +663,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
- hr_dev->caps.num_cqc_timer, 1);
+ hr_dev->caps.cqc_timer_bt_num, 1);
if (ret) {
dev_err(dev,
"Failed to init CQC timer memory, aborting.\n");
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 8ef112f883a7..3acab569fbb9 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2775,7 +2775,7 @@ void rvt_qp_iter(struct rvt_dev_info *rdi,
EXPORT_SYMBOL(rvt_qp_iter);
/*
- * This should be called with s_lock held.
+ * This should be called with s_lock and r_lock held.
*/
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
@@ -3134,7 +3134,9 @@ send_comp:
rvp->n_loop_pkts++;
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ spin_lock(&sqp->r_lock);
rvt_send_complete(sqp, wqe, send_status);
+ spin_unlock(&sqp->r_lock);
if (local_ops) {
atomic_dec(&sqp->local_ops_pending);
local_ops = 0;
@@ -3188,7 +3190,9 @@ serr:
spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
spin_lock_irqsave(&sqp->s_lock, flags);
+ spin_lock(&sqp->r_lock);
rvt_send_complete(sqp, wqe, send_status);
+ spin_unlock(&sqp->r_lock);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
int lastwqe;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index fc996fd31e58..7812e3d6a6c2 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -683,7 +683,7 @@ next_wqe:
opcode = next_opcode(qp, wqe, wqe->wr.opcode);
if (unlikely(opcode < 0)) {
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto exit;
+ goto err;
}
mask = rxe_opcode[opcode].mask;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ccaeb2426385..ba246fabc6c1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
+static const unsigned int input_max_code[EV_CNT] = {
+ [EV_KEY] = KEY_MAX,
+ [EV_REL] = REL_MAX,
+ [EV_ABS] = ABS_MAX,
+ [EV_MSC] = MSC_MAX,
+ [EV_SW] = SW_MAX,
+ [EV_LED] = LED_MAX,
+ [EV_SND] = SND_MAX,
+ [EV_FF] = FF_MAX,
+};
+
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{
@@ -2074,6 +2085,14 @@ EXPORT_SYMBOL(input_get_timestamp);
*/
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
+ if (type < EV_CNT && input_max_code[type] &&
+ code > input_max_code[type]) {
+ pr_err("%s: invalid code %u for type %u\n", __func__, code,
+ type);
+ dump_stack();
+ return;
+ }
+
switch (type) {
case EV_KEY:
__set_bit(code, dev->keybit);
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 8dbf1e69c90a..22a91db645b8 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -131,7 +131,7 @@ static void gpio_keys_quiesce_key(void *data)
if (!bdata->gpiod)
hrtimer_cancel(&bdata->release_timer);
- if (bdata->debounce_use_hrtimer)
+ else if (bdata->debounce_use_hrtimer)
hrtimer_cancel(&bdata->debounce_timer);
else
cancel_delayed_work_sync(&bdata->work);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index cb6ec59a045d..efffcf0ebd3b 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
},
{
/*
- * Lenovo Yoga Tab2 1051L, something messes with the home-button
+ * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
* IRQ settings, leading to a non working home-button.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
},
},
{} /* Terminating entry */
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index fe43e5557ed7..cdcb7737c46a 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -205,6 +205,7 @@ static int bbc_beep_probe(struct platform_device *op)
info = &state->u.bbc;
info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0);
+ of_node_put(dp);
if (!info->clock_freq)
goto out_free;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 59a14505b9cd..ca150618d32f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_data)
goto err_free_bt_buffer;
- if (dev->bt_urb)
+ if (dev->bt_urb) {
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);
+ dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ }
+
usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);
+ dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 30576a5f2f04..f437eefec94a 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -420,9 +420,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;
- usleep_range(50, 100);
+ usleep_range(12000, 15000);
gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(100);
+ msleep(160);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index bc11203c9cf7..c175d44c52f3 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -337,13 +337,15 @@ static int stmfts_input_open(struct input_dev *dev)
struct stmfts_data *sdata = input_get_drvdata(dev);
int err;
- err = pm_runtime_get_sync(&sdata->client->dev);
- if (err < 0)
+ err = pm_runtime_resume_and_get(&sdata->client->dev);
+ if (err)
return err;
err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
- if (err)
+ if (err) {
+ pm_runtime_put_sync(&sdata->client->dev);
return err;
+ }
mutex_lock(&sdata->mutex);
sdata->running = true;
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 3eb7936d2cf6..2c8e12549804 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -21,13 +21,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
{
size_t i;
struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
qn = node->data;
+ qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -45,10 +50,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
{
size_t i;
struct qcom_icc_node *qn;
- struct qcom_icc_provider *qp;
qn = node->data;
- qp = to_qcom_provider(node->provider);
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
@@ -68,9 +71,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- for (i = 0; i < qn->num_bcms; i++)
- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index 12d59c36df53..5f7c0f85fa8e 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -47,7 +47,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM
DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
@@ -129,7 +128,6 @@ DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
@@ -160,7 +158,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
@@ -372,22 +369,6 @@ static struct qcom_icc_desc sc7180_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node *ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static struct qcom_icc_desc sc7180_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
@@ -519,8 +500,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sc7180_dc_noc},
{ .compatible = "qcom,sc7180-gem-noc",
.data = &sc7180_gem_noc},
- { .compatible = "qcom,sc7180-ipa-virt",
- .data = &sc7180_ipa_virt},
{ .compatible = "qcom,sc7180-mc-virt",
.data = &sc7180_mc_virt},
{ .compatible = "qcom,sc7180-mmss-noc",
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 2a85f53802b5..745e3c36a61a 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -535,7 +535,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 8dfb5dea562a..aa707582ea01 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -551,7 +551,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index 3e26a2175b28..c79f93a1ac73 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -531,7 +531,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 204ed33c56dc..9a7742732d73 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -84,7 +84,7 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
-#define LOOP_TIMEOUT 100000
+#define LOOP_TIMEOUT 2000000
/*
* ACPI table definitions
*
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f46eb7397021..e23e70af718f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1816,17 +1816,10 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
-static void __init amd_iommu_init_dma_ops(void)
-{
- swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-}
-
int __init amd_iommu_init_api(void)
{
int err;
- amd_iommu_init_dma_ops();
-
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index f763c1430d15..e2e80eb2840c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
+#include <linux/sched/mm.h>
#include <linux/slab.h>
#include "arm-smmu-v3.h"
@@ -96,9 +97,14 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_ctx_desc *ret = NULL;
+ /* Don't free the mm until we release the ASID */
+ mmgrab(mm);
+
asid = arm64_mm_context_get(mm);
- if (!asid)
- return ERR_PTR(-ESRCH);
+ if (!asid) {
+ err = -ESRCH;
+ goto out_drop_mm;
+ }
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
@@ -165,6 +171,8 @@ out_free_cd:
kfree(cd);
out_put_context:
arm64_mm_context_put(mm);
+out_drop_mm:
+ mmdrop(mm);
return err < 0 ? ERR_PTR(err) : ret;
}
@@ -173,6 +181,7 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
if (arm_smmu_free_asid(cd)) {
/* Unpin ASID */
arm64_mm_context_put(cd->mm);
+ mmdrop(cd->mm);
kfree(cd);
}
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 430315135cff..79edfdca6607 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3786,6 +3786,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 4bc75c4ce402..324e8f32962a 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -2090,11 +2090,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = res->start;
- smmu->base = devm_ioremap_resource(dev, res);
+ smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
+ ioaddr = res->start;
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 276b6fcc80ad..48c6f7ff4aef 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -619,6 +619,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
dma_addr_t iova;
+ ssize_t ret;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -656,8 +657,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
- < size)
+ ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
+ if (ret < 0 || ret < size)
goto out_free_sg;
sgt->sgl->dma_address = iova;
@@ -1054,7 +1055,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* implementation - it knows better than we do.
*/
ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
- if (ret < iova_len)
+ if (ret < 0 || ret < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
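The dma-iommu hunks above adjust two callers after iommu_map_sg_atomic() gained the ability to report a negative error as a signed length (note the new ssize_t ret), so the sign has to be tested before the result is compared against an unsigned size. Below is a minimal userspace sketch of the pitfall the combined "ret < 0 || ret < size" check avoids; it is an editorial illustration, not part of the patch, and the values are made up.

#include <stdio.h>
#include <sys/types.h>    /* ssize_t */

int main(void)
{
    ssize_t ret = -12;    /* pretend the mapping call failed, e.g. -ENOMEM */
    size_t size = 4096;   /* bytes we asked to have mapped */

    /* Comparing the signed result straight against an unsigned size
     * converts -12 to a huge unsigned value, so the "mapped too little"
     * test never fires and the error is silently lost. */
    if ((size_t)ret < size)
        printf("single compare: error caught\n");
    else
        printf("single compare: error missed (-12 seen as %zu)\n", (size_t)ret);

    /* Testing the sign first, as the patched callers now do, catches it. */
    if (ret < 0 || (size_t)ret < size)
        printf("sign-then-size compare: error caught\n");

    return 0;
}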
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 91a5c75966f3..a1ffb3d6d901 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5728,7 +5728,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
- ver != 0x9a)
+ ver != 0x9a && ver != 0xa7)
return;
if (risky_device(dev))
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3a38352b603f..c9eaf27cbb74 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -615,16 +615,19 @@ static void insert_iommu_master(struct device *dev,
static int qcom_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
- struct msm_iommu_dev *iommu;
+ struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
- list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
- if (iommu->dev->of_node == spec->np)
+ list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
+ if (iter->dev->of_node == spec->np) {
+ iommu = iter;
break;
+ }
+ }
- if (!iommu || iommu->dev->of_node != spec->np) {
+ if (!iommu) {
ret = -ENODEV;
goto fail;
}
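The msm_iommu hunk above stops using the list_for_each_entry() cursor after the loop and instead copies it into a separate pointer only when a match is found; the same pattern appears later in this patch in does_sb_need_changing() and md_reload_sb() in drivers/md/md.c. Below is a standalone sketch of that lookup pattern on a plain singly linked list; the struct and function names are invented for illustration only.

#include <stdio.h>
#include <stddef.h>

struct node { int id; struct node *next; };

/* Safe lookup: the loop cursor never leaks out of the loop, so a
 * "not found" result is an explicit NULL rather than a pointer that
 * merely happens to reference the list head. */
static struct node *find_node(struct node *head, int id)
{
    struct node *found = NULL, *iter;

    for (iter = head; iter; iter = iter->next) {
        if (iter->id == id) {
            found = iter;
            break;
        }
    }
    return found;    /* NULL when nothing matched */
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

    printf("id 2: %s\n", find_node(&a, 2) ? "found" : "not found");
    printf("id 9: %s\n", find_node(&a, 9) ? "found" : "not found");
    return 0;
}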
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5971a1168666..2ae46fa6b3de 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -451,7 +451,7 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct device *m4udev = data->dev;
int ret, domid;
@@ -461,20 +461,24 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
return domid;
if (!dom->data) {
- if (mtk_iommu_domain_finalise(dom, data, domid))
+ /* Data is in the frstdata in the sharing-pgtable case. */

+ frstdata = mtk_iommu_get_m4u_data();
+
+ if (mtk_iommu_domain_finalise(dom, frstdata, domid))
return -ENODEV;
dom->data = data;
}
+ mutex_lock(&data->mutex);
if (!data->m4u_dom) { /* Initialize the M4U HW */
ret = pm_runtime_resume_and_get(m4udev);
if (ret < 0)
- return ret;
+ goto err_unlock;
ret = mtk_iommu_hw_init(data);
if (ret) {
pm_runtime_put(m4udev);
- return ret;
+ goto err_unlock;
}
data->m4u_dom = dom;
writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
@@ -482,9 +486,14 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
pm_runtime_put(m4udev);
}
+ mutex_unlock(&data->mutex);
mtk_iommu_config(data, dev, true, domid);
return 0;
+
+err_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
}
static void mtk_iommu_detach_device(struct iommu_domain *domain,
@@ -577,6 +586,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
* All the ports in each device should be in the same larb.
*/
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ if (larbid >= MTK_LARB_NR_MAX)
+ return ERR_PTR(-EINVAL);
+
for (i = 1; i < fwspec->num_ids; i++) {
larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
if (larbid != larbidx) {
@@ -586,6 +598,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
}
}
larbdev = data->larb_imu[larbid].dev;
+ if (!larbdev)
+ return ERR_PTR(-EINVAL);
+
link = device_link_add(dev, larbdev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link)
@@ -624,6 +639,7 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
if (domid < 0)
return ERR_PTR(domid);
+ mutex_lock(&data->mutex);
group = data->m4u_group[domid];
if (!group) {
group = iommu_group_alloc();
@@ -632,6 +648,7 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
} else {
iommu_group_ref_get(group);
}
+ mutex_unlock(&data->mutex);
return group;
}
@@ -906,6 +923,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, data);
+ mutex_init(&data->mutex);
ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
"mtk-iommu.%pa", &ioaddr);
@@ -951,10 +969,8 @@ static int mtk_iommu_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
+ list_del(&data->list);
- clk_disable_unprepare(data->bclk);
device_link_remove(data->smicomm_dev, &pdev->dev);
pm_runtime_disable(&pdev->dev);
devm_free_irq(&pdev->dev, data->irq, data);
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index f81fa8862ed0..f413546ac6e5 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -80,6 +80,8 @@ struct mtk_iommu_data {
struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
+ struct mutex mutex; /* Protect m4u_group/m4u_dom above */
+
struct list_head list;
struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index bc7ee90b9373..254530ad6c48 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -80,6 +80,7 @@
/* MTK generation one iommu HW only support 4K size mapping */
#define MT2701_IOMMU_PAGE_SHIFT 12
#define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT)
+#define MT2701_LARB_NR_MAX 3
/*
* MTK m4u support 4GB iova address space, and only support 4K page
@@ -457,6 +458,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
/* Link the consumer device with the smi-larb device(supplier) */
larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ if (larbid >= MT2701_LARB_NR_MAX)
+ return ERR_PTR(-EINVAL);
+
for (idx = 1; idx < fwspec->num_ids; idx++) {
larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
if (larbid != larbidx) {
@@ -467,6 +471,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
}
larbdev = data->larb_imu[larbid].dev;
+ if (!larbdev)
+ return ERR_PTR(-EINVAL);
+
link = device_link_add(dev, larbdev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link)
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index c91ddb0cf144..01709c61e364 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -308,7 +308,16 @@ static inline int armada_370_xp_msi_init(struct device_node *node,
static void armada_xp_mpic_perf_init(void)
{
- unsigned long cpuid = cpu_logical_map(smp_processor_id());
+ unsigned long cpuid;
+
+ /*
+ * This Performance Counter Overflow interrupt is specific to
+ * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
+ */
+ if (!of_machine_is_compatible("marvell,armada-370-xp"))
+ return;
+
+ cpuid = cpu_logical_map(smp_processor_id());
/* Enable Performance Counter Overflow interrupts */
writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index a47db16ff960..9c9fc3e2967e 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -77,8 +77,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
}
i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
- if (i2c_ic->parent_irq < 0) {
- ret = i2c_ic->parent_irq;
+ if (!i2c_ic->parent_irq) {
+ ret = -EINVAL;
goto err_iounmap;
}
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 18b77c3e6db4..279e92cf0b16 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -157,8 +157,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
}
irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
- rc = irq;
+ if (!irq) {
+ rc = -EINVAL;
goto err;
}
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index b4c1924f0255..38fab02ffe9d 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent)
/* The PB11MPCore GIC needs to be configured in the syscon */
map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index bebd3ea787e7..90a10f6dcf82 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1906,7 +1906,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
if (!gic_data.ppi_descs)
- return;
+ goto out_put_node;
nr_parts = of_get_child_count(parts_node);
@@ -1947,12 +1947,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
continue;
cpu = of_cpu_node_to_id(cpu_node);
- if (WARN_ON(cpu < 0))
+ if (WARN_ON(cpu < 0)) {
+ of_node_put(cpu_node);
continue;
+ }
pr_cont("%pOF[%d] ", cpu_node, cpu);
cpumask_set_cpu(cpu, &part->mask);
+ of_node_put(cpu_node);
}
pr_cont("}\n");
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index 50a56820c99b..56bf502d9c67 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -134,9 +134,9 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
if (!cpu_ictl)
return -EINVAL;
ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp);
+ of_node_put(cpu_ictl);
if (ret || tmp != 1)
return -EINVAL;
- of_node_put(cpu_ictl);
cpu_int = be32_to_cpup(imap + 2);
if (cpu_int > 7 || cpu_int < 2)
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index abd011fcecf4..c7db617e1a2f 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -37,11 +37,26 @@ struct exiu_irq_data {
u32 spi_base;
};
-static void exiu_irq_eoi(struct irq_data *d)
+static void exiu_irq_ack(struct irq_data *d)
{
struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
writel(BIT(d->hwirq), data->base + EIREQCLR);
+}
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ /*
+ * Level triggered interrupts are latched and must be cleared during
+ * EOI or the interrupt will be jammed on. Of course if a level
+ * triggered interrupt is still asserted then the write will not clear
+ * the interrupt.
+ */
+ if (irqd_is_level_type(d))
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+
irq_chip_eoi_parent(d);
}
@@ -91,10 +106,13 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
writel_relaxed(val, data->base + EILVL);
val = readl_relaxed(data->base + EIEDG);
- if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) {
val &= ~BIT(d->hwirq);
- else
+ irq_set_handler_locked(d, handle_fasteoi_irq);
+ } else {
val |= BIT(d->hwirq);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ }
writel_relaxed(val, data->base + EIEDG);
writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
@@ -104,6 +122,7 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
static struct irq_chip exiu_irq_chip = {
.name = "EXIU",
+ .irq_ack = exiu_irq_ack,
.irq_eoi = exiu_irq_eoi,
.irq_enable = exiu_irq_enable,
.irq_mask = exiu_irq_mask,
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 27933338f7b3..8c581c985aa7 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -151,14 +151,25 @@ static struct irq_chip xtensa_mx_irq_chip = {
.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
+static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
+{
+ unsigned int i;
+
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+
+ /* Initialize default IRQ routing to CPU 0 */
+ for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
+ set_er(1, MIROUT(i));
+}
+
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
- irq_set_default_host(root_domain);
- secondary_init_irq();
+ xtensa_mx_init_common(root_domain);
return 0;
}
@@ -168,8 +179,7 @@ static int __init xtensa_mx_init(struct device_node *np,
struct irq_domain *root_domain =
irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
- irq_set_default_host(root_domain);
- secondary_init_irq();
+ xtensa_mx_init_common(root_domain);
return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 5cdc361da37c..539a2ed4e13d 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -44,6 +44,7 @@ config ADB_IOP
config ADB_CUDA
bool "Support for Cuda/Egret based Macs and PowerMacs"
depends on (ADB || PPC_PMAC) && !PPC_PMAC64
+ select RTC_LIB
help
This provides support for Cuda/Egret based Macintosh and
Power Macintosh systems. This includes most m68k based Macs,
@@ -57,6 +58,7 @@ config ADB_CUDA
config ADB_PMU
bool "Support for PMU based PowerMacs and PowerBooks"
depends on PPC_PMAC || MAC
+ select RTC_LIB
help
On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the
PMU is an embedded microprocessor whose primary function is to
@@ -67,6 +69,10 @@ config ADB_PMU
this device; you should do so if your machine is one of those
mentioned above.
+config ADB_PMU_EVENT
+ def_bool y
+ depends on ADB_PMU && INPUT=y
+
config ADB_PMU_LED
bool "Support for the Power/iBook front LED"
depends on PPC_PMAC && ADB_PMU
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 49819b1b6f20..712edcb3e0b0 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -12,7 +12,8 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o
-obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
+obj-$(CONFIG_ADB_PMU) += via-pmu.o
+obj-$(CONFIG_ADB_PMU_EVENT) += via-pmu-event.o
obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o
obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
obj-$(CONFIG_ADB_CUDA) += via-cuda.o
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 4b98bc26a94b..2109129ea1bb 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1459,7 +1459,7 @@ next:
pmu_pass_intr(data, len);
/* len == 6 is probably a bad check. But how do I
* know what PMU versions send what events here? */
- if (len == 6) {
+ if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) {
via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
via_pmu_event(PMU_EVT_LID, data[1]&1);
}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 3e7d4b20ab34..4229b9b5da98 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -82,11 +82,11 @@ static void msg_submit(struct mbox_chan *chan)
exit:
spin_unlock_irqrestore(&chan->lock, flags);
- /* kick start the timer immediately to avoid delays */
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
- /* but only if not already active */
- if (!hrtimer_active(&chan->mbox->poll_hrt))
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ /* kick start the timer immediately to avoid delays */
+ spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
}
}
@@ -120,20 +120,26 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
+ unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
if (chan->active_req && chan->cl) {
- resched = true;
txdone = chan->mbox->ops->last_tx_done(chan);
if (txdone)
tx_tick(chan, 0);
+ else
+ resched = true;
}
}
if (resched) {
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
@@ -500,6 +506,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
mbox->poll_hrt.function = txdone_hrtimer;
+ spin_lock_init(&mbox->poll_hrt_lock);
}
for (i = 0; i < mbox->num_chans; i++) {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 8eecc9df319b..7b6f8bfef927 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
int i;
struct bkey *k = NULL;
struct btree_iter iter;
- struct btree_check_state *check_state;
- char name[32];
+ struct btree_check_state check_state;
/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
@@ -2018,63 +2017,58 @@ int bch_btree_check(struct cache_set *c)
if (c->root->level == 0)
return 0;
- check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
- if (!check_state)
- return -ENOMEM;
-
- check_state->c = c;
- check_state->total_threads = bch_btree_chkthread_nr();
- check_state->key_idx = 0;
- spin_lock_init(&check_state->idx_lock);
- atomic_set(&check_state->started, 0);
- atomic_set(&check_state->enough, 0);
- init_waitqueue_head(&check_state->wait);
+ check_state.c = c;
+ check_state.total_threads = bch_btree_chkthread_nr();
+ check_state.key_idx = 0;
+ spin_lock_init(&check_state.idx_lock);
+ atomic_set(&check_state.started, 0);
+ atomic_set(&check_state.enough, 0);
+ init_waitqueue_head(&check_state.wait);
+ rw_lock(0, c->root, c->root->level);
/*
* Run multiple threads to check btree nodes in parallel,
- * if check_state->enough is non-zero, it means current
+ * if check_state.enough is non-zero, it means current
* running check threads are enough, unnecessary to create
* more.
*/
- for (i = 0; i < check_state->total_threads; i++) {
- /* fetch latest check_state->enough earlier */
+ for (i = 0; i < check_state.total_threads; i++) {
+ /* fetch latest check_state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&check_state->enough))
+ if (atomic_read(&check_state.enough))
break;
- check_state->infos[i].result = 0;
- check_state->infos[i].state = check_state;
- snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
- atomic_inc(&check_state->started);
+ check_state.infos[i].result = 0;
+ check_state.infos[i].state = &check_state;
- check_state->infos[i].thread =
+ check_state.infos[i].thread =
kthread_run(bch_btree_check_thread,
- &check_state->infos[i],
- name);
- if (IS_ERR(check_state->infos[i].thread)) {
+ &check_state.infos[i],
+ "bch_btrchk[%d]", i);
+ if (IS_ERR(check_state.infos[i].thread)) {
pr_err("fails to run thread bch_btrchk[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(check_state->infos[i].thread);
+ kthread_stop(check_state.infos[i].thread);
ret = -ENOMEM;
goto out;
}
+ atomic_inc(&check_state.started);
}
/*
* Must wait for all threads to stop.
*/
- wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0);
+ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
- for (i = 0; i < check_state->total_threads; i++) {
- if (check_state->infos[i].result) {
- ret = check_state->infos[i].result;
+ for (i = 0; i < check_state.total_threads; i++) {
+ if (check_state.infos[i].result) {
+ ret = check_state.infos[i].result;
goto out;
}
}
out:
- kfree(check_state);
+ rw_unlock(0, c->root);
return ret;
}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 50482107134f..1b5fdbc0d83e 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -226,7 +226,7 @@ struct btree_check_info {
int result;
};
-#define BCH_BTR_CHKTHREAD_MAX 64
+#define BCH_BTR_CHKTHREAD_MAX 12
struct btree_check_state {
struct cache_set *c;
int total_threads;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 61bd79babf7a..346a92c43858 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -407,6 +407,11 @@ err:
return ret;
}
+void bch_journal_space_reserve(struct journal *j)
+{
+ j->do_reserve = true;
+}
+
/* Journalling */
static void btree_flush_write(struct cache_set *c)
@@ -625,12 +630,30 @@ static void do_journal_discard(struct cache *ca)
}
}
+static unsigned int free_journal_buckets(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+ struct cache *ca = c->cache;
+ struct journal_device *ja = &c->cache->journal;
+ unsigned int n;
+
+ /* In case njournal_buckets is not a power of 2 */
+ if (ja->cur_idx >= ja->discard_idx)
+ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
+ else
+ n = ja->discard_idx - ja->cur_idx;
+
+ if (n > (1 + j->do_reserve))
+ return n - (1 + j->do_reserve);
+
+ return 0;
+}
+
static void journal_reclaim(struct cache_set *c)
{
struct bkey *k = &c->journal.key;
struct cache *ca = c->cache;
uint64_t last_seq;
- unsigned int next;
struct journal_device *ja = &ca->journal;
atomic_t p __maybe_unused;
@@ -653,12 +676,10 @@ static void journal_reclaim(struct cache_set *c)
if (c->journal.blocks_free)
goto out;
- next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
- /* No space available on this device */
- if (next == ja->discard_idx)
+ if (!free_journal_buckets(c))
goto out;
- ja->cur_idx = next;
+ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
k->ptr[0] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
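The free_journal_buckets() helper added above counts the usable slots in the circular journal, handling the case where cur_idx has wrapped past discard_idx and always holding back one bucket plus one more once do_reserve is set. Below is a userspace sketch of the same ring arithmetic; the function and parameter names are made up, and the fixed one-plus-reserve margin is the sketch's assumption.

#include <stdio.h>

/* Free slots in a ring of 'nbuckets' entries, where 'cur' is the next slot
 * to write and 'discard' is the oldest slot not yet reclaimed.  One slot is
 * always kept back, plus one more when 'reserve' is set, mirroring the
 * 1 + do_reserve margin in the hunk above. */
static unsigned int free_slots(unsigned int nbuckets, unsigned int cur,
                               unsigned int discard, int reserve)
{
    unsigned int n;

    if (cur >= discard)    /* writer has wrapped past the reclaim index */
        n = nbuckets + discard - cur;
    else
        n = discard - cur;

    if (n > (unsigned int)(1 + reserve))
        return n - (1 + reserve);
    return 0;
}

int main(void)
{
    printf("%u\n", free_slots(8, 2, 6, 0));   /* 4 ahead, 3 usable */
    printf("%u\n", free_slots(8, 6, 2, 1));   /* wrapped: 4 ahead, 2 usable */
    return 0;
}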
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index f2ea34d5f431..cd316b4a1e95 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -105,6 +105,7 @@ struct journal {
spinlock_t lock;
spinlock_t flush_write_lock;
bool btree_flushing;
+ bool do_reserve;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list);
void bch_journal_free(struct cache_set *c);
int bch_journal_alloc(struct cache_set *c);
+void bch_journal_space_reserve(struct journal *j);
#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 6d1de889baeb..9f4a2850aa47 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1107,6 +1107,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
* which would call closure_get(&dc->disk.cl)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+ if (!ddip) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio->bi_end_io(bio);
+ return;
+ }
+
ddip->d = d;
/* Count on the bcache device */
ddip->orig_bdev = orig_bdev;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3f72c0f40613..af4fa8071cbc 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2131,6 +2131,7 @@ static int run_cache_set(struct cache_set *c)
flash_devs_run(c);
+ bch_journal_space_reserve(&c->journal);
set_bit(CACHE_SET_RUNNING, &c->flags);
return 0;
err:
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 1c03a3b72f8a..9699ce076b77 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -802,13 +802,11 @@ static int bch_writeback_thread(void *arg)
/* Init */
#define INIT_KEYS_EACH_TIME 500000
-#define INIT_KEYS_SLEEP_MS 100
struct sectors_dirty_init {
struct btree_op op;
unsigned int inode;
size_t count;
- struct bkey start;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -824,11 +822,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
KEY_START(k), KEY_SIZE(k));
op->count++;
- if (atomic_read(&b->c->search_inflight) &&
- !(op->count % INIT_KEYS_EACH_TIME)) {
- bkey_copy_key(&op->start, k);
- return -EAGAIN;
- }
+ if (!(op->count % INIT_KEYS_EACH_TIME))
+ cond_resched();
return MAP_CONTINUE;
}
@@ -843,24 +838,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
-
- do {
- ret = bcache_btree(map_keys_recurse,
- k,
- c->root,
- &op.op,
- &op.start,
- sectors_dirty_init_fn,
- 0);
- if (ret == -EAGAIN)
- schedule_timeout_interruptible(
- msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
- else if (ret < 0) {
- pr_warn("sectors dirty init failed, ret=%d!\n", ret);
- break;
- }
- } while (ret == -EAGAIN);
+
+ ret = bcache_btree(map_keys_recurse,
+ k,
+ c->root,
+ &op.op,
+ &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn,
+ 0);
+ if (ret < 0)
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
return ret;
}
@@ -904,7 +891,6 @@ static int bch_dirty_init_thread(void *arg)
goto out;
}
skip_nr--;
- cond_resched();
}
if (p) {
@@ -914,7 +900,6 @@ static int bch_dirty_init_thread(void *arg)
p = NULL;
prev_idx = cur_idx;
- cond_resched();
}
out:
@@ -945,67 +930,55 @@ void bch_sectors_dirty_init(struct bcache_device *d)
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
- struct bch_dirty_init_state *state;
- char name[32];
+ struct bch_dirty_init_state state;
/* Just count root keys if no leaf node */
+ rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid)
sectors_dirty_init_fn(&op.op, c->root, k);
- return;
- }
- state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
- if (!state) {
- pr_warn("sectors dirty init failed: cannot allocate memory\n");
+ rw_unlock(0, c->root);
return;
}
- state->c = c;
- state->d = d;
- state->total_threads = bch_btre_dirty_init_thread_nr();
- state->key_idx = 0;
- spin_lock_init(&state->idx_lock);
- atomic_set(&state->started, 0);
- atomic_set(&state->enough, 0);
- init_waitqueue_head(&state->wait);
-
- for (i = 0; i < state->total_threads; i++) {
- /* Fetch latest state->enough earlier */
+ state.c = c;
+ state.d = d;
+ state.total_threads = bch_btre_dirty_init_thread_nr();
+ state.key_idx = 0;
+ spin_lock_init(&state.idx_lock);
+ atomic_set(&state.started, 0);
+ atomic_set(&state.enough, 0);
+ init_waitqueue_head(&state.wait);
+
+ for (i = 0; i < state.total_threads; i++) {
+ /* Fetch latest state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&state->enough))
+ if (atomic_read(&state.enough))
break;
- state->infos[i].state = state;
- atomic_inc(&state->started);
- snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
-
- state->infos[i].thread =
- kthread_run(bch_dirty_init_thread,
- &state->infos[i],
- name);
- if (IS_ERR(state->infos[i].thread)) {
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(state->infos[i].thread);
+ kthread_stop(state.infos[i].thread);
goto out;
}
+ atomic_inc(&state.started);
}
- /*
- * Must wait for all threads to stop.
- */
- wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0);
-
out:
- kfree(state);
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+ rw_unlock(0, c->root);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 02b2f9df73f6..31df716951f6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -20,7 +20,7 @@
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
-#define BCH_DIRTY_INIT_THRD_MAX 64
+#define BCH_DIRTY_INIT_THRD_MAX 12
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4be601821482..e39936d688d4 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -3430,6 +3430,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+static char hex2asc(unsigned char c)
+{
+ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+}
+
static void crypt_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -3448,9 +3453,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
if (cc->key_size > 0) {
if (cc->key_string)
DMEMIT(":%u:%s", cc->key_size, cc->key_string);
- else
- for (i = 0; i < cc->key_size; i++)
- DMEMIT("%02x", cc->key[i]);
+ else {
+ for (i = 0; i < cc->key_size; i++) {
+ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
+ hex2asc(cc->key[i] & 0xf));
+ }
+ }
} else
DMEMIT("-");
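The hex2asc() helper added above turns one nibble into its ASCII hex digit without a branch or lookup table: for 0-9 the term (9 - c) stays non-negative, the right shift yields 0, and the result is simply c + '0'; for 10-15 the subtraction wraps to a large unsigned value, the shift leaves the low bits set, and masking with 0x27 adds the 39 needed to land on 'a'-'f'. Below is a quick standalone check of that identity, reusing the exact expression from the patch; the test key and output are made up for illustration.

#include <stdio.h>

/* Same expression as the dm-crypt helper: maps 0-9 to '0'-'9' and
 * 10-15 to 'a'-'f' without any data-dependent branch. */
static char hex2asc(unsigned char c)
{
    return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
}

int main(void)
{
    unsigned char key[4] = { 0x00, 0x9a, 0xde, 0xff };
    size_t i;

    for (i = 0; i < sizeof(key); i++)
        printf("%c%c", hex2asc(key[i] >> 4), hex2asc(key[i] & 0xf));
    printf("\n");    /* prints 009adeff */
    return 0;
}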
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e2a51c184a25..d5b827086962 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4478,8 +4478,6 @@ try_smaller_buffer:
}
if (should_write_sb) {
- int r;
-
init_journal(ic, 0, ic->journal_sections, 0);
r = dm_integrity_failed(ic);
if (unlikely(r)) {
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 1ecf75ef276a..ccdd65148498 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
/*
* Work out how many "unsigned long"s we need to hold the bitset.
*/
- bitset_size = dm_round_up(region_count,
- sizeof(*lc->clean_bits) << BYTE_SHIFT);
+ bitset_size = dm_round_up(region_count, BITS_PER_LONG);
bitset_size >>= BYTE_SHIFT;
lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
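The dm-log hunk above sizes the clean-bits bitset by rounding the region count up to a whole number of longs (measured in bits) before converting to bytes, rather than rounding against a 32-bit-word-derived quantity, so the allocation always covers full unsigned longs. Below is a userspace sketch of the corrected arithmetic; the helper name and the example region count are made up.

#include <stdio.h>
#include <limits.h>    /* CHAR_BIT */

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* Round a bit count up to a whole number of longs, then convert to bytes. */
static size_t bitset_bytes(size_t region_count)
{
    size_t bits = (region_count + BITS_PER_LONG - 1) / BITS_PER_LONG
                  * BITS_PER_LONG;

    return bits / CHAR_BIT;
}

int main(void)
{
    /* 100 regions -> 128 bits on a 64-bit box -> 16 bytes -> 4 uint32 words */
    size_t bytes = bitset_bytes(100);

    printf("%zu bytes, %zu uint32 words\n", bytes, bytes / sizeof(unsigned int));
    return 0;
}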
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 0e039a8c0bf2..a3f2050b9c9b 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -225,6 +225,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
atomic_read(&shared->in_flight[READ]),
atomic_read(&shared->in_flight[WRITE]));
}
+ cond_resched();
}
dm_stat_free(&s->rcu_head);
}
@@ -330,6 +331,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
for (ni = 0; ni < n_entries; ni++) {
atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+ cond_resched();
}
if (s->n_histogram_entries) {
@@ -342,6 +344,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
for (ni = 0; ni < n_entries; ni++) {
s->stat_shared[ni].tmp.histogram = hi;
hi += s->n_histogram_entries + 1;
+ cond_resched();
}
}
@@ -362,6 +365,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
for (ni = 0; ni < n_entries; ni++) {
p[ni].histogram = hi;
hi += s->n_histogram_entries + 1;
+ cond_resched();
}
}
}
@@ -497,6 +501,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
}
DMEMIT("\n");
}
+ cond_resched();
}
mutex_unlock(&stats->mutex);
@@ -774,6 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
local_irq_enable();
}
}
+ cond_resched();
}
}
@@ -889,6 +895,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
if (unlikely(sz + 1 >= maxlen))
goto buffer_overflow;
+
+ cond_resched();
}
if (clear)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 88288c8d6bc8..426299ceb33d 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1312,6 +1312,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
+ .features = DM_TARGET_IMMUTABLE,
.version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index e29c6298ef5c..8cc11b1987ec 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -639,14 +639,6 @@ re_read:
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- /* Setup nodes/clustername only if bitmap version is
- * cluster-compatible
- */
- if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
- nodes = le32_to_cpu(sb->nodes);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name,
- sb->cluster_name, 64);
- }
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -668,6 +660,16 @@ re_read:
goto out;
}
+ /*
+ * Setup nodes/clustername only if bitmap version is
+ * cluster-compatible
+ */
+ if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+ sb->cluster_name, 64);
+ }
+
/* keep the array size field of the bitmap superblock up to date */
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
@@ -700,9 +702,9 @@ re_read:
out:
kunmap_atomic(sb);
- /* Assigning chunksize is required for "re_read" */
- bitmap->mddev->bitmap_info.chunksize = chunksize;
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
+ /* Assigning chunksize is required for "re_read" */
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_warn("%s: Could not setup cluster service (%d)\n",
@@ -713,18 +715,18 @@ out:
goto re_read;
}
-
out_no_sb:
- if (test_bit(BITMAP_STALE, &bitmap->flags))
- bitmap->events_cleared = bitmap->mddev->events;
- bitmap->mddev->bitmap_info.chunksize = chunksize;
- bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
- bitmap->mddev->bitmap_info.max_write_behind = write_behind;
- bitmap->mddev->bitmap_info.nodes = nodes;
- if (bitmap->mddev->bitmap_info.space == 0 ||
- bitmap->mddev->bitmap_info.space > sectors_reserved)
- bitmap->mddev->bitmap_info.space = sectors_reserved;
- if (err) {
+ if (err == 0) {
+ if (test_bit(BITMAP_STALE, &bitmap->flags))
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.nodes = nodes;
+ if (bitmap->mddev->bitmap_info.space == 0 ||
+ bitmap->mddev->bitmap_info.space > sectors_reserved)
+ bitmap->mddev->bitmap_info.space = sectors_reserved;
+ } else {
md_bitmap_print_sb(bitmap);
if (bitmap->cluster_slot < 0)
md_cluster_stop(bitmap->mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5ce2648cbe5b..bf1c5c0e472e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2628,14 +2628,16 @@ static void sync_sbs(struct mddev *mddev, int nospares)
static bool does_sb_need_changing(struct mddev *mddev)
{
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL, *iter;
struct mdp_superblock_1 *sb;
int role;
/* Find a good rdev */
- rdev_for_each(rdev, mddev)
- if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
+ rdev_for_each(iter, mddev)
+ if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
+ rdev = iter;
break;
+ }
/* No good device found. */
if (!rdev)
@@ -5586,8 +5588,6 @@ static void md_free(struct kobject *ko)
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
kfree(mddev);
}
@@ -6275,8 +6275,6 @@ void md_stop(struct mddev *mddev)
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -7944,17 +7942,22 @@ EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
- struct md_thread *thread = *threadp;
- if (!thread)
- return;
- pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
- /* Locking ensures that mddev_unlock does not wake_up a
+ struct md_thread *thread;
+
+ /*
+ * Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
+ thread = *threadp;
+ if (!thread) {
+ spin_unlock(&pers_lock);
+ return;
+ }
*threadp = NULL;
spin_unlock(&pers_lock);
+ pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
}
@@ -9772,16 +9775,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
void md_reload_sb(struct mddev *mddev, int nr)
{
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL, *iter;
int err;
/* Find the rdev */
- rdev_for_each_rcu(rdev, mddev) {
- if (rdev->desc_nr == nr)
+ rdev_for_each_rcu(iter, mddev) {
+ if (iter->desc_nr == nr) {
+ rdev = iter;
break;
+ }
}
- if (!rdev || rdev->desc_nr != nr) {
+ if (!rdev) {
pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
return;
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b59a77b31b90..8495045eb989 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -128,21 +128,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
- if (conf->nr_strip_zones == 1) {
- conf->layout = RAID0_ORIG_LAYOUT;
- } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
- mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
- conf->layout = mddev->layout;
- } else if (default_layout == RAID0_ORIG_LAYOUT ||
- default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
- conf->layout = default_layout;
- } else {
- pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
- mdname(mddev));
- pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
- err = -ENOTSUPP;
- goto abort;
- }
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size
@@ -273,6 +258,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
(unsigned long long)smallest->sectors);
}
+ if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
+ conf->layout = RAID0_ORIG_LAYOUT;
+ } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
+ mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = mddev->layout;
+ } else if (default_layout == RAID0_ORIG_LAYOUT ||
+ default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = default_layout;
+ } else {
+ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+ mdname(mddev));
+ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
+ err = -EOPNOTSUPP;
+ goto abort;
+ }
+
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -361,7 +362,6 @@ static void free_conf(struct mddev *mddev, struct r0conf *conf)
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
}
static void raid0_free(struct mddev *mddev, void *priv)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b9d062f0a02b..e54d802ee0bb 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *conf)
return degraded;
}
-static int has_failed(struct r5conf *conf)
+static bool has_failed(struct r5conf *conf)
{
- int degraded;
+ int degraded = conf->mddev->degraded;
- if (conf->mddev->reshape_position == MaxSector)
- return conf->mddev->degraded > conf->max_degraded;
+ if (test_bit(MD_BROKEN, &conf->mddev->flags))
+ return true;
- degraded = raid5_calc_degraded(conf);
- if (degraded > conf->max_degraded)
- return 1;
- return 0;
+ if (conf->mddev->reshape_position != MaxSector)
+ degraded = raid5_calc_degraded(conf);
+
+ return degraded > conf->max_degraded;
}
struct stripe_head *
@@ -2877,34 +2877,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
unsigned long flags;
pr_debug("raid456: error called\n");
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
+
spin_lock_irqsave(&conf->device_lock, flags);
+ set_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ mddev->degraded = raid5_calc_degraded(conf);
- if (test_bit(In_sync, &rdev->flags) &&
- mddev->degraded == conf->max_degraded) {
- /*
- * Don't allow to achieve failed state
- * Don't try to recover this device
- */
+ if (has_failed(conf)) {
+ set_bit(MD_BROKEN, &conf->mddev->flags);
conf->recovery_disabled = mddev->recovery_disabled;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- return;
+
+ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
+ mdname(mddev), mddev->degraded, conf->raid_disks);
+ } else {
+ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
- set_bit(Faulty, &rdev->flags);
- clear_bit(In_sync, &rdev->flags);
- mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
r5c_update_on_rdev_error(mddev, rdev);
}
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 1f599e300e42..67776a0d31e8 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1272,7 +1272,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
* While trying to poll the physical address was reset
* and the adapter was unconfigured, so bail out.
*/
- if (!adap->is_configuring)
+ if (adap->phys_addr == CEC_PHYS_ADDR_INVALID)
return -EINTR;
if (err)
@@ -1329,7 +1329,6 @@ static void cec_adap_unconfigure(struct cec_adapter *adap)
adap->phys_addr != CEC_PHYS_ADDR_INVALID)
WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
adap->log_addrs.log_addr_mask = 0;
- adap->is_configuring = false;
adap->is_configured = false;
cec_flush(adap);
wake_up_interruptible(&adap->kthread_waitq);
@@ -1521,9 +1520,10 @@ unconfigure:
for (i = 0; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
cec_adap_unconfigure(adap);
+ adap->is_configuring = false;
adap->kthread_config = NULL;
- mutex_unlock(&adap->lock);
complete(&adap->config_completion);
+ mutex_unlock(&adap->lock);
return 0;
}
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 5363f3bcafe3..67bb770ed63f 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -1603,8 +1603,11 @@ static int ccs_power_on(struct device *dev)
usleep_range(1000, 2000);
} while (--retry);
- if (!reset)
- return -EIO;
+ if (!reset) {
+ dev_err(dev, "software reset failed\n");
+ rval = -EIO;
+ goto out_cci_addr_fail;
+ }
}
if (sensor->hwcfg.i2c_addr_alt) {
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index be3f6ea55559..84279a680873 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -1011,7 +1011,7 @@ static int imx412_power_on(struct device *dev)
struct imx412 *imx412 = to_imx412(sd);
int ret;
- gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+ gpiod_set_value_cansleep(imx412->reset_gpio, 0);
ret = clk_prepare_enable(imx412->inclk);
if (ret) {
@@ -1024,7 +1024,7 @@ static int imx412_power_on(struct device *dev)
return 0;
error_reset:
- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
return ret;
}
@@ -1040,10 +1040,10 @@ static int imx412_power_off(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx412 *imx412 = to_imx412(sd);
- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
-
clk_disable_unprepare(imx412->inclk);
+ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+
return 0;
}
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 1aa2c58fd38c..ce943702ffe9 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -15,6 +15,7 @@
#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/module.h>
@@ -168,6 +169,8 @@ struct max9286_priv {
u32 init_rev_chan_mv;
u32 rev_chan_mv;
+ u32 gpio_poc[2];
+
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixelrate;
@@ -1026,20 +1029,27 @@ static int max9286_setup(struct max9286_priv *priv)
return 0;
}
-static void max9286_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int max9286_gpio_set(struct max9286_priv *priv, unsigned int offset,
+ int value)
{
- struct max9286_priv *priv = gpiochip_get_data(chip);
-
if (value)
priv->gpio_state |= BIT(offset);
else
priv->gpio_state &= ~BIT(offset);
- max9286_write(priv, 0x0f, MAX9286_0X0F_RESERVED | priv->gpio_state);
+ return max9286_write(priv, 0x0f,
+ MAX9286_0X0F_RESERVED | priv->gpio_state);
+}
+
+static void max9286_gpiochip_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct max9286_priv *priv = gpiochip_get_data(chip);
+
+ max9286_gpio_set(priv, offset, value);
}
-static int max9286_gpio_get(struct gpio_chip *chip, unsigned int offset)
+static int max9286_gpiochip_get(struct gpio_chip *chip, unsigned int offset)
{
struct max9286_priv *priv = gpiochip_get_data(chip);
@@ -1059,13 +1069,10 @@ static int max9286_register_gpio(struct max9286_priv *priv)
gpio->of_node = dev->of_node;
gpio->ngpio = 2;
gpio->base = -1;
- gpio->set = max9286_gpio_set;
- gpio->get = max9286_gpio_get;
+ gpio->set = max9286_gpiochip_set;
+ gpio->get = max9286_gpiochip_get;
gpio->can_sleep = true;
- /* GPIO values default to high */
- priv->gpio_state = BIT(0) | BIT(1);
-
ret = devm_gpiochip_add_data(dev, gpio, priv);
if (ret)
dev_err(dev, "Unable to create gpio_chip\n");
@@ -1073,26 +1080,83 @@ static int max9286_register_gpio(struct max9286_priv *priv)
return ret;
}
-static int max9286_init(struct device *dev)
+static int max9286_parse_gpios(struct max9286_priv *priv)
{
- struct max9286_priv *priv;
- struct i2c_client *client;
+ struct device *dev = &priv->client->dev;
int ret;
- client = to_i2c_client(dev);
- priv = i2c_get_clientdata(client);
+ /* GPIO values default to high */
+ priv->gpio_state = BIT(0) | BIT(1);
- /* Enable the bus power. */
- ret = regulator_enable(priv->regulator);
- if (ret < 0) {
- dev_err(&client->dev, "Unable to turn PoC on\n");
- return ret;
+ /*
+ * Parse the "gpio-poc" vendor property. If the property is not
+ * specified the camera power is controlled by a regulator.
+ */
+ ret = of_property_read_u32_array(dev->of_node, "maxim,gpio-poc",
+ priv->gpio_poc, 2);
+ if (ret == -EINVAL) {
+ /*
+ * If gpio lines are not used for the camera power, register
+ * a gpio controller for consumers.
+ */
+ ret = max9286_register_gpio(priv);
+ if (ret)
+ return ret;
+
+ priv->regulator = devm_regulator_get(dev, "poc");
+ if (IS_ERR(priv->regulator)) {
+ return dev_err_probe(dev, PTR_ERR(priv->regulator),
+ "Unable to get PoC regulator (%ld)\n",
+ PTR_ERR(priv->regulator));
+ }
+
+ return 0;
+ }
+
+ /* If the property is specified make sure it is well formed. */
+ if (ret || priv->gpio_poc[0] > 1 ||
+ (priv->gpio_poc[1] != GPIO_ACTIVE_HIGH &&
+ priv->gpio_poc[1] != GPIO_ACTIVE_LOW)) {
+ dev_err(dev, "Invalid 'gpio-poc' property\n");
+ return -EINVAL;
}
+ return 0;
+}
+
+static int max9286_poc_enable(struct max9286_priv *priv, bool enable)
+{
+ int ret;
+
+ /* If the regulator is not available, use gpio to control power. */
+ if (!priv->regulator)
+ ret = max9286_gpio_set(priv, priv->gpio_poc[0],
+ enable ^ priv->gpio_poc[1]);
+ else if (enable)
+ ret = regulator_enable(priv->regulator);
+ else
+ ret = regulator_disable(priv->regulator);
+
+ if (ret < 0)
+ dev_err(&priv->client->dev, "Unable to turn power %s\n",
+ enable ? "on" : "off");
+
+ return ret;
+}
+
+static int max9286_init(struct max9286_priv *priv)
+{
+ struct i2c_client *client = priv->client;
+ int ret;
+
+ ret = max9286_poc_enable(priv, true);
+ if (ret)
+ return ret;
+
ret = max9286_setup(priv);
if (ret) {
- dev_err(dev, "Unable to setup max9286\n");
- goto err_regulator;
+ dev_err(&client->dev, "Unable to setup max9286\n");
+ goto err_poc_disable;
}
/*
@@ -1101,13 +1165,13 @@ static int max9286_init(struct device *dev)
*/
ret = max9286_v4l2_register(priv);
if (ret) {
- dev_err(dev, "Failed to register with V4L2\n");
- goto err_regulator;
+ dev_err(&client->dev, "Failed to register with V4L2\n");
+ goto err_poc_disable;
}
ret = max9286_i2c_mux_init(priv);
if (ret) {
- dev_err(dev, "Unable to initialize I2C multiplexer\n");
+ dev_err(&client->dev, "Unable to initialize I2C multiplexer\n");
goto err_v4l2_register;
}
@@ -1118,8 +1182,8 @@ static int max9286_init(struct device *dev)
err_v4l2_register:
max9286_v4l2_unregister(priv);
-err_regulator:
- regulator_disable(priv->regulator);
+err_poc_disable:
+ max9286_poc_enable(priv, false);
return ret;
}
@@ -1262,7 +1326,6 @@ static int max9286_probe(struct i2c_client *client)
mutex_init(&priv->mutex);
priv->client = client;
- i2c_set_clientdata(client, priv);
priv->gpiod_pwdn = devm_gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_HIGH);
@@ -1290,25 +1353,15 @@ static int max9286_probe(struct i2c_client *client)
*/
max9286_configure_i2c(priv, false);
- ret = max9286_register_gpio(priv);
+ ret = max9286_parse_gpios(priv);
if (ret)
goto err_powerdown;
- priv->regulator = devm_regulator_get(&client->dev, "poc");
- if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Unable to get PoC regulator (%ld)\n",
- PTR_ERR(priv->regulator));
- ret = PTR_ERR(priv->regulator);
- goto err_powerdown;
- }
-
ret = max9286_parse_dt(priv);
if (ret)
goto err_powerdown;
- ret = max9286_init(&client->dev);
+ ret = max9286_init(priv);
if (ret < 0)
goto err_cleanup_dt;
@@ -1324,13 +1377,13 @@ err_powerdown:
static int max9286_remove(struct i2c_client *client)
{
- struct max9286_priv *priv = i2c_get_clientdata(client);
+ struct max9286_priv *priv = sd_to_max9286(i2c_get_clientdata(client));
i2c_mux_del_adapters(priv->mux);
max9286_v4l2_unregister(priv);
- regulator_disable(priv->regulator);
+ max9286_poc_enable(priv, false);
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
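
The max9286 change above routes camera power (PoC) through either a GPIO line or a regulator, depending on whether the "maxim,gpio-poc" property was parsed, with a single max9286_poc_enable() entry point in front of both back ends. Below is a minimal, hedged user-space sketch of that "one enable helper, two back ends" shape; poc_enable(), the stub back ends and struct power_ctrl are invented for the illustration and are not part of the driver.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs standing in for the GPIO and regulator back ends. */
static int gpio_set(int offset, int value)  { printf("gpio %d <- %d\n", offset, value); return 0; }
static int regulator_set(bool on)           { printf("regulator %s\n", on ? "on" : "off"); return 0; }

struct power_ctrl {
	bool use_gpio;       /* true when a "gpio-poc"-style property was found */
	int  gpio_offset;    /* which GPIO line drives the supply */
	int  gpio_active_low;
};

/* Single entry point; callers never care which back end is in use. */
static int poc_enable(const struct power_ctrl *pc, bool enable)
{
	if (pc->use_gpio)
		return gpio_set(pc->gpio_offset, enable ^ pc->gpio_active_low);
	return regulator_set(enable);
}

int main(void)
{
	struct power_ctrl pc = { .use_gpio = true, .gpio_offset = 0, .gpio_active_low = 1 };

	poc_enable(&pc, true);   /* active-low line: writes 0 to turn power on */
	poc_enable(&pc, false);
	return 0;
}
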
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index ef8b52dc9401..bb3666fc5618 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -2498,9 +2498,9 @@ static int ov5648_probe(struct i2c_client *client)
/* DOVDD: digital I/O */
sensor->dovdd = devm_regulator_get(dev, "dovdd");
- if (IS_ERR(sensor->dvdd)) {
+ if (IS_ERR(sensor->dovdd)) {
dev_err(dev, "cannot get DOVDD (digital I/O) regulator\n");
- ret = PTR_ERR(sensor->dvdd);
+ ret = PTR_ERR(sensor->dovdd);
goto error_endpoint;
}
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 196746423116..1be2c0e5bdc1 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -2017,7 +2017,6 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&info->sd.entity);
- ov7670_power_off(sd);
return 0;
}
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 025a610de893..9c6f66cab564 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -611,7 +611,7 @@ static int rdacm20_probe(struct i2c_client *client)
goto error_free_ctrls;
dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->sd.entity.flags |= MEDIA_ENT_F_CAM_SENSOR;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
if (ret < 0)
goto error_free_ctrls;
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index 12ec5467ed1e..ef31cf5f23ca 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -583,7 +583,7 @@ static int rdacm21_probe(struct i2c_client *client)
goto error_free_ctrls;
dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->sd.entity.flags |= MEDIA_ENT_F_CAM_SENSOR;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
if (ret < 0)
goto error_free_ctrls;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index f8f2ff3b00c3..a07b18f2034e 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2165,7 +2165,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
err = dma_set_mask(&pci_dev->dev, 0xffffffff);
if (err) {
pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
- goto fail_ctrl;
+ goto fail_dma_set_mask;
}
err = request_irq(pci_dev->irq, cx23885_irq,
@@ -2173,7 +2173,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
- goto fail_irq;
+ goto fail_dma_set_mask;
}
switch (dev->board) {
@@ -2195,7 +2195,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
return 0;
-fail_irq:
+fail_dma_set_mask:
cx23885_dev_unregister(dev);
fail_ctrl:
v4l2_ctrl_handler_free(hdl);
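
The cx23885 fix above retargets the error label so a failure before request_irq() no longer unwinds an IRQ that was never requested; the usual convention is to name each label after the cleanup it performs and to jump to the label matching the last step that succeeded. A small, hedged sketch of that unwind shape (setup_a/setup_b and the label names are placeholders, not the driver's functions):

#include <stdio.h>

static int  setup_a(void)        { printf("a up\n"); return 0; }
static void undo_a(void)         { printf("a down\n"); }
static int  setup_b(int fail)    { if (fail) return -1; printf("b up\n"); return 0; }

/* Each error label undoes exactly the steps that already succeeded. */
static int probe(int fail_b)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b(fail_b);
	if (err)
		goto err_undo_a;   /* b never came up, so only unwind a */

	return 0;

err_undo_a:
	undo_a();
	return err;
}

int main(void)
{
	probe(0);
	probe(1);
	return 0;
}
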
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 40c10ca94def..a4192e80e9a0 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1339,11 +1339,11 @@ static void cx25821_finidev(struct pci_dev *pci_dev)
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
cx25821_shutdown(dev);
- pci_disable_device(pci_dev);
/* unregister stuff */
if (pci_dev->irq)
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
cx25821_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 757a58829a51..9d9124308f6a 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1723,6 +1723,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
rc = aspeed_video_setup_video(video);
if (rc) {
+ aspeed_video_free_buf(video, &video->jpeg);
clk_unprepare(video->vclk);
clk_unprepare(video->eclk);
return rc;
@@ -1748,8 +1749,7 @@ static int aspeed_video_remove(struct platform_device *pdev)
v4l2_device_unregister(v4l2_dev);
- dma_free_coherent(video->dev, VE_JPEG_HEADER_SIZE, video->jpeg.virt,
- video->jpeg.dma);
+ aspeed_video_free_buf(video, &video->jpeg);
of_reserved_mem_device_release(dev);
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index e29a9193bac8..7421bc51709c 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -267,7 +267,7 @@ static void isc_sama5d2_config_rlp(struct isc_device *isc)
* Thus, if the YCYC mode is selected, replace it with the
* sama5d2-compliant mode which is YYCC .
*/
- if ((rlp_mode & ISC_RLP_CFG_MODE_YCYC) == ISC_RLP_CFG_MODE_YCYC) {
+ if ((rlp_mode & ISC_RLP_CFG_MODE_MASK) == ISC_RLP_CFG_MODE_YCYC) {
rlp_mode &= ~ISC_RLP_CFG_MODE_MASK;
rlp_mode |= ISC_RLP_CFG_MODE_YYCC;
}
@@ -537,7 +537,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(isc->ispck);
if (ret) {
dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto cleanup_subdev;
+ goto disable_pm;
}
/* ispck should be greater or equal to hclock */
@@ -555,6 +555,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
unprepare_clk:
clk_disable_unprepare(isc->ispck);
+disable_pm:
+ pm_runtime_disable(dev);
+
cleanup_subdev:
isc_subdev_cleanup(isc);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 4a553f42ff0a..b4b85a19f7d6 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1318,7 +1318,8 @@ static int coda_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *f)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
- int i;
+ struct coda_q_data *q_data;
+ const struct coda_codec *codec;
if (f->index)
return -EINVAL;
@@ -1327,12 +1328,19 @@ static int coda_enum_frameintervals(struct file *file, void *fh,
if (!ctx->vdoa && f->pixel_format == V4L2_PIX_FMT_YUYV)
return -EINVAL;
- for (i = 0; i < CODA_MAX_FORMATS; i++) {
- if (f->pixel_format == ctx->cvd->src_formats[i] ||
- f->pixel_format == ctx->cvd->dst_formats[i])
- break;
+ if (coda_format_normalize_yuv(f->pixel_format) == V4L2_PIX_FMT_YUV420) {
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ codec = coda_find_codec(ctx->dev, f->pixel_format,
+ q_data->fourcc);
+ } else {
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+ f->pixel_format);
}
- if (i == CODA_MAX_FORMATS)
+ if (!codec)
+ return -EINVAL;
+
+ if (f->width < MIN_W || f->width > codec->max_w ||
+ f->height < MIN_H || f->height > codec->max_h)
return -EINVAL;
f->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
@@ -2338,8 +2346,8 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
- V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
- V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, 0x0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
if (ctx->dev->devtype->product == CODA_HX4 ||
ctx->dev->devtype->product == CODA_7541) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
@@ -2353,12 +2361,15 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
if (ctx->dev->devtype->product == CODA_960) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
- V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
- ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2)),
V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
}
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
@@ -2420,7 +2431,7 @@ static void coda_decode_ctrls(struct coda_ctx *ctx)
ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
- ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index e55e411038f4..e3072d69c49f 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -140,7 +140,7 @@ static int fimc_is_enable_clocks(struct fimc_is *is)
dev_err(&is->pdev->dev, "clock %s enable failed\n",
fimc_is_clocks[i]);
for (--i; i >= 0; i--)
- clk_disable(is->clocks[i]);
+ clk_disable_unprepare(is->clocks[i]);
return ret;
}
pr_debug("enabled clock: %s\n", fimc_is_clocks[i]);
@@ -830,7 +830,7 @@ static int fimc_is_probe(struct platform_device *pdev)
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto err_irq;
+ goto err_pm_disable;
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
@@ -864,6 +864,8 @@ err_pm:
pm_runtime_put_noidle(dev);
if (!pm_runtime_enabled(dev))
fimc_is_runtime_suspend(dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
err_irq:
free_irq(is->irq, is);
err_clk:
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
index edcb3a5e3cb9..2dd4ddbc748a 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
@@ -32,7 +32,7 @@ static inline int fimc_isp_video_device_register(struct fimc_isp *isp,
return 0;
}
-void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp,
enum v4l2_buf_type type)
{
}
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 0f2482367e06..9bc4becdf638 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -104,6 +104,9 @@ int hfi_core_deinit(struct venus_core *core, bool blocking)
mutex_lock(&core->lock);
}
+ if (!core->ops)
+ goto unlock;
+
ret = core->ops->core_deinit(core);
if (!ret)
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index d99ea8973b67..e3246344fb72 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -868,7 +868,7 @@ static int rga_probe(struct platform_device *pdev)
ret = pm_runtime_resume_and_get(rga->dev);
if (ret < 0)
- goto rel_vdev;
+ goto rel_m2m;
rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
@@ -884,7 +884,7 @@ static int rga_probe(struct platform_device *pdev)
DMA_ATTR_WRITE_COMBINE);
if (!rga->cmdbuf_virt) {
ret = -ENOMEM;
- goto rel_vdev;
+ goto rel_m2m;
}
rga->src_mmu_pages =
@@ -921,6 +921,8 @@ free_src_pages:
free_dma:
dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
+rel_m2m:
+ v4l2_m2m_release(rga->m2m_dev);
rel_vdev:
video_device_release(vfd);
unreg_v4l2_dev:
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index c887a31ebb54..420ad4d8df5d 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -1859,7 +1859,7 @@ static int delta_probe(struct platform_device *pdev)
if (ret) {
dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n",
DELTA_PREFIX);
- goto err;
+ goto err_pm_disable;
}
/* register all available decoders */
@@ -1873,7 +1873,7 @@ static int delta_probe(struct platform_device *pdev)
if (ret) {
dev_err(delta->dev, "%s failed to register V4L2 device\n",
DELTA_PREFIX);
- goto err;
+ goto err_pm_disable;
}
delta->work_queue = create_workqueue(DELTA_NAME);
@@ -1898,6 +1898,8 @@ err_work_queue:
destroy_workqueue(delta->work_queue);
err_v4l2:
v4l2_device_unregister(&delta->v4l2_dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
err:
return ret;
}
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 85587c1b6a37..75083cb234fe 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -291,11 +291,11 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
+ crop.left * fmtinfo->bpp[0] / 8;
if (format->num_planes > 1) {
+ unsigned int bpl = format->plane_fmt[1].bytesperline;
unsigned int offset;
- offset = crop.top * format->plane_fmt[1].bytesperline
- + crop.left / fmtinfo->hsub
- * fmtinfo->bpp[1] / 8;
+ offset = crop.top / fmtinfo->vsub * bpl
+ + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8;
mem.addr[1] += offset;
mem.addr[2] += offset;
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 2ca4e86c7b9f..97355e3ebdfd 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -153,6 +153,24 @@ struct imon_context {
const struct imon_usb_dev_descr *dev_descr;
/* device description with key */
/* table for front panels */
+ /*
+ * Fields for deferring free_imon_context().
+ *
+ * Since a reference to "struct imon_context" is stored in
+ * "struct file"->private_data, we need to remember
+ * how many file descriptors might access this "struct imon_context".
+ */
+ refcount_t users;
+ /*
+ * Use a flag for telling display_open()/vfd_write()/lcd_write() that
+ * imon_disconnect() was already called.
+ */
+ bool disconnected;
+ /*
+ * We need to wait for RCU grace period in order to allow
+ * display_open() to safely check ->disconnected and increment ->users.
+ */
+ struct rcu_head rcu;
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -160,18 +178,18 @@ struct imon_context {
/* vfd character device file operations */
static const struct file_operations vfd_fops = {
.owner = THIS_MODULE,
- .open = &display_open,
- .write = &vfd_write,
- .release = &display_close,
+ .open = display_open,
+ .write = vfd_write,
+ .release = display_close,
.llseek = noop_llseek,
};
/* lcd character device file operations */
static const struct file_operations lcd_fops = {
.owner = THIS_MODULE,
- .open = &display_open,
- .write = &lcd_write,
- .release = &display_close,
+ .open = display_open,
+ .write = lcd_write,
+ .release = display_close,
.llseek = noop_llseek,
};
@@ -439,9 +457,6 @@ static struct usb_driver imon_driver = {
.id_table = imon_usb_id_table,
};
-/* to prevent races between open() and disconnect(), probing, etc */
-static DEFINE_MUTEX(driver_lock);
-
/* Module bookkeeping bits */
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);
@@ -481,9 +496,11 @@ static void free_imon_context(struct imon_context *ictx)
struct device *dev = ictx->dev;
usb_free_urb(ictx->tx_urb);
+ WARN_ON(ictx->dev_present_intf0);
usb_free_urb(ictx->rx_urb_intf0);
+ WARN_ON(ictx->dev_present_intf1);
usb_free_urb(ictx->rx_urb_intf1);
- kfree(ictx);
+ kfree_rcu(ictx, rcu);
dev_dbg(dev, "%s: iMON context freed\n", __func__);
}
@@ -499,9 +516,6 @@ static int display_open(struct inode *inode, struct file *file)
int subminor;
int retval = 0;
- /* prevent races with disconnect */
- mutex_lock(&driver_lock);
-
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
@@ -509,13 +523,16 @@ static int display_open(struct inode *inode, struct file *file)
retval = -ENODEV;
goto exit;
}
- ictx = usb_get_intfdata(interface);
- if (!ictx) {
+ rcu_read_lock();
+ ictx = usb_get_intfdata(interface);
+ if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) {
+ rcu_read_unlock();
pr_err("no context found for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
+ rcu_read_unlock();
mutex_lock(&ictx->lock);
@@ -533,8 +550,10 @@ static int display_open(struct inode *inode, struct file *file)
mutex_unlock(&ictx->lock);
+ if (retval && refcount_dec_and_test(&ictx->users))
+ free_imon_context(ictx);
+
exit:
- mutex_unlock(&driver_lock);
return retval;
}
@@ -544,16 +563,9 @@ exit:
*/
static int display_close(struct inode *inode, struct file *file)
{
- struct imon_context *ictx = NULL;
+ struct imon_context *ictx = file->private_data;
int retval = 0;
- ictx = file->private_data;
-
- if (!ictx) {
- pr_err("no context for device\n");
- return -ENODEV;
- }
-
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
@@ -568,6 +580,8 @@ static int display_close(struct inode *inode, struct file *file)
}
mutex_unlock(&ictx->lock);
+ if (refcount_dec_and_test(&ictx->users))
+ free_imon_context(ictx);
return retval;
}
@@ -934,15 +948,12 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
int offset;
int seq;
int retval = 0;
- struct imon_context *ictx;
+ struct imon_context *ictx = file->private_data;
static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
- ictx = file->private_data;
- if (!ictx) {
- pr_err_ratelimited("no context for device\n");
+ if (ictx->disconnected)
return -ENODEV;
- }
mutex_lock(&ictx->lock);
@@ -1018,13 +1029,10 @@ static ssize_t lcd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos)
{
int retval = 0;
- struct imon_context *ictx;
+ struct imon_context *ictx = file->private_data;
- ictx = file->private_data;
- if (!ictx) {
- pr_err_ratelimited("no context for device\n");
+ if (ictx->disconnected)
return -ENODEV;
- }
mutex_lock(&ictx->lock);
@@ -2402,7 +2410,6 @@ static int imon_probe(struct usb_interface *interface,
int ifnum, sysfs_err;
int ret = 0;
struct imon_context *ictx = NULL;
- struct imon_context *first_if_ctx = NULL;
u16 vendor, product;
usbdev = usb_get_dev(interface_to_usbdev(interface));
@@ -2414,17 +2421,12 @@ static int imon_probe(struct usb_interface *interface,
dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
__func__, vendor, product, ifnum);
- /* prevent races probing devices w/multiple interfaces */
- mutex_lock(&driver_lock);
-
first_if = usb_ifnum_to_if(usbdev, 0);
if (!first_if) {
ret = -ENODEV;
goto fail;
}
- first_if_ctx = usb_get_intfdata(first_if);
-
if (ifnum == 0) {
ictx = imon_init_intf0(interface, id);
if (!ictx) {
@@ -2432,9 +2434,11 @@ static int imon_probe(struct usb_interface *interface,
ret = -ENODEV;
goto fail;
}
+ refcount_set(&ictx->users, 1);
} else {
/* this is the secondary interface on the device */
+ struct imon_context *first_if_ctx = usb_get_intfdata(first_if);
/* fail early if first intf failed to register */
if (!first_if_ctx) {
@@ -2448,14 +2452,13 @@ static int imon_probe(struct usb_interface *interface,
ret = -ENODEV;
goto fail;
}
+ refcount_inc(&ictx->users);
}
usb_set_intfdata(interface, ictx);
if (ifnum == 0) {
- mutex_lock(&ictx->lock);
-
if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
&imon_rf_attr_group);
@@ -2466,21 +2469,17 @@ static int imon_probe(struct usb_interface *interface,
if (ictx->display_supported)
imon_init_display(ictx, interface);
-
- mutex_unlock(&ictx->lock);
}
dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
- mutex_unlock(&driver_lock);
usb_put_dev(usbdev);
return 0;
fail:
- mutex_unlock(&driver_lock);
usb_put_dev(usbdev);
dev_err(dev, "unable to register, err %d\n", ret);
@@ -2496,10 +2495,8 @@ static void imon_disconnect(struct usb_interface *interface)
struct device *dev;
int ifnum;
- /* prevent races with multi-interface device probing and display_open */
- mutex_lock(&driver_lock);
-
ictx = usb_get_intfdata(interface);
+ ictx->disconnected = true;
dev = ictx->dev;
ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
@@ -2540,11 +2537,9 @@ static void imon_disconnect(struct usb_interface *interface)
}
}
- if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1)
+ if (refcount_dec_and_test(&ictx->users))
free_imon_context(ictx);
- mutex_unlock(&driver_lock);
-
dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
__func__, ifnum);
}
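
The imon changes above replace the global driver_lock with a per-context reference count plus a "disconnected" flag checked under RCU, so the context is freed only after the last open file descriptor drops its reference. The user-space sketch below shows only the refcount half of that pattern, using C11 atomics in place of the kernel's refcount_t and kfree_rcu(); struct ctx, ctx_get() and ctx_put() are illustrative names, not the driver's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_uint users;        /* one reference per open fd, plus one for the device */
	atomic_bool disconnected; /* set once by the disconnect path */
};

/* Take a reference only while the device is still present. */
static bool ctx_get(struct ctx *c)
{
	if (atomic_load(&c->disconnected))
		return false;
	atomic_fetch_add(&c->users, 1);
	return true;
}

/* Drop a reference; the last user frees the context. */
static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->users, 1) == 1) {
		printf("last user gone, freeing context\n");
		free(c);
	}
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	atomic_store(&c->users, 1);           /* reference held by the "device" */
	if (ctx_get(c))                       /* open() */
		ctx_put(c);                   /* close() */
	atomic_store(&c->disconnected, true); /* disconnect() marks the device gone... */
	ctx_put(c);                           /* ...and drops the device reference */
	return 0;
}
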
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 3915d551d59e..fccd1798445d 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2569,6 +2569,11 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
} while (0);
mutex_unlock(&pvr2_unit_mtx);
+ INIT_WORK(&hdw->workpoll, pvr2_hdw_worker_poll);
+
+ if (hdw->unit_number == -1)
+ goto fail;
+
cnt1 = 0;
cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2");
cnt1 += cnt2;
@@ -2580,8 +2585,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
hdw->name[cnt1] = 0;
- INIT_WORK(&hdw->workpoll,pvr2_hdw_worker_poll);
-
pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
hdw->unit_number,hdw->name);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index c9d208677bcd..63842eb223a1 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -864,29 +864,31 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh,
struct uvc_video_chain *chain = handle->chain;
const struct uvc_entity *selector = chain->selector;
struct uvc_entity *iterm = NULL;
+ struct uvc_entity *it;
u32 index = input->index;
- int pin = 0;
if (selector == NULL ||
(chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
if (index != 0)
return -EINVAL;
- list_for_each_entry(iterm, &chain->entities, chain) {
- if (UVC_ENTITY_IS_ITERM(iterm))
+ list_for_each_entry(it, &chain->entities, chain) {
+ if (UVC_ENTITY_IS_ITERM(it)) {
+ iterm = it;
break;
+ }
}
- pin = iterm->id;
} else if (index < selector->bNrInPins) {
- pin = selector->baSourceID[index];
- list_for_each_entry(iterm, &chain->entities, chain) {
- if (!UVC_ENTITY_IS_ITERM(iterm))
+ list_for_each_entry(it, &chain->entities, chain) {
+ if (!UVC_ENTITY_IS_ITERM(it))
continue;
- if (iterm->id == pin)
+ if (it->id == selector->baSourceID[index]) {
+ iterm = it;
break;
+ }
}
}
- if (iterm == NULL || iterm->id != pin)
+ if (iterm == NULL)
return -EINVAL;
memset(input, 0, sizeof(*input));
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 9c8318923ed0..4733e7898ffe 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1322,7 +1322,6 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
*/
static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
{
- int counters_size;
int ret, i;
dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
@@ -1332,8 +1331,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
return dmc->num_counters;
}
- counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters;
- dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL);
+ dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+ sizeof(*dmc->counter), GFP_KERNEL);
if (!dmc->counter)
return -ENOMEM;
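
The exynos5422-dmc hunk above replaces an open-coded size multiplication with a counted allocation and sizes the element from the pointer itself (sizeof(*dmc->counter)), which guards against multiplication overflow and keeps the element type from drifting out of sync. In plain C the same habit looks like the hedged snippet below; alloc_counters() and struct counter are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct counter {
	unsigned long hits;
	unsigned long misses;
};

/* Counted allocation: calloc() is expected to reject an n * size overflow,
 * and sizeof(*c) ties the element size to the pointer's actual type. */
static struct counter *alloc_counters(size_t n)
{
	struct counter *c = calloc(n, sizeof(*c));

	return c;   /* NULL on failure, zeroed elements on success */
}

int main(void)
{
	struct counter *c = alloc_counters(8);

	if (!c)
		return 1;
	c[3].hits = 1;
	printf("counter 3: %lu hits\n", c[3].hits);
	free(c);
	return 0;
}
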
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index e5c8bc998eb4..965820481f1e 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -46,14 +46,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
}
clk_enable(davinci_vc->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- fifo_base = (dma_addr_t)res->start;
- davinci_vc->base = devm_ioremap_resource(&pdev->dev, res);
+ davinci_vc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(davinci_vc->base)) {
ret = PTR_ERR(davinci_vc->base);
goto fail;
}
+ fifo_base = (dma_addr_t)res->start;
davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev,
davinci_vc->base,
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index e92eeeb67a98..4cd5ecc72211 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -403,7 +403,7 @@ static int __init micro_probe(struct platform_device *pdev)
micro_reset_comm(micro);
irq = platform_get_irq(pdev, 0);
- if (!irq)
+ if (irq < 0)
return -EINVAL;
ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr,
IRQF_SHARED, "ipaq-micro",
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index d6cd5537126c..69f9b0336410 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev)
clk_disable_unprepare(ssc->clk);
ssc->irq = platform_get_irq(pdev, 0);
- if (!ssc->irq) {
+ if (ssc->irq < 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
- return -ENXIO;
+ return ssc->irq;
}
mutex_lock(&user_lock);
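
Both IRQ fixes above (ipaq-micro and atmel-ssc) address the same pitfall: platform_get_irq() reports failure with a negative errno, so testing with "!irq" misses the error, and returning a hard-coded -ENXIO throws the real error code away. The short sketch below shows the general "check for negative, propagate the errno" shape; get_irq() is a stand-in helper, not the kernel API.

#include <errno.h>
#include <stdio.h>

/* Stand-in for platform_get_irq(): negative errno on failure, >= 0 on success. */
static int get_irq(int present)
{
	return present ? 42 : -ENXIO;
}

static int probe(int present)
{
	int irq = get_irq(present);

	if (irq < 0) {              /* negative means failure */
		fprintf(stderr, "could not get irq: %d\n", irq);
		return irq;         /* propagate the real error code */
	}
	printf("using irq %d\n", irq);
	return 0;
}

int main(void)
{
	probe(1);
	probe(0);
	return 0;
}
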
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59eda55d92a3..1ef9b61077c4 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
return 0;
out_init_fail:
+ usb_set_intfdata(ucr->pusb_intf, NULL);
usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
ucr->iobuf_dma);
return ret;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 86d8fb8c0148..c7134d2cf69a 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1351,17 +1351,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
struct fastrpc_req_munmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_buf *buf, *b;
+ struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
spin_lock(&fl->lock);
- list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
- if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
+ if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
+ buf = iter;
break;
- buf = NULL;
+ }
}
spin_unlock(&fl->lock);
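
The fastrpc hunk above is one instance of a recurring list_for_each_entry fix: after the loop the iterator never ends up NULL (it points at the container of the list head), so the code must search with a separate iterator and assign a result pointer only on a real match. A self-contained sketch of that shape with a hand-rolled singly linked list; find_by_id() and struct node are illustrative only.

#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Returns the matching node, or NULL if nothing matched -- the result
 * pointer is set only inside the loop, never left pointing at a stale
 * iterator value. */
static struct node *find_by_id(struct node *head, int id)
{
	struct node *found = NULL;
	struct node *iter;

	for (iter = head; iter; iter = iter->next) {
		if (iter->id == id) {
			found = iter;
			break;
		}
	}
	return found;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("id 2 %s\n", find_by_id(&a, 2) ? "found" : "not found");
	printf("id 9 %s\n", find_by_id(&a, 9) ? "found" : "not found");
	return 0;
}
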
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 4282b625200f..fac4a811b97b 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -248,6 +248,11 @@ void lkdtm_ARRAY_BOUNDS(void)
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
@@ -267,7 +272,10 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
pr_err("FAIL: survived array bounds overflow!\n");
- pr_expected_config(CONFIG_UBSAN_BOUNDS);
+ if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
void lkdtm_CORRUPT_LIST_ADD(void)
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index c212a253edde..ef9a24aabfc3 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -9,19 +9,19 @@
extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) \
pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
lkdtm_kernel_info); \
else \
pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
lkdtm_kernel_info); \
-}
+} while (0)
#ifndef MODULE
int lkdtm_check_bool_cmdline(const char *param);
#define pr_expected_config_param(kconfig, param) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
@@ -52,7 +52,7 @@ int lkdtm_check_bool_cmdline(const char *param);
break; \
} \
} \
-}
+} while (0)
#else
#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
#endif
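
The lkdtm.h change above wraps the multi-statement pr_expected_config* macros in do { ... } while (0). The classic reason is shown in the short example below (REPORT is a made-up macro, not from lkdtm): with a bare { } block, the semicolon after the macro call breaks an if/else, whereas the do-while form expands to a single ordinary statement.

#include <stdio.h>

/* A bare-braces version would turn "REPORT(x); else ..." into a syntax
 * error, because "{ ... };" is two statements. The do-while(0) wrapper
 * makes the expansion behave like one statement. */
#define REPORT(cond)					\
do {							\
	if (cond)					\
		printf("condition held\n");		\
	else						\
		printf("condition did not hold\n");	\
} while (0)

int main(void)
{
	int enabled = 1;

	if (enabled)
		REPORT(enabled);   /* safe even as the body of an if/else */
	else
		printf("disabled\n");
	return 0;
}
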
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 9161ce7ed47a..3fead5efe523 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -30,12 +30,12 @@ static const unsigned char test_text[] = "This is a test.\n";
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
- return stack + 0;
+ return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
- unsigned char buf[32];
+ unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +43,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
buf[i] = value & 0xff;
}
- return trick_compiler(buf);
+ /*
+ * Put the target buffer in the middle of stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +71,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
bad_stack -= sizeof(unsigned long);
}
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index cebcca6d6d3e..cf2b8261da14 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1351,7 +1351,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CAP_SETUP) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
return 0;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 64ce3f830262..15e8e2b322b1 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -109,6 +109,8 @@
#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */
+#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a738253dbd05..5324b65d0d29 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -115,6 +115,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index e70525eedaae..d278f8ba2c76 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -558,7 +558,9 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
err_unregister:
ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
+ free_minor(info);
device_unregister(&info->dev);
+ return rc;
err_put:
ocxl_afu_put(afu);
free_minor(info);
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index be4016084979..61dbff5f0065 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -100,7 +100,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
pi->base = base;
pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
- /* initlize capability by RDPT */
+ /* initialize capability by RDPT */
pi->capability &= ioread8(base);
pi->events = pi->capability;
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index bb7aa6368538..b9e6400a574b 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -34,7 +34,9 @@ pvpanic_send_event(unsigned int event)
{
struct pvpanic_instance *pi_cur;
- spin_lock(&pvpanic_lock);
+ if (!spin_trylock(&pvpanic_lock))
+ return;
+
list_for_each_entry(pi_cur, &pvpanic_list, list) {
if (event & pi_cur->capability & pi_cur->events)
iowrite8(event, pi_cur->base);
@@ -56,9 +58,13 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
return NOTIFY_DONE;
}
+/*
+ * Call our notifier very early on panic, deferring the
+ * action taken to the hypervisor.
+ */
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
- .priority = 1, /* let this called before broken drm_fb_helper */
+ .priority = INT_MAX,
};
static void pvpanic_remove(void *param)
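
The pvpanic change above swaps spin_lock() for spin_trylock() in the panic path: if another CPU crashed while holding the lock, a blocking lock would hang the panic handler forever, whereas trylock simply skips the notification. A hedged user-space illustration with POSIX mutexes follows; panic_notify() and the "consumer list" it guards are invented for the example.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* In a crash path we must never block: if the lock cannot be taken
 * immediately, give up rather than hang. */
static void panic_notify(void)
{
	if (pthread_mutex_trylock(&list_lock) != 0) {
		fprintf(stderr, "lock busy, skipping panic notification\n");
		return;
	}
	printf("notifying panic consumers\n");
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	panic_notify();                    /* lock free: notification runs */

	pthread_mutex_lock(&list_lock);    /* simulate a holder that never returns */
	panic_notify();                    /* lock busy: bail out instead of blocking */
	pthread_mutex_unlock(&list_lock);
	return 0;
}
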
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c4fcb1ad6d4d..a196116444a3 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -609,11 +609,11 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
/*
- * Ensure RPMB/R1B command has completed by polling CMD13
- * "Send Status".
+ * Ensure RPMB/R1B command has completed by polling CMD13 "Send Status". Here we
+ * allow overriding the default timeout value if a custom timeout is specified.
*/
- err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, false,
- MMC_BUSY_IO);
+ err = mmc_poll_for_busy(card, idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS,
+ false, MMC_BUSY_IO);
}
return err;
@@ -1482,8 +1482,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
- else
- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 80a2c270d502..3c59dec08c3b 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -235,6 +235,26 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
return PTR_ERR(host->dma_rx);
}
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx) {
+ struct device *dev = host->dma_tx->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
+ if (host->dma_rx) {
+ struct device *dev = host->dma_rx->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
return 0;
}
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b4891bb26648..a3e62e212631 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -147,6 +147,9 @@ struct sdhci_am654_data {
int drv_strength;
int strb_sel;
u32 flags;
+ u32 quirks;
+
+#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
};
struct sdhci_am654_driver_data {
@@ -369,6 +372,21 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
}
}
+static void sdhci_am654_reset(struct sdhci_host *host, u8 mask)
+{
+ u8 ctrl;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_reset(host, mask);
+
+ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+}
+
static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -500,7 +518,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_am654_reset,
};
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
@@ -719,6 +737,9 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
device_property_read_u32(dev, "ti,clkbuf-sel",
&sdhci_am654->clkbuf_sel);
+ if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
+ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
+
sdhci_get_of_property(pdev);
return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a761134fd3be..59334530dd46 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,10 @@
#define CFI_SR_WBASB BIT(3)
#define CFI_SR_SLSB BIT(1)
+enum cfi_quirks {
+ CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
+};
+
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
@@ -436,6 +440,15 @@ static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
mtd->name);
}
+static void fixup_quirks(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01)
+ cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
+}
+
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -474,6 +487,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -802,21 +816,25 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
}
/*
- * Return true if the chip is ready.
+ * Return true if the chip is ready and has the correct value.
*
* Ready is one of: read mode, query mode, erase-suspend-read mode (in any
* non-suspended sector) and is indicated by no toggle bits toggling.
*
+ * Errors are indicated by toggling bits, or by bits held with the
+ * wrong value.
+ *
* Note that anything more complicated than checking if no bits are toggling
* (including checking DQ5 for an error status) is tricky to get working
* correctly and is therefore not done (particularly with interleaved chips
* as each chip must be checked independently of the others).
*/
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
- unsigned long addr)
+ unsigned long addr, map_word *expected)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word d, t;
+ int ret;
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB);
@@ -826,57 +844,32 @@ static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
- d = map_read(map, addr);
+ t = map_read(map, addr);
- return map_word_andequal(map, d, ready, ready);
+ return map_word_andequal(map, t, ready, ready);
}
d = map_read(map, addr);
t = map_read(map, addr);
- return map_word_equal(map, d, t);
+ ret = map_word_equal(map, d, t);
+
+ if (!ret || !expected)
+ return ret;
+
+ return map_word_equal(map, t, *expected);
}
-/*
- * Return true if the chip is ready and has the correct value.
- *
- * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
- * non-suspended sector) and it is indicated by no bits toggling.
- *
- * Error are indicated by toggling bits or bits held with the wrong value,
- * or with bits toggling.
- *
- * Note that anything more complicated than checking if no bits are toggling
- * (including checking DQ5 for an error status) is tricky to get working
- * correctly and is therefore not done (particularly with interleaved chips
- * as each chip must be checked independently of the others).
- *
- */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
- unsigned long addr, map_word expected)
+ unsigned long addr, map_word *expected)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word oldd, curd;
-
- if (cfi_use_status_reg(cfi)) {
- map_word ready = CMD(CFI_SR_DRB);
-
- /*
- * For chips that support status register, check device
- * ready bit
- */
- cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
- cfi->device_type, NULL);
- curd = map_read(map, addr);
-
- return map_word_andequal(map, curd, ready, ready);
- }
+ map_word *datum = expected;
- oldd = map_read(map, addr);
- curd = map_read(map, addr);
+ if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
+ datum = NULL;
- return map_word_equal(map, oldd, curd) &&
- map_word_equal(map, curd, expected);
+ return chip_ready(map, chip, addr, datum);
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
@@ -893,7 +886,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
case FL_STATUS:
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
@@ -932,7 +925,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
@@ -1463,7 +1456,7 @@ static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
/* wait for chip to become ready */
timeo = jiffies + msecs_to_jiffies(2);
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
@@ -1699,7 +1692,7 @@ static int __xipram do_write_oneword_once(struct map_info *map,
* "chip_good" to avoid the failure due to scheduling.
*/
if (time_after(jiffies, timeo) &&
- !chip_good(map, chip, adr, datum)) {
+ !chip_good(map, chip, adr, &datum)) {
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
xip_disable(map, chip, adr);
@@ -1707,7 +1700,7 @@ static int __xipram do_write_oneword_once(struct map_info *map,
break;
}
- if (chip_good(map, chip, adr, datum)) {
+ if (chip_good(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -1979,14 +1972,14 @@ static int __xipram do_write_buffer_wait(struct map_info *map,
* "chip_good" to avoid the failure due to scheduling.
*/
if (time_after(jiffies, timeo) &&
- !chip_good(map, chip, adr, datum)) {
+ !chip_good(map, chip, adr, &datum)) {
pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
__func__, adr);
ret = -EIO;
break;
}
- if (chip_good(map, chip, adr, datum)) {
+ if (chip_good(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -2195,7 +2188,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
* If the driver thinks the chip is idle, and no toggle bits
* are changing, then the chip is actually idle for sure.
*/
- if (chip->state == FL_READY && chip_ready(map, chip, adr))
+ if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
return 0;
/*
@@ -2212,7 +2205,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
/* wait for the chip to become ready */
for (i = 0; i < jiffies_to_usecs(timeo); i++) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
return 0;
udelay(1);
@@ -2276,13 +2269,13 @@ retry:
map_write(map, datum, adr);
for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
udelay(1);
}
- if (!chip_good(map, chip, adr, datum) ||
+ if (!chip_ready(map, chip, adr, &datum) ||
cfi_check_err_status(map, chip, adr)) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
@@ -2424,6 +2417,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
+ map_word datum = map_word_ff(map);
adr = cfi->addr_unlock1;
@@ -2478,7 +2472,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (chip_ready(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -2523,6 +2517,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
+ map_word datum = map_word_ff(map);
adr += chip->start;
@@ -2577,7 +2572,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (chip_ready(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -2771,7 +2766,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
*/
timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
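
The cfi_cmdset_0002 rework above folds chip_good() into chip_ready() by making the expected value an optional pointer argument: pass NULL to ask only "is the chip idle?", or pass the datum to also verify the programmed value. Below is a compact, hedged sketch of that "optional expected value" API shape, with a fake status source instead of real flash reads; poll_ready() and read_status() are illustrative names only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t device_value = 0xA5;   /* pretend flash cell */

static uint8_t read_status(void)
{
	return device_value;              /* stable reads == "no toggling bits" */
}

/* Ready check with an optional value check: two back-to-back reads must
 * match (chip idle); if "expected" is non-NULL the data must also match it. */
static bool poll_ready(const uint8_t *expected)
{
	uint8_t first = read_status();
	uint8_t second = read_status();

	if (first != second)
		return false;
	if (!expected)
		return true;
	return second == *expected;
}

int main(void)
{
	uint8_t want = 0xA5, wrong = 0x00;

	printf("ready only      : %d\n", poll_ready(NULL));
	printf("ready + correct : %d\n", poll_ready(&want));
	printf("ready + wrong   : %d\n", poll_ready(&wrong));
	return 0;
}
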
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 03e3de3a5d79..1e94e7d10b8b 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -257,6 +257,10 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
return 0;
}
+ if (mtd_type_is_nand(mbd->mtd))
+ pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ mbd->tr->name, mbd->mtd->name);
+
/* OK, it's not open. Create cache info for it */
mtdblk->count = 1;
mutex_init(&mtdblk->cache_mutex);
@@ -322,10 +326,6 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (!(mtd->flags & MTD_WRITEABLE))
dev->mbd.readonly = 1;
- if (mtd_type_is_nand(mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
- tr->name, mtd->name);
-
if (add_mtd_blktrans_dev(&dev->mbd))
kfree(dev);
}
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 7eec60ea9056..0d72672f8b64 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2983,11 +2983,10 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
if (IS_ERR(cdns_ctrl->reg))
return PTR_ERR(cdns_ctrl->reg);
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
- cdns_ctrl->io.dma = res->start;
- cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
+ cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+ cdns_ctrl->io.dma = res->start;
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 20c085a30adc..de7e722d3826 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -74,22 +74,21 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret;
}
- denali->reg = ioremap(csr_base, csr_len);
+ denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
- denali->host = ioremap(mem_base, mem_len);
+ denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
- ret = -ENOMEM;
- goto out_unmap_reg;
+ return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
- goto out_unmap_host;
+ return ret;
nsels = denali->nbanks;
@@ -117,10 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
-out_unmap_host:
- iounmap(denali->host);
-out_unmap_reg:
- iounmap(denali->reg);
return ret;
}
@@ -129,8 +124,6 @@ static void denali_pci_remove(struct pci_dev *dev)
struct denali_controller *denali = pci_get_drvdata(dev);
denali_remove(denali);
- iounmap(denali->reg);
- iounmap(denali->host);
}
static struct pci_driver denali_pci_driver = {
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index 7c1c80dae826..e91b879b32bd 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -619,9 +619,9 @@ static int ebu_nand_probe(struct platform_device *pdev)
resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res);
- ebu_host->cs[cs].nand_pa = res->start;
if (IS_ERR(ebu_host->cs[cs].chipaddr))
return PTR_ERR(ebu_host->cs[cs].chipaddr);
+ ebu_host->cs[cs].nand_pa = res->start;
ebu_host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ebu_host->clk))
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 1dd1c5898093..da77ab20296e 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -39,6 +39,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants_f,
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
@@ -339,7 +347,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index cc08bd707378..90f39aabc1ff 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1007,6 +1007,15 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
if (ret)
return ret;
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr1 != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
if (nor->flags & SNOR_F_NO_READ_CR)
return 0;
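
The spi-nor hunk above adds a read-back check after writing the status register: write, read the register back, and fail with -EIO if the device did not latch the value. A minimal sketch of that write-then-verify pattern against a fake register; write_reg()/read_reg() are stand-ins, not the spi-nor API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t hw_reg;          /* pretend device register */
static int write_fails;         /* flip to 1 to simulate a stuck register */

static void write_reg(uint8_t val) { if (!write_fails) hw_reg = val; }
static uint8_t read_reg(void)      { return hw_reg; }

/* Write the register and verify the device actually took the value. */
static int write_reg_and_check(uint8_t val)
{
	write_reg(val);
	if (read_reg() != val) {
		fprintf(stderr, "read back test failed\n");
		return -EIO;
	}
	return 0;
}

int main(void)
{
	printf("normal write : %d\n", write_reg_and_check(0x42));
	write_fails = 1;
	printf("stuck device : %d\n", write_reg_and_check(0x7f));
	return 0;
}
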
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9cf715..053ab52668e8 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -97,6 +97,33 @@ out:
return e;
}
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling wear leveling pool
+ *
+ * This helper checks whether there are enough free PEBs (after deducting
+ * the PEBs reserved for fastmap) to fill fm_pool and fm_wl_pool; the rule
+ * applies once at least one free PEB has been placed into fm_wl_pool.
+ * For the wear-leveling pool, UBI must also reserve free PEBs for bad-PEB
+ * handling, since producing new bad PEBs could otherwise leave too few
+ * free PEBs for the user volumes.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+ int fm_used = 0; // fastmap non anchor pebs.
+ int beb_rsvd_pebs;
+
+ if (!ubi->free.rb_node)
+ return false;
+
+ beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+ if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+ return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
}
- if (ubi->fm_next_anchor) {
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->free_count++;
- }
- /* All available PEBs are in ubi->free, now is the time to get
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!ubi->free.rb_node)
+ if (!has_enough_free_count(ubi, false))
break;
e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ if (!has_enough_free_count(ubi, true))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -286,20 +308,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
- /* Do we have a next anchor? */
- if (!ubi->fm_next_anchor) {
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
- if (!ubi->fm_next_anchor)
- /* Tell wear leveling to produce a new anchor PEB */
- ubi->fm_do_produce_anchor = 1;
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
}
- /* Do wear leveling to get a new anchor PEB or check the
- * existing next anchor candidate.
- */
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ ubi->fm_do_produce_anchor = 1;
+ /* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -381,11 +409,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}
- if (ubi->fm_next_anchor) {
- return_unused_peb(ubi, ubi->fm_next_anchor);
- ubi->fm_next_anchor = NULL;
- }
-
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6b5f1ffd961b..6e95c4b1473e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
- if (ubi->fm_next_anchor) {
- fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
- fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
- set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
- fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
- free_peb_count++;
- fm_pos += sizeof(*fec);
- ubi_assert(fm_pos <= ubi->fm_size);
- }
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7c083ad58274..078112e23dfd 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -489,8 +489,7 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
- struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1bc7b3a05604..6ea95ade4ca6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -309,7 +309,6 @@ out_mapping:
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8455f1d47f3c..afcdacb9d0e9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -689,16 +689,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
- if (e1 && ubi->fm_next_anchor &&
- (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ if (e1 && ubi->fm_anchor &&
+ (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
- /* fm_next_anchor is no longer considered a good anchor
- * candidate.
+ /*
+ * fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->fm_next_anchor = NULL;
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->fm_anchor = NULL;
ubi->free_count++;
}
@@ -1085,12 +1085,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+ if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
- /* Abort anchor production, if needed it will be
+ /*
+ * Abort anchor production; if needed it will be
* enabled again in the wear leveling started below.
*/
- ubi->fm_next_anchor = e;
+ ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5b5be7a3b276..85ccd96df406 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -81,7 +81,6 @@ config WIREGUARD
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_LIB_BLAKE2S
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index b56a54d6c5a9..8f184a852a0a 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,14 +18,9 @@
#define M_CAN_PCI_MMIO_BAR 0
+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
-struct m_can_pci_config {
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
- unsigned int clock_freq;
-};
-
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 128,
- .sjw_max = 128,
- .brp_min = 1,
- .brp_max = 512,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 16,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
- .bit_timing = &m_can_bittiming_const_ehl,
- .data_timing = &m_can_data_bittiming_const_ehl,
- .clock_freq = 200000000,
-};
-
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
- const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
- cfg = (const struct m_can_pci_config *)id->driver_data;
-
priv = cdev_to_priv(mcan_class);
priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->bit_timing = cfg->bit_timing;
- mcan_class->data_timing = cfg->data_timing;
- mcan_class->can.clock.freq = cfg->clock_freq;
+ mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
- { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 0f322dabaf65..281856eea2ef 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -433,7 +433,7 @@ struct mcp251xfd_hw_tef_obj {
/* The tx_obj_raw version is used in spi async, i.e. without
* regmap. We have to take care of endianness ourselves.
*/
-struct mcp251xfd_hw_tx_obj_raw {
+struct __packed mcp251xfd_hw_tx_obj_raw {
__le32 id;
__le32 flags;
u8 data[sizeof_field(struct canfd_frame, data)];
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index a579b9b791ed..262b783d1df8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 37a3dabdce31..6d1fcb08bba1 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -72,7 +72,6 @@ source "drivers/net/dsa/realtek/Kconfig"
config NET_DSA_SMSC_LAN9303
tristate
- depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
@@ -82,6 +81,7 @@ config NET_DSA_SMSC_LAN9303
config NET_DSA_SMSC_LAN9303_I2C
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
depends on I2C
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_SMSC_LAN9303
select REGMAP_I2C
help
@@ -91,6 +91,7 @@ config NET_DSA_SMSC_LAN9303_I2C
config NET_DSA_SMSC_LAN9303_MDIO
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
+ depends on VLAN_8021Q || VLAN_8021Q=n
help
Enable access functions if the SMSC/Microchip LAN9303 is configured
for MDIO managed mode.
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 68d74ef25e1b..077304d93e15 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
- if (err)
+ if (err) {
+ of_node_put(gphy_fw_np);
goto remove_gphy;
+ }
i++;
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5c8f471309b0..15d0986de952 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2540,13 +2540,7 @@ static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
 /* Port5 supports either RGMII or SGMII.
* Port6 supports SGMII only.
*/
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
+ if (port == 6) {
phylink_set(supported, 2500baseX_Full);
phylink_set(supported, 2500baseT_Full);
}
@@ -2914,8 +2908,6 @@ static void
mt7530_mac_port_validate(struct dsa_switch *ds, int port,
unsigned long *supported)
{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
}
static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
@@ -2952,8 +2944,10 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,
}
/* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
+ if (state->interface != PHY_INTERFACE_MODE_MII) {
phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
priv->info->mac_port_validate(ds, port, mask);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 836aef05effc..2ca3cbba5764 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3532,6 +3532,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2b05ead515cd..6ae7a0ed9e0b 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -50,22 +50,17 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 ctrl, u16 status, u16 lpa,
+ u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
 /* The Speed and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
- * still get here on link up. But we want to set an_complete
- * only if AN was enabled, thus we look at BMCR_ANENABLE.
- * (According to 802.3-2008 section 22.2.4.2.10, we should be
- * able to get this same value from BMSR_ANEGCAPABLE, but tests
- * show that these Marvell PHYs don't conform to this part of
- * the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
+ * still get here on link up.
*/
- state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +186,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
return err;
}
@@ -212,7 +207,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -915,13 +910,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &ctrl);
+ MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -939,7 +934,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 804b37c76b1e..b51f5b9577e0 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 72f8751784c3..e9c6f1fa0b1a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -345,7 +345,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -363,12 +362,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -376,18 +380,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@ -445,7 +448,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
AQ_CFG_RX_FRAME_MAX);
@@ -454,7 +457,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 9f1b15077e7d..45c17c585d74 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 0ddfb5b5d53c..2e6c5f258a1f 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,3 +17,8 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_tg3.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0877b3d7f88c..ae541a9d1eee 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 9513cfb5ba58..0ce28bc955a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -323,7 +323,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
- kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c035cce857fe..405285b474d2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -35,6 +35,7 @@
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -1150,6 +1151,36 @@ static void macb_tx_error_task(struct work_struct *work)
spin_unlock_irqrestore(&bp->lock, flags);
}
+static bool ptp_one_step_sync(struct sk_buff *skb)
+{
+ struct ptp_header *hdr;
+ unsigned int ptp_class;
+ u8 msgtype;
+
+ /* No need to parse packet if PTP TS is not involved */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ goto not_oss;
+
+ /* Identify and return whether PTP one step sync is being processed */
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ goto not_oss;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ goto not_oss;
+
+ if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
+ goto not_oss;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ return true;
+
+not_oss:
+ return false;
+}
+
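A minimal user-space sketch of the same one-step-Sync test, working on a raw PTP header instead of an skb; the offsets and bits follow IEEE 1588 (messageType in the low nibble of byte 0, flagField at byte 6, twoStepFlag = bit 1), and the helper name is made up rather than taken from the driver.

/* Illustrative only: not the driver's code, no skb/ptp_classify involved. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_one_step_sync(const uint8_t *ptp_hdr)
{
        bool two_step = ptp_hdr[6] & 0x02;      /* twoStepFlag */
        uint8_t msgtype = ptp_hdr[0] & 0x0f;    /* messageType */

        return !two_step && msgtype == 0x0;     /* 0x0 == Sync */
}

int main(void)
{
        uint8_t one_step_sync[34] = { 0x00 };             /* Sync, twoStep clear */
        uint8_t two_step_sync[34] = { 0x00, [6] = 0x02 }; /* Sync, twoStep set */

        printf("%d %d\n", is_one_step_sync(one_step_sync),
               is_one_step_sync(two_step_sync));          /* prints: 1 0 */
        return 0;
}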
static void macb_tx_interrupt(struct macb_queue *queue)
{
unsigned int tail;
@@ -1194,8 +1225,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP) &&
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !ptp_one_step_sync(skb) &&
gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
@@ -1245,7 +1276,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1284,6 +1314,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -2025,7 +2056,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ !ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -2123,7 +2155,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
return 0;
if (padlen <= 0) {
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index c2e1f163bb14..c52ec1cc8a08 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -469,8 +469,10 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
- fallthrough;
+ tx_bd_control = TSTAMP_ALL_FRAMES;
+ break;
case HWTSTAMP_TX_ON:
+ gem_ptp_set_one_step_sync(bp, 0);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
default:
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fcedd733bacb..834a3f8c80da 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1398,8 +1398,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
- if (!dev)
+ if (!dev) {
+ pci_disable_device(pdev);
return -ENOMEM;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1778,6 +1780,7 @@ err_out_free_res:
err_out_free_netdev:
free_netdev (dev);
+ pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index e1df2dc810a2..0b833572205f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1910,6 +1910,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 892f2f12c54c..15d10775a757 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3194,7 +3194,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3205,10 +3205,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
ret = hclge_get_sfp_info(hdev, mac);
- else
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
ret = hclge_get_sfp_speed(hdev, &speed);
+ }
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
@@ -3220,6 +3223,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
@@ -3302,6 +3307,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
+ /* Return success directly if the VF is not alive; the VF will
+ * query the link state itself when it starts work.
+ */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
ret = hclge_push_vf_link_status(vport);
if (ret) {
vport->vf_info.link_state = link_state_old;
@@ -10397,12 +10408,42 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
return false;
}
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ vport->port_base_vlan_cfg.tbl_sta = false;
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
@@ -10415,38 +10456,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
goto out;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- if (old_vlan_info->vlan_tag == 0)
- ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- true, 0);
- else
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to clear vport%u port base vlan %u, ret = %d.\n",
- vport->vport_id, old_vlan_info->vlan_tag, ret);
- return ret;
- }
-
- goto out;
- }
-
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 429652a8cde1..afc47c9b5ec4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_shaper_ir_para ir_para;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 1db7f40b4525..5df18cc3ee55 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -231,6 +231,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 06586173add7..998717f02136 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
{
struct hinic_hwif *hwif = attr->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
chain->hwif = hwif;
chain->chain_type = attr->chain_type;
@@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
sema_init(&chain->sem, 1);
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
if (!chain->cell_ctxt)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 307a6d4af993..a627237f694b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
struct hinic_cmdq_ctxt *cmdq_ctxts;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
int err;
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
if (!cmdq_ctxts)
return -ENOMEM;
@@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
struct pci_dev *pdev = hwif->pdev;
struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
u16 max_wqe_size;
int err;
@@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
if (!cmdqs->cmdq_buf_pool)
return -ENOMEM;
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
if (!cmdqs->saved_wqs) {
err = -ENOMEM;
goto err_saved_wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 56b6b04e209b..ca76896d9f1c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
int i, err;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
@@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev)
if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
GFP_KERNEL);
if (!hwdev->msix_entries)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index d3fc05a07fdb..045c47786a04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq)
struct hinic_hwif *hwif = eq->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 init_val, addr, val;
- size_t addr_size;
int err, pg;
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
if (!eq->dma_addr)
return -ENOMEM;
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
if (!eq->virt_addr) {
err = -ENOMEM;
goto err_virt_addr_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index ebc77771f5da..4aa1f433ed24 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -643,6 +643,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = alloc_msg_buf(pf_to_mgmt);
if (err) {
dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
@@ -650,6 +651,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
if (err) {
dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 0c1b0a91b1ae..4daf6bf291ec 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -193,20 +193,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
@@ -379,15 +379,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(*wq->shadow_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index ae707e305684..f8aa80ec201b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -144,13 +144,12 @@ static int create_txqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
if (nic_dev->txqs)
return -EINVAL;
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -241,13 +240,12 @@ static int create_rxqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
if (nic_dev->rxqs)
return -EINVAL;
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
if (!nic_dev->rxqs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5bdb0d374ef..a984a7a6dd2e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -862,7 +862,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
int err, irqname_len;
- size_t sges_size;
txq->netdev = netdev;
txq->sq = sq;
@@ -871,13 +870,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 513ba6974355..0e13ce9b4d00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2576,15 +2576,16 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, pf->state);
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ dev_warn(&pf->pdev->dev,
+ "Cannot start offline testing when PF is in reset state.\n");
+ goto skip_ol_tests;
+ }
+
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
- data[I40E_ETH_TEST_REG] = 1;
- data[I40E_ETH_TEST_EEPROM] = 1;
- data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LINK] = 1;
- eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -2631,9 +2632,17 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
}
-skip_ol_tests:
-
netif_info(pf, drv, netdev, "testing finished\n");
+ return;
+
+skip_ol_tests:
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, pf->state);
+ netif_info(pf, drv, netdev, "testing failed\n");
}
static void i40e_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 29387f0814e9..9bc05d671ad5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8523,6 +8523,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+ if (!tc) {
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 10a83e5385c7..d3a4a33977ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
i40e_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1433,13 +1431,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
- if (ring_is_xdp(tx_ring)) {
- tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
- GFP_KERNEL);
- if (!tx_ring->xsk_descs)
- goto err;
- }
-
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */
@@ -1463,8 +1454,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
return 0;
err:
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index bfc2845c99d1..f6d91fa1562e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -390,7 +390,6 @@ struct i40e_ring {
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
- struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index babf8b7fa767..6c1e668f4ebf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2282,7 +2282,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
aq_ret = I40E_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 3f27a8ebe2ec..54c91dc459dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -473,11 +473,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
- struct xdp_desc *descs = xdp_ring->xsk_descs;
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index f74610442bda..533a953f15ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
q_vector->tx.itr_mode = ITR_DYNAMIC;
q_vector->rx.itr_mode = ITR_DYNAMIC;
+ q_vector->tx.type = ICE_TX_CONTAINER;
+ q_vector->rx.type = ICE_RX_CONTAINER;
if (vsi->type == ICE_VSI_VF)
goto out;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 38c2d9a5574a..19f115402969 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3466,15 +3466,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-enum ice_container_type {
- ICE_RX_CONTAINER,
- ICE_TX_CONTAINER,
-};
-
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -3484,13 +3478,12 @@ enum ice_container_type {
* Returns 0 on success, negative otherwise.
*/
static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
- struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
if (!rc->ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
ec->rx_coalesce_usecs = rc->itr_setting;
@@ -3501,7 +3494,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", rc->type);
return -EINVAL;
}
@@ -3522,18 +3515,18 @@ static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else {
@@ -3585,7 +3578,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -3597,10 +3589,10 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
* Returns 0 on success, negative otherwise.
*/
static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
- const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+ const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;
@@ -3608,7 +3600,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (!rc->ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
@@ -3641,7 +3633,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
- c_type);
+ rc->type);
return -EINVAL;
}
@@ -3690,22 +3682,22 @@ static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 653996e8fd30..4417238b0e64 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2980,8 +2980,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- coalesce[i].itr_tx = q_vector->tx.itr_setting;
- coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].itr_tx = q_vector->tx.itr_settings;
+ coalesce[i].itr_rx = q_vector->rx.itr_settings;
coalesce[i].intrl = q_vector->intrl;
if (i < vsi->num_txq)
@@ -3037,21 +3037,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
*/
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[i].itr_rx;
+ rc->itr_settings = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
}
if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[i].itr_tx;
+ rc->itr_settings = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
}
@@ -3065,12 +3065,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
for (; i < vsi->num_q_vectors; i++) {
/* transmit */
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
/* receive */
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 27b5c75ce386..188abf36a5b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5656,9 +5656,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
- /* clear this now, and the first stats read will be used as baseline */
- vsi->stat_offsets_loaded = false;
-
+ /* Perform an initial read of the statistics registers now to
+ * set the baseline so that the counters are ready when the interface is up
+ */
+ ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ef26ff351b57..9b50e9e6042a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -254,12 +254,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
* This function must be called periodically to ensure that the cached value
* is never more than 2 seconds old. It must also be called whenever the PHC
* time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
*/
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
u64 systime;
int i;
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
/* Read the current PHC time */
systime = ice_ptp_read_src_clk_reg(pf, NULL);
@@ -282,6 +289,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
}
}
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
}
/**
@@ -1418,17 +1428,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ int err;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
- ice_ptp_update_cached_phctime(pf);
+ err = ice_ptp_update_cached_phctime(pf);
ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
- /* Run twice a second */
+ /* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
- msecs_to_jiffies(500));
+ msecs_to_jiffies(err ? 10 : 500));
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 7c2328529ff8..4adc3dff04ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -332,6 +332,11 @@ static inline bool ice_ring_is_xdp(struct ice_ring *ring)
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
@@ -340,9 +345,15 @@ struct ice_ring_container {
/* this matches the maximum number of ITR bits, but in usec
* values, so it is shifted left one bit (bit zero is ignored)
*/
- u16 itr_setting:13;
- u16 itr_reserved:2;
- u16 itr_mode:1;
+ union {
+ struct {
+ u16 itr_setting:13;
+ u16 itr_reserved:2;
+ u16 itr_mode:1;
+ };
+ u16 itr_settings;
+ };
+ enum ice_container_type type;
};
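A host-side sketch of why aliasing the ITR bitfields with a single 16-bit itr_settings word matters: saving and restoring the whole word across a rebuild preserves itr_mode, which copying only the 13-bit itr_setting would drop. Bitfield layout is compiler-specific, so this is an illustration rather than the driver's exact layout.

/* Illustrative only: standard C11 anonymous union/struct, not ice_ring_container. */
#include <stdint.h>
#include <stdio.h>

struct ring_container {
        union {
                struct {
                        uint16_t itr_setting:13;
                        uint16_t itr_reserved:2;
                        uint16_t itr_mode:1;
                };
                uint16_t itr_settings; /* aliases all of the bits above */
        };
};

int main(void)
{
        struct ring_container rc = { 0 };
        uint16_t saved;

        rc.itr_setting = 50;
        rc.itr_mode = 1;

        saved = rc.itr_settings;        /* one word captures setting + mode */
        rc.itr_settings = 0;            /* container torn down on rebuild */
        rc.itr_settings = saved;        /* restore everything at once */

        printf("setting=%u mode=%u\n",
               (unsigned)rc.itr_setting, (unsigned)rc.itr_mode);
        return 0;
}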
struct ice_coalesce_stored {
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2b1873061912..5581747947e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -378,7 +378,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
do {
*xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!xdp) {
+ if (!*xdp) {
ok = false;
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index bf8ef81f6c0e..b88303351484 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
- if (adapter->link_speed != SPEED_1000)
+ if (adapter->link_speed != SPEED_1000 ||
+ !hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 214a38de3f41..aaebdae8b5ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1157,9 +1157,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
- enable = 0;
+ enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1181,9 +1181,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}
- disable = 0;
+ disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 1f90a7403392..4895faa667b5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -206,7 +206,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
- return blkaddr;
+ return false;
/* Registers that can be accessed from PF/VF */
if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ee1fd472e925..8601ef26c260 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -820,6 +820,17 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
return true;
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1311,7 +1322,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1725,7 +1739,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ else
+ ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
@@ -1991,6 +2008,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
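Note on the mtk_eth_soc hunks above: once the LRO fragment size exceeds a page, napi_alloc_frag()/netdev_alloc_frag() can no longer be used, so the new mtk_max_lro_buf_alloc() falls back to __get_free_pages(get_order(size)), which rounds the buffer up to a power-of-two number of pages. A small standalone sketch of that rounding, assuming a 4 KiB page size — get_order_demo is a made-up stand-in for the kernel's get_order():

#include <stdio.h>

static int get_order_demo(unsigned long size, unsigned long page_size)
{
	unsigned long pages = (size + page_size - 1) / page_size;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	/* e.g. a 9 KiB buffer needs an order-2 (16 KiB) allocation */
	printf("order=%d\n", get_order_demo(9 * 1024, 4096));
	return 0;
}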
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fde521b1eeca..c3cffb32fb06 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2103,7 +2103,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
- return 0;
+ return ret;
}
i += ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index e8093c4e09d4..949f12ede3d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -544,12 +544,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
PCI_SLOT(dev->pdev->devfn));
}
-static int next_phys_dev(struct device *dev, const void *data)
+static int _next_phys_dev(struct mlx5_core_dev *mdev,
+ const struct mlx5_core_dev *curr)
{
- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
- struct mlx5_core_dev *mdev = madev->mdev;
- const struct mlx5_core_dev *curr = data;
-
if (!mlx5_core_is_pf(mdev))
return 0;
@@ -562,22 +559,69 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
- struct auxiliary_device *adev;
- struct mlx5_adev *madev;
+ if (this->driver != other->driver)
+ return NULL;
+
+ return pci_get_drvdata(to_pci_dev(other));
+}
+
+static int next_phys_dev(struct device *dev, const void *data)
+{
+ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+ mdev = pci_get_other_drvdata(this->device, dev);
+ if (!mdev)
+ return 0;
+
+ return _next_phys_dev(mdev, data);
+}
+
+static int next_phys_dev_lag(struct device *dev, const void *data)
+{
+ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+ mdev = pci_get_other_drvdata(this->device, dev);
+ if (!mdev)
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
+ !MLX5_CAP_GEN(mdev, lag_master) ||
+ MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS)
+ return 0;
+
+ return _next_phys_dev(mdev, data);
+}
+
+static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct device *next;
if (!mlx5_core_is_pf(dev))
return NULL;
- adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
- if (!adev)
+ next = bus_find_device(&pci_bus_type, NULL, dev, match);
+ if (!next)
return NULL;
- madev = container_of(adev, struct mlx5_adev, adev);
- put_device(&adev->dev);
- return madev->mdev;
+ put_device(next);
+ return pci_get_drvdata(to_pci_dev(next));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+{
+ lockdep_assert_held(&mlx5_intf_mutex);
+ return mlx5_get_next_dev(dev, &next_phys_dev);
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
+{
+ lockdep_assert_held(&mlx5_intf_mutex);
+ return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}
void mlx5_dev_list_lock(void)
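Note on the mlx5 dev.c rework above: auxiliary_find_device() is replaced by bus_find_device() on the PCI bus, with a match callback that skips devices bound to a different driver, rejects the calling device itself, and keeps only PFs (the _lag variant additionally checks LAG capabilities). A standalone userspace analogue of that find-first-match pattern, not part of the patch — all names such as find_other_dev are invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct dev { int id; int is_pf; };

/* walk a set of devices and return the first one the callback accepts,
 * skipping the caller's own device (the "data" argument) */
static struct dev *find_other_dev(struct dev *devs, size_t n, struct dev *self,
				  int (*match)(struct dev *d, const void *data))
{
	for (size_t i = 0; i < n; i++) {
		if (&devs[i] == self)
			continue;
		if (match(&devs[i], self))
			return &devs[i];
	}
	return NULL;
}

static int match_pf(struct dev *d, const void *data)
{
	(void)data;
	return d->is_pf;	/* mirrors the PF check in _next_phys_dev() */
}

int main(void)
{
	struct dev devs[] = { { 1, 1 }, { 2, 0 }, { 3, 1 } };
	struct dev *other = find_other_dev(devs, 3, &devs[0], match_pf);

	printf("next PF id: %d\n", other ? other->id : -1);
	return 0;
}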
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f9cf9fb31547..ea46152816f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
if (!tracer->owner)
return;
+ if (unlikely(!tracer->str_db.loaded))
+ goto arm;
+
block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
&tmp_trace_block[TRACES_PER_BLOCK - 1]);
}
+arm:
mlx5_fw_tracer_arm(dev);
}
@@ -1137,8 +1141,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
- if (likely(tracer->str_db.loaded))
- queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index a88a1a48229f..d634c034a419 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -12,6 +12,7 @@ struct mlx5e_post_act;
enum {
MLX5E_TC_FT_LEVEL = 0,
MLX5E_TC_TTC_FT_LEVEL,
+ MLX5E_TC_MISS_LEVEL,
};
struct mlx5e_tc_table {
@@ -20,6 +21,7 @@ struct mlx5e_tc_table {
*/
struct mutex t_lock;
struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
struct mlx5_fs_chains *chains;
struct mlx5e_post_act *post_act;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index d4b7b4d73b08..94200f2dd92b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -650,7 +650,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule,
struct mlx5e_mod_hdr_handle **mh,
- u8 zone_restore_id, bool nat)
+ u8 zone_restore_id, bool nat_table, bool has_nat)
{
struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct flow_action_entry *meta;
@@ -665,11 +665,12 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
&attr->ct_attr.ct_labels_id);
if (err)
return -EOPNOTSUPP;
- if (nat) {
- err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
- &mod_acts);
- if (err)
- goto err_mapping;
+ if (nat_table) {
+ if (has_nat) {
+ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
+ if (err)
+ goto err_mapping;
+ }
ct_state |= MLX5_CT_STATE_NAT_BIT;
}
@@ -684,7 +685,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- if (nat) {
+ if (nat_table && has_nat) {
attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
mod_acts.num_actions,
mod_acts.actions);
@@ -752,7 +753,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
&zone_rule->mh,
- zone_restore_id, nat);
+ zone_restore_id,
+ nat,
+ mlx5_tc_ct_entry_has_nat(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 01301bee420c..e00648094fc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3542,6 +3542,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ if (params->xdp_prog) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+ }
+
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
@@ -4174,6 +4181,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
unlock:
mutex_unlock(&priv->state_lock);
+
+ /* Need to fix some features. */
+ if (!err)
+ netdev_update_features(netdev);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 8125e2fc38fc..015eaef9667e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -5033,6 +5033,33 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
return tc_tbl_size;
}
+static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_TC_MISS_LEVEL;
+ ft_attr.prio = 0;
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+
+ *ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(*ft)) {
+ err = PTR_ERR(*ft);
+ netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
+ }
+
+ return err;
+}
+
+static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
@@ -5065,19 +5092,23 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->mapping = chains_mapping;
+ err = mlx5e_tc_nic_create_miss_table(priv);
+ if (err)
+ goto err_chains;
+
if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ attr.default_ft = priv->fs.tc.miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
err = PTR_ERR(tc->chains);
- goto err_chains;
+ goto err_miss;
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
@@ -5100,6 +5131,8 @@ err_reg:
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
mlx5_chains_destroy(tc->chains);
+err_miss:
+ mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
mapping_destroy(chains_mapping);
err_mapping:
@@ -5140,6 +5173,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_tc_post_act_destroy(tc->post_act);
mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
+ mlx5e_tc_nic_destroy_miss_table(priv);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
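Note on the init path above: it gains a new unwind label — when mlx5_chains_create() fails, the miss table created just before it must be torn down first, so the error path jumps to err_miss and then falls through to err_chains and the earlier cleanups. A generic standalone sketch of that goto-unwind ordering; demo code only, the malloc()s merely stand in for the mapping, miss table and chains objects:

#include <stdlib.h>

static int init_demo(void)
{
	void *mapping, *miss_tbl, *chains;
	int err = -1;

	mapping = malloc(16);
	if (!mapping)
		goto err_mapping;
	miss_tbl = malloc(16);		/* like the new miss table */
	if (!miss_tbl)
		goto err_miss_alloc;
	chains = malloc(16);		/* like mlx5_chains_create() */
	if (!chains)
		goto err_chains;

	/* success: release everything again for the demo */
	free(chains);
	free(miss_tbl);
	free(mapping);
	return 0;

err_chains:
	free(miss_tbl);		/* undo the step taken just before the failure */
err_miss_alloc:
	free(mapping);
err_mapping:
	return err;
}

int main(void) { return init_demo(); }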
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 00834c914dc6..cb3f9de3d00b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -113,7 +113,7 @@
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
#define KERNEL_NIC_TC_NUM_PRIOS 1
-#define KERNEL_NIC_TC_NUM_LEVELS 2
+#define KERNEL_NIC_TC_NUM_LEVELS 3
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
@@ -1527,9 +1527,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
return NULL;
}
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+ const struct mlx5_fs_vlan *vlan1)
{
- u32 xored_actions = action1 ^ action2;
+ return vlan0->ethtype != vlan1->ethtype ||
+ vlan0->vid != vlan1->vid ||
+ vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+ const struct mlx5_flow_act *act2)
+{
+ u32 action1 = act1->action;
+ u32 action2 = act2->action;
+ u32 xored_actions;
+
+ xored_actions = action1 ^ action2;
/* if one rule only wants to count, it's ok */
if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1546,6 +1559,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ act1->pkt_reformat != act2->pkt_reformat)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ act1->modify_hdr != act2->modify_hdr)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+ return true;
+
return false;
}
@@ -1553,7 +1582,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+ if (check_conflicting_actions(flow_act, &fte->action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
@@ -2031,16 +2060,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->dests_size) {
- if (fte->modify_mask)
- modify_fte(fte);
- up_write_ref_node(&fte->node, false);
- } else if (list_empty(&fte->node.children)) {
+ if (list_empty(&fte->node.children)) {
del_hw_fte(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
} else {
up_write_ref_node(&fte->node, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index c19d9327095b..0fbb239559f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -435,7 +435,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- struct lag_tracker tracker;
+ struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
@@ -752,12 +752,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
- return 0;
-
- tmp_dev = mlx5_get_next_phys_dev(dev);
+ tmp_dev = mlx5_get_next_phys_dev_lag(dev);
if (tmp_dev)
ldev = tmp_dev->priv.lag;
@@ -802,6 +797,11 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
int err;
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
+ return;
+
recheck:
mlx5_dev_list_lock();
err = __mlx5_lag_dev_add_mdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 230eab7e3bc9..3f3ea8d268ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -186,6 +186,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index a5b9f65db23c..897c7f852123 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -846,7 +846,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
@@ -914,7 +915,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
reformat_req,
&action->dest_tbl->fw_tbl.id,
&action->dest_tbl->fw_tbl.group_id,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (ret)
goto free_action;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 0d6f86eb248b..c74083de1801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.val = val;
fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level;
+ fte_info.flow_context.flow_source = flow_source;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3d4e035698dd..bc206836af6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1394,7 +1394,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 7e58f4e594b7..0553ee1fe80a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -43,11 +43,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
if (err && action) {
err = mlx5dr_action_destroy(action);
- if (err) {
- action = NULL;
- mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
- err);
- }
+ if (err)
+ mlx5_core_err(ns->dev,
+ "Failed to destroy action (%d)\n", err);
+ action = NULL;
}
ft->fs_dr_table.miss_action = action;
if (old_miss_action) {
@@ -492,11 +491,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->flow_context.flow_source;
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 5ef199543479..7806e5c05b67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -96,7 +96,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
struct mlx5dr_action *mlx5dr_action_create_drop(void);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index a68d931090dd..15c8d4de8350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -8,8 +8,8 @@
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
- MLXSW_SP_COUNTER_SUB_POOL_FLOW,
MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 5f92b1691360..aff6d4f35cd2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -168,8 +168,6 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
struct dcb_app *app)
{
- int prio;
-
if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
netdev_err(dev, "APP entry with priority value %u is invalid\n",
app->priority);
@@ -183,17 +181,6 @@ static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
app->protocol);
return -EINVAL;
}
-
- /* Warn about any DSCP APP entries with the same PID. */
- prio = fls(dcb_ieee_getapp_mask(dev, app));
- if (prio--) {
- if (prio < app->priority)
- netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
- app->priority, app->protocol, prio);
- else if (prio > app->priority)
- netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
- app->priority, app->protocol, prio);
- }
break;
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 26d01adbedad..ce6f6590a777 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -864,7 +864,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
.listeners_arr = {
MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
- false, SP_LLDP, DISCARD),
+ true, SP_LLDP, DISCARD),
},
},
{
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 030ae89f3a33..18dc64d7f412 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -980,8 +980,10 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
break;
case CQE_RX_TRUNCATED:
- netdev_err(ndev, "Dropped a truncated packet\n");
- return;
+ ++ndev->stats.rx_dropped;
+ rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
+ netdev_warn_once(ndev, "Dropped a truncated packet\n");
+ goto drop;
case CQE_RX_COALESCED_4:
netdev_err(ndev, "RX coalescing is unsupported\n");
@@ -1043,6 +1045,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
mana_rx_skb(old_buf, oob, rxq);
+drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
mana_post_pkt_rxq(rxq);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index bfd7d1c35076..7e9fcc16286e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -442,6 +442,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv6);
}
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ map[FLOW_PAY_QINQ] = key_size;
+ key_size += sizeof(struct nfp_flower_vlan);
+ }
+
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
map[FLOW_PAY_GRE] = key_size;
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
@@ -450,11 +455,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
}
- if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
- map[FLOW_PAY_QINQ] = key_size;
- key_size += sizeof(struct nfp_flower_vlan);
- }
-
if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
(in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
map[FLOW_PAY_UDP_TUN] = key_size;
@@ -693,6 +693,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+ offset = key_map[FLOW_PAY_QINQ];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+ (struct nfp_flower_vlan *)msk,
+ rules[i]);
+ }
+ }
+
if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
offset = key_map[FLOW_PAY_GRE];
key = kdata + offset;
@@ -733,17 +744,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
- offset = key_map[FLOW_PAY_QINQ];
- key = kdata + offset;
- msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
- (struct nfp_flower_vlan *)msk,
- rules[i]);
- }
- }
-
if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
offset = key_map[FLOW_PAY_UDP_TUN];
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9d86eea4dc16..fb8bd2135c63 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -602,6 +602,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
struct nfp_flower_ipv6_gre_tun *gre_match;
@@ -637,14 +645,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
- (struct nfp_flower_vlan *)msk,
- rule);
- ext += sizeof(struct nfp_flower_vlan);
- msk += sizeof(struct nfp_flower_vlan);
- }
-
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index be1a358baadb..8b614b0201e7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -286,8 +286,6 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
@@ -295,6 +293,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
if (eth_port) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
nfp_net_set_fec_link_mode(eth_port, cmd);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 4eb9ea280474..40d14d80f6f1 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3612,7 +3612,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;
- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index f5a4d8f4fd11..c1cd1c97f09d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2256,7 +2256,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index d5f2ccd3bca4..b1657e03a74f 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -308,6 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
@@ -328,6 +329,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 1;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -956,10 +958,6 @@ int efx_set_channels(struct efx_nic *efx)
struct efx_channel *channel;
int rc;
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
if (efx->xdp_tx_queue_count) {
EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f6981810039d..bf097264d8fb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1533,7 +1533,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return true;
+ return channel && channel->channel >= channel->efx->tx_channel_offset;
}
static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 6f87e296a410..502fbbc082fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1073,13 +1073,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
- goto err_dvr_probe;
+ goto err_alloc_irq;
}
return 0;
-err_dvr_probe:
- pci_free_irq_vectors(pdev);
err_alloc_irq:
clk_disable_unprepare(plat->stmmac_clk);
clk_unregister_fixed_rate(plat->stmmac_clk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fcf17d8a0494..644bb54f5f02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}
-
- pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 0462dcc93e53..dd5c4ef92ef3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1084,8 +1084,9 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
struct tc_cls_u32_offload cls_u32 = { };
struct stmmac_packet_attrs attr = { };
- struct tc_action **actions, *act;
+ struct tc_action **actions;
struct tc_u32_sel *sel;
+ struct tcf_gact *gact;
struct tcf_exts *exts;
int ret, i, nk = 1;
@@ -1104,14 +1105,14 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
goto cleanup_sel;
}
- actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
if (!actions) {
ret = -ENOMEM;
goto cleanup_exts;
}
- act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
- if (!act) {
+ gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL);
+ if (!gact) {
ret = -ENOMEM;
goto cleanup_actions;
}
@@ -1126,9 +1127,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
exts->nr_actions = nk;
exts->actions = actions;
for (i = 0; i < nk; i++) {
- struct tcf_gact *gact = to_gact(&act[i]);
-
- actions[i] = &act[i];
+ actions[i] = (struct tc_action *)&gact[i];
gact->tcf_action = TC_ACT_SHOT;
}
@@ -1152,7 +1151,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
cleanup_act:
- kfree(act);
+ kfree(gact);
cleanup_actions:
kfree(actions);
cleanup_exts:
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 130346f74ee8..ea9d073e87fa 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1802,6 +1802,7 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
if (IS_ERR(cpts)) {
int ret = PTR_ERR(cpts);
+ of_node_put(node);
if (ret == -EOPNOTSUPP) {
dev_info(dev, "cpts disabled\n");
return 0;
@@ -2668,9 +2669,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (!node)
return -ENOENT;
common->port_num = of_get_child_count(node);
+ of_node_put(node);
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
- of_node_put(node);
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 39234852e01b..20f6aa508003 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -272,7 +272,7 @@ static int ptp_ixp_probe(struct platform_device *pdev)
ixp_clock.master_irq = platform_get_irq(pdev, 0);
ixp_clock.slave_irq = platform_get_irq(pdev, 1);
if (IS_ERR(ixp_clock.regs) ||
- !ixp_clock.master_irq || !ixp_clock.slave_irq)
+ ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0)
return -ENXIO;
ixp_clock.caps = ptp_ixp_caps;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bdfcf75f0827..ae4577731e3e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2665,7 +2665,10 @@ static int netvsc_suspend(struct hv_device *dev)
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
-
+ if (!ndev_ctx->saved_netvsc_dev_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index a2fcdb1abdb9..a734e5576729 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1370,9 +1370,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
struct gsi_event *event_done;
struct gsi_event *event;
struct gsi_trans *trans;
+ u32 trans_count = 0;
u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
trans_info = &channel->trans_info;
@@ -1393,6 +1394,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
do {
trans->len = __le16_to_cpu(event->len);
byte_count += trans->len;
+ trans_count++;
/* Move on to the next event and transaction */
if (--event_avail)
@@ -1404,7 +1406,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* We record RX bytes when they are received */
channel->byte_count += byte_count;
- channel->trans_count++;
+ channel->trans_count += trans_count;
}
/* Initialize a ring, including allocating DMA memory for its entries */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 87e42db1b61e..06a791d45f94 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -570,19 +570,23 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
- val |= HDR_ENDIANNESS_FMASK; /* big endian */
-
- /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
- * driver assumes this field is meaningful in packets it receives,
- * and assumes the header's payload length includes that padding.
- * The RMNet driver does *not* pad packets it sends, however, so
- * the pad field (although 0) should be ignored.
- */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
- val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ if (endpoint->data->qmap) {
+ /* We have a header, so we must specify its endianness */
+ val |= HDR_ENDIANNESS_FMASK; /* big endian */
+
+ /* A QMAP header contains a 6 bit pad field at offset 0.
+ * The RMNet driver assumes this field is meaningful in
+ * packets it receives, and assumes the header's payload
+ * length includes that padding. The RMNet driver does
+ * *not* pad packets it sends, however, so the pad field
+ * (although 0) should be ignored.
+ */
+ if (!endpoint->toward_ipa) {
+ val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ }
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
@@ -722,13 +726,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
+ u32 buffer_size;
bool close_eof;
u32 limit;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
+ limit = ipa_aggr_size_kb(buffer_size);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
@@ -738,8 +744,6 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
close_eof = endpoint->data->rx.aggr_close_eof;
val |= aggr_sw_eof_active_encoded(version, close_eof);
-
- /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
} else {
val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
AGGR_EN_FMASK);
@@ -1045,7 +1049,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
err_trans_free:
gsi_trans_free(trans);
err_free_pages:
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
return -ENOMEM;
}
@@ -1385,7 +1389,7 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct page *page = trans->data;
if (page)
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
}
}
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6bbc81ad295f..d097097c93c3 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -137,6 +137,7 @@
#define DP83867_DOWNSHIFT_2_COUNT 2
#define DP83867_DOWNSHIFT_4_COUNT 4
#define DP83867_DOWNSHIFT_8_COUNT 8
+#define DP83867_SGMII_AUTONEG_EN BIT(7)
/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
@@ -836,6 +837,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+ /* There is a limitation in DP83867 PHY device where SGMII AN is
+ * only triggered once after the device is booted up. Even after the
+ * PHY TPI is down and up again, SGMII AN is not triggered and
+ * hence no new in-band message from PHY to MAC side SGMII.
+ * This could cause an issue during power up, when PHY is up prior
+ * to MAC. At this condition, once MAC side SGMII is up, MAC side
+ * SGMII wouldn't receive new in-band message from TI PHY with

+ * correct link status, speed and duplex info.
+ * Thus, a SW solution is implemented here to retrigger SGMII
+ * Auto-Neg whenever there is a link change.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ int val = 0;
+
+ val = phy_clear_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ if (val < 0)
+ return;
+
+ phy_set_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ }
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -860,6 +887,8 @@ static struct phy_driver dp83867_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
+
+ .link_change_notify = dp83867_link_change_notify,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8dc6e6269c65..2c0216fe58de 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1011,7 +1011,6 @@ int __init mdio_bus_init(void)
return ret;
}
-EXPORT_SYMBOL_GPL(mdio_bus_init);
#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 64d829ed9887..05a8985d7107 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -335,7 +335,7 @@ static int kszphy_config_reset(struct phy_device *phydev)
}
}
- if (priv->led_mode >= 0)
+ if (priv->type && priv->led_mode >= 0)
kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
return 0;
@@ -351,10 +351,10 @@ static int kszphy_config_init(struct phy_device *phydev)
type = priv->type;
- if (type->has_broadcast_disable)
+ if (type && type->has_broadcast_disable)
kszphy_broadcast_disable(phydev);
- if (type->has_nand_tree_disable)
+ if (type && type->has_nand_tree_disable)
kszphy_nand_tree_disable(phydev);
return kszphy_config_reset(phydev);
@@ -1328,7 +1328,7 @@ static int kszphy_probe(struct phy_device *phydev)
priv->type = type;
- if (type->led_mode_reg) {
+ if (type && type->led_mode_reg) {
ret = of_property_read_u32(np, "micrel,led-mode",
&priv->led_mode);
if (ret)
@@ -1349,7 +1349,8 @@ static int kszphy_probe(struct phy_device *phydev)
unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
- priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+ if (type)
+ priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
"micrel,rmii-reference-clock-select-25-mhz");
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3619520340b7..e172743948ed 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bd8f8619ad6f..396505396a2e 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -799,11 +799,7 @@ static int ax88772_stop(struct usbnet *dev)
{
struct asix_common_private *priv = dev->driver_priv;
- /* On unplugged USB, we will get MDIO communication errors and the
- * PHY will be set in to PHY_HALTED state.
- */
- if (priv->phydev->state != PHY_HALTED)
- phy_stop(priv->phydev);
+ phy_stop(priv->phydev);
return 0;
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index eb0d325e92b7..4e39e4345084 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1217,8 +1217,7 @@ static int smsc95xx_start_phy(struct usbnet *dev)
static int smsc95xx_stop(struct usbnet *dev)
{
- if (dev->net->phydev)
- phy_stop(dev->net->phydev);
+ phy_stop(dev->net->phydev);
return 0;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a33d7fb82a00..af2bbaff2478 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1614,9 +1614,6 @@ void usbnet_disconnect (struct usb_interface *intf)
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description);
- if (dev->driver_info->unbind)
- dev->driver_info->unbind(dev, intf);
-
net = dev->net;
unregister_netdev (net);
@@ -1624,6 +1621,9 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_scuttle_anchored_urbs(&dev->deferred);
+ if (dev->driver_info->unbind)
+ dev->driver_info->unbind(dev, intf);
+
usb_kill_urb(dev->interrupt);
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 5b0215b7c176..bc3192cf48e3 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
+ rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
+ rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
+ /* ring has already been cleaned up */
+ if (!rq->rx_ring[0].base)
+ return;
+
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index c0cfd9b36c0b..720952b92e78 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
static_identity->static_public, private_key);
}
+static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
+{
+ struct blake2s_state state;
+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
+ int i;
+
+ if (keylen > BLAKE2S_BLOCK_SIZE) {
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, key, keylen);
+ blake2s_final(&state, x_key);
+ } else
+ memcpy(x_key, key, keylen);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, i_hash);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x5c ^ 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&state, i_hash);
+
+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
+}
+
/* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869
@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */
- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len)
goto out;
/* Expand first key: key = secret, data = 0x1 */
output[0] = 1;
- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len);
if (!second_dst || !second_len)
@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len);
if (!third_dst || !third_len)
@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len);
out:
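Note on the new local hmac() above: it is the standard HMAC construction HMAC(K, m) = H((K' xor opad) || H((K' xor ipad) || m)) instantiated with BLAKE2s, ipad = 0x36 and opad = 0x5c. The line x_key[i] ^= 0x5c ^ 0x36 converts the already ipad-masked key bytes into opad-masked ones in place, so no second copy of the key is kept. A tiny standalone check of that identity (demo only, not part of the patch):

#include <assert.h>

int main(void)
{
	unsigned char k = 0xab;			/* one key byte */
	unsigned char ipad_key = k ^ 0x36;	/* byte as used for the inner hash */

	/* xoring with (0x5c ^ 0x36) turns an ipad-masked byte into an opad-masked one */
	unsigned char opad_key = ipad_key ^ (0x5c ^ 0x36);

	assert(opad_key == (unsigned char)(k ^ 0x5c));
	return 0;
}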
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 1f73fbfee0c0..8a80919b627f 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5339,13 +5339,29 @@ err:
static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 opt;
ath10k_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- if (!ar->hw_rfkill_on)
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on) {
+ /* If the current driver state is RESTARTING but not yet
+ * fully RESTARTED because of incoming suspend event,
+ * then ath10k_halt() is already called via
+ * ath10k_core_restart() and should not be called here.
+ */
+ if (ar->state != ATH10K_STATE_RESTARTING) {
+ ath10k_halt(ar);
+ } else {
+ /* Suspending here, because when in RESTARTING
+ * state, ath10k_core_stop() skips
+ * ath10k_wait_for_suspend().
+ */
+ opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
+ ath10k_wait_for_suspend(ar, opt);
+ }
+ }
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 07004564a3ec..f85fd341557e 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4266,8 +4266,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath11k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+ mutex_lock(&ar->conf_mutex);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -4283,6 +4283,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
arvif->is_started);
ieee80211_free_txskb(ar->hw, skb);
}
+ mutex_unlock(&ar->conf_mutex);
}
}
@@ -5592,6 +5593,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
int ret;
mutex_lock(&ar->conf_mutex);
@@ -5603,9 +5605,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath11k_peer_find_by_addr(ab, ar->mac_addr))
- ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ if (peer)
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ }
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 1afe67759659..e5af9358e610 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -214,7 +214,10 @@ static int ath11k_spectral_scan_config(struct ath11k *ar,
return -ENODEV;
arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+
+ spin_lock_bh(&ar->spectral.lock);
ar->spectral.mode = mode;
+ spin_unlock_bh(&ar->spectral.lock);
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
@@ -829,9 +832,6 @@ static inline void ath11k_spectral_ring_free(struct ath11k *ar)
{
struct ath11k_spectral *sp = &ar->spectral;
- if (!sp->enabled)
- return;
-
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
}
@@ -883,15 +883,16 @@ void ath11k_spectral_deinit(struct ath11k_base *ab)
if (!sp->enabled)
continue;
- ath11k_spectral_debug_unregister(ar);
- ath11k_spectral_ring_free(ar);
+ mutex_lock(&ar->conf_mutex);
+ ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ mutex_unlock(&ar->conf_mutex);
spin_lock_bh(&sp->lock);
-
- sp->mode = ATH11K_SPECTRAL_DISABLED;
sp->enabled = false;
-
spin_unlock_bh(&sp->lock);
+
+ ath11k_spectral_debug_unregister(ar);
+ ath11k_spectral_ring_free(ar);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b0a4ca3559fd..abed1effd95c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5615,7 +5615,7 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
static u8 ar9003_get_eepmisc(struct ath_hw *ah)
{
- return ah->eeprom.map4k.baseEepHeader.eepMisc;
+ return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc;
}
const struct eeprom_ops eep_ar9300_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index a171dbb29fbb..ad949eb02f3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -720,7 +720,7 @@
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
-#define AR_CH0_TOP2_XPABIASLVL_S 12
+#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 6a850a0bfa8a..a23eaca0326d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1016,6 +1016,14 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
goto rx_next;
}
+ if (rxstatus->rs_keyix >= ATH_KEYMAX &&
+ rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) {
+ ath_dbg(common, ANY,
+ "Invalid keyix, dropping (keyix: %d)\n",
+ rxstatus->rs_keyix);
+ goto rx_next;
+ }
+
/* Get the RX status information */
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 88444fe6d1c6..f9e1306ac74f 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1558,6 +1558,9 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
goto out;
}
} while (ar->beacon_enabled && i--);
+
+ /* no entry found in list */
+ return NULL;
}
out:
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index cf3ccf4ddfe7..aa5c99465674 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -582,7 +582,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
u16 data[4];
s16 gain[2];
u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+ static const s16 lna_gain[4] = { -2, 10, 19, 25 };
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index 05404fbd1e70..c1395e622759 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -1123,7 +1123,7 @@ void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev)
struct b43legacy_phy *phy = &dev->phy;
u16 regstack[12] = { 0 };
u16 mls;
- u16 fval;
+ s16 fval;
int i;
int j;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index 36d1e6b2568d..4aec1fce1ae2 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -383,7 +383,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
/* Each fragment may need to have room for encryption
* pre/postfix */
- if (host_encrypt)
+ if (host_encrypt && crypt && crypt->ops)
bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
crypt->ops->extra_mpdu_postfix_len;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index f2b090be3898..3d6008da4f9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -563,6 +563,9 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
struct iwl_power_vifs *power_iterator = _data;
bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+ if (!mvmvif->uploaded)
+ return;
+
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index d2ee6469e67b..3fa25cd64cda 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -303,5 +303,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
+ mutex_lock(&priv->wdev.mtx);
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
+ mutex_unlock(&priv->wdev.mtx);
}
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 72622220051b..6c8b44194579 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -162,8 +162,9 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (!sta)
return;
- if (!status->aggr && !(status->flag & RX_FLAG_8023)) {
- mt76_rx_aggr_check_ctl(skb, frames);
+ if (!status->aggr) {
+ if (!(status->flag & RX_FLAG_8023))
+ mt76_rx_aggr_check_ctl(skb, frames);
return;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 7691292526e0..a8a0e6af51f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -799,6 +799,7 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u8 fc_type, fc_stype;
+ u16 ethertype;
bool wmm = false;
u32 val;
@@ -812,7 +813,8 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
FIELD_PREP(MT_TXD1_TID, tid);
- if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
val |= MT_TXD1_ETH_802_3;
txwi[1] |= cpu_to_le32(val);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index 7d7d43a5422f..93d0cc1827d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -118,109 +118,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}
-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
-{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
- { 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
- int i;
-
- if (addr < 0x100000)
- return addr;
-
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
- u32 ofs;
-
- if (addr < fixed_map[i].phys)
- continue;
-
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
- continue;
-
- return fixed_map[i].mapped + ofs;
- }
-
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
- return mt7921_reg_map_l1(dev, addr);
-
- dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
- addr);
-
- return 0;
-}
-
-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
@@ -380,20 +277,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
int mt7921_dma_init(struct mt7921_dev *dev)
{
- struct mt76_bus_ops *bus_ops;
int ret;
- dev->bus_ops = dev->mt76.bus;
- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
- GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
-
- bus_ops->rr = mt7921_rr;
- bus_ops->wr = mt7921_wr;
- bus_ops->rmw = mt7921_rmw;
- dev->mt76.bus = bus_ops;
-
mt76_dma_attach(&dev->mt76);
ret = mt7921_dma_disable(dev, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index c093920a597d..bef8d4a76ed9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -563,7 +563,7 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_VHT;
- if (i > 9)
+ if (i > 11)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
@@ -681,6 +681,7 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
{
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u8 fc_type, fc_stype;
+ u16 ethertype;
bool wmm = false;
u32 val;
@@ -694,7 +695,8 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
FIELD_PREP(MT_TXD1_TID, tid);
- if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
val |= MT_TXD1_ETH_802_3;
txwi[1] |= cpu_to_le32(val);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 506a1909ce6d..4119f8efd896 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1306,8 +1306,6 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -1320,16 +1318,8 @@ int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
- goto out;
}
- mt7921_wpdma_reinit_cond(dev);
- clear_bit(MT76_STATE_PM, &mphy->state);
-
- pm->stats.last_wake_event = jiffies;
- pm->stats.doze_time += pm->stats.last_wake_event -
- pm->stats.last_doze_event;
-out:
return err;
}
@@ -1345,6 +1335,16 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
goto out;
err = __mt7921_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ goto out;
+
+ mt7921_wpdma_reinit_cond(dev);
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
+
out:
mutex_unlock(&pm->mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index c3905bcab360..3d35838ef306 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -88,6 +88,110 @@ static void mt7921_irq_tasklet(unsigned long data)
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7921_reg_map_l1(dev, addr);
+
+ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+ addr);
+
+ return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+
static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -110,6 +214,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt7921_update_channel,
};
+ struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@@ -145,6 +250,24 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops) {
+ ret = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ bus_ops->rr = mt7921_rr;
+ bus_ops->wr = mt7921_wr;
+ bus_ops->rmw = mt7921_rmw;
+ dev->mt76.bus = bus_ops;
+
+ ret = __mt7921_mcu_drv_pmctrl(dev);
+ if (ret)
+ goto err_free_dev;
+
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_err(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index 6bd63934c2d8..b5a1b65c087c 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -233,7 +233,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
- if (cfg80211_register_netdevice(wl->monitor_dev)) {
+ if (register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
free_netdev(wl->monitor_dev);
return NULL;
@@ -251,7 +251,7 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked)
return;
if (rtnl_locked)
- cfg80211_unregister_netdevice(wl->monitor_dev);
+ unregister_netdevice(wl->monitor_dev);
else
unregister_netdev(wl->monitor_dev);
wl->monitor_dev = NULL;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 2477e18c7cae..025619cd14e8 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
+ unsigned int prio = 0;
unsigned long flags;
- unsigned int idx, prio, hw_prio;
+ unsigned int idx, hw_prio;
+
dma_addr_t mapping;
u32 tx_flags;
u8 rc_flags;
@@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
/* do arithmetic and then convert to le16 */
u16 frame_duration = 0;
- prio = skb_get_queue_mapping(skb);
+ /* rtl8180/rtl8185 only has one useable tx queue */
+ if (dev->queues > IEEE80211_AC_BK)
+ prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 86a236873254..a8eebafb9a7e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1014,7 +1014,7 @@ int rtl_usb_probe(struct usb_interface *intf,
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
sizeof(struct rtl_usb_priv), &rtl_ops);
if (!hw) {
- WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
+ pr_warn("rtl_usb: ieee80211 alloc failed\n");
return -ENOMEM;
}
rtlpriv = hw->priv;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 785b8181513f..f405f42d1c1b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -506,6 +506,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s8 rx_power;
u8 lna_idx = 0;
u8 vga_idx = 0;
@@ -517,6 +518,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = rx_power;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = rx_power;
}
@@ -524,6 +526,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 rxsc, bw;
s8 min_rx_power = -120;
@@ -543,6 +546,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
pkt_stat->bw = bw;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index a99aedff795d..ea7309453096 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
int err;
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ usb_anchor_urb(urb, &drv_data->tx_anchor);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err) {
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
+ }
drv_data->tx_in_flight++;
+ usb_free_urb(urb);
+ }
+
+ /* Cleanup the rest deferred urbs. */
+ while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ kfree(urb->setup_packet);
+ usb_free_urb(urb);
}
- usb_scuttle_anchored_urbs(&drv_data->deferred);
}
static int nfcmrvl_resume(struct usb_interface *intf)
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index d32aec0c334f..6dc0af63440f 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2789,13 +2789,14 @@ void pn53x_common_clean(struct pn533 *priv)
{
struct pn533_cmd *cmd, *n;
+ /* delete the timer before cleanup the worker */
+ del_timer_sync(&priv->listen_timer);
+
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
skb_queue_purge(&priv->resp_q);
- del_timer(&priv->listen_timer);
-
list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
list_del(&cmd->queue);
kfree(cmd);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 0841e0e370a0..d41636504246 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -241,7 +241,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
}
EXPORT_SYMBOL(st21nfca_hci_se_io);
-static void st21nfca_se_wt_timeout(struct timer_list *t)
+static void st21nfca_se_wt_work(struct work_struct *work)
{
/*
* No answer from the secure element
@@ -254,8 +254,9 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfca_hci_info *info = from_timer(info, t,
- se_info.bwi_timer);
+ struct st21nfca_hci_info *info = container_of(work,
+ struct st21nfca_hci_info,
+ se_info.timeout_work);
pr_debug("\n");
@@ -273,6 +274,13 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
+static void st21nfca_se_wt_timeout(struct timer_list *t)
+{
+ struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer);
+
+ schedule_work(&info->se_info.timeout_work);
+}
+
static void st21nfca_se_activation_timeout(struct timer_list *t)
{
struct st21nfca_hci_info *info = from_timer(info, t,
@@ -296,6 +304,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
int r = 0;
struct device *dev = &hdev->ndev->dev;
struct nfc_evt_transaction *transaction;
+ u32 aid_len;
+ u8 params_len;
pr_debug("connectivity gate event: %x\n", event);
@@ -304,43 +314,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
r = nfc_se_connectivity(hdev->ndev, host);
break;
case ST21NFCA_EVT_TRANSACTION:
- /*
- * According to specification etsi 102 622
+ /* According to specification etsi 102 622
* 11.2.2.4 EVT_TRANSACTION Table 52
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
+ *
+ * The key differences are aid storage length is variably sized
+ * in the packet, but fixed in nfc_evt_transaction, and that the aid_len
+ * is u8 in the packet, but u32 in the structure, and the tags in
+ * the packet are not included in nfc_evt_transaction.
+ *
+ * size in bytes: 1 1 5-16 1 1 0-255
+ * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
+ * member name: aid_tag(M) aid_len aid params_tag(M) params_len params
+ * example: 0x81 5-16 X 0x82 0-255 X
*/
- if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
- skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+ if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
- if (!transaction)
- return -ENOMEM;
+ aid_len = skb->data[1];
- transaction->aid_len = skb->data[1];
-
- /* Checking if the length of the AID is valid */
- if (transaction->aid_len > sizeof(transaction->aid))
- return -EINVAL;
+ if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
+ return -EPROTO;
- memcpy(transaction->aid, &skb->data[2],
- transaction->aid_len);
+ params_len = skb->data[aid_len + 3];
- /* Check next byte is PARAMETERS tag (82) */
- if (skb->data[transaction->aid_len + 2] !=
- NFC_EVT_TRANSACTION_PARAMS_TAG)
+ /* Verify PARAMETERS tag is (82), and final check that there is enough
+ * space in the packet to read everything.
+ */
+ if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
+ (skb->len < aid_len + 4 + params_len))
return -EPROTO;
- transaction->params_len = skb->data[transaction->aid_len + 3];
+ transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
- /* Total size is allocated (skb->len - 2) minus fixed array members */
- if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
- return -EINVAL;
+ transaction->aid_len = aid_len;
+ transaction->params_len = params_len;
- memcpy(transaction->params, skb->data +
- transaction->aid_len + 4, transaction->params_len);
+ memcpy(transaction->aid, &skb->data[2], aid_len);
+ memcpy(transaction->params, &skb->data[aid_len + 4], params_len);
r = nfc_se_transaction(hdev->ndev, host, transaction);
break;
@@ -364,6 +379,7 @@ int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
switch (event) {
case ST21NFCA_EVT_TRANSMIT_DATA:
del_timer_sync(&info->se_info.bwi_timer);
+ cancel_work_sync(&info->se_info.timeout_work);
info->se_info.bwi_active = false;
r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE,
ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
@@ -393,6 +409,7 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
init_completion(&info->se_info.req_completion);
+ INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work);
/* initialize timers */
timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
@@ -420,6 +437,7 @@ void st21nfca_se_deinit(struct nfc_hci_dev *hdev)
if (info->se_info.se_active)
del_timer_sync(&info->se_info.se_active_timer);
+ cancel_work_sync(&info->se_info.timeout_work);
info->se_info.bwi_active = false;
info->se_info.se_active = false;
}
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index cb6ad916be91..ae6771cc9894 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -141,6 +141,7 @@ struct st21nfca_se_info {
se_io_cb_t cb;
void *cb_context;
+ struct work_struct timeout_work;
};
struct st21nfca_hci_info {
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7de592d7eff4..47625fe4276e 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -399,9 +399,7 @@ static ssize_t capability_show(struct device *dev,
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
switch (cap) {
case NVDIMM_FWA_CAP_QUIESCE:
@@ -426,10 +424,8 @@ static ssize_t activate_show(struct device *dev,
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
state = nd_desc->fw_ops->activate_state(nd_desc);
- nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return -EOPNOTSUPP;
@@ -474,7 +470,6 @@ static ssize_t activate_store(struct device *dev,
else
return -EINVAL;
- nvdimm_bus_lock(dev);
state = nd_desc->fw_ops->activate_state(nd_desc);
switch (state) {
@@ -492,7 +487,6 @@ static ssize_t activate_store(struct device *dev,
default:
rc = -ENXIO;
}
- nvdimm_bus_unlock(dev);
if (rc == 0)
rc = len;
@@ -515,10 +509,7 @@ static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribut
if (!nd_desc->fw_ops)
return 0;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
-
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4b80150e4afa..b5aa55c61461 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -379,11 +379,6 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (dev->driver == NULL) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
- return -EINVAL;
- }
-
rc = check_security_state(nvdimm);
if (rc)
return rc;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f2bb57615762..9bc9f6d225bd 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1777,7 +1777,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 7);
+ blk_queue_dma_alignment(q, 3);
blk_queue_write_cache(q, vwc, vwc);
}
@@ -3032,10 +3032,6 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- ret = nvme_init_non_mdts_limits(ctrl);
- if (ret < 0)
- return ret;
-
ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
@@ -3186,8 +3182,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
* we have no UUID set
*/
if (uuid_is_null(&ids->uuid)) {
- printk_ratelimited(KERN_WARNING
- "No UUID available providing old NGUID\n");
+ dev_warn_ratelimited(dev,
+ "No UUID available providing old NGUID\n");
return sysfs_emit(buf, "%pU\n", ids->nguid);
}
return sysfs_emit(buf, "%pU\n", &ids->uuid);
@@ -4096,11 +4092,26 @@ static void nvme_scan_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, scan_work);
+ int ret;
/* No tagset on a live ctrl means IO queues could not created */
if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
return;
+ /*
+ * Identify controller limits can change at controller reset due to
+ * new firmware download, even though it is not common we cannot ignore
+ * such scenario. Controller's non-mdts limits are reported in the unit
+ * of logical blocks that is dependent on the format of attached
+ * namespace. Hence re-read the limits at the time of ns allocation.
+ */
+ ret = nvme_init_non_mdts_limits(ctrl);
+ if (ret < 0) {
+ dev_warn(ctrl->device,
+ "reading non-mdts-limits failed: %d\n", ret);
+ return;
+ }
+
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
@@ -4358,6 +4369,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
+ nvme_mpath_update(ctrl);
}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e9301b51db76..064acad505d3 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -574,8 +574,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
- if (nvme_state_is_live(ns->ana_state))
+ /*
+ * nvme_mpath_set_live() will trigger I/O to the multipath path device
+ * and in turn to this path device. However we cannot accept this I/O
+ * if the controller is not live. This may deadlock if called from
+ * nvme_mpath_init_identify() and the ctrl will never complete
+ * initialization, preventing I/O from completing. For this case we
+ * will reprocess the ANA log page in nvme_mpath_update() once the
+ * controller is ready.
+ */
+ if (nvme_state_is_live(ns->ana_state) &&
+ ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -662,6 +671,18 @@ static void nvme_ana_work(struct work_struct *work)
nvme_read_ana_log(ctrl);
}
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+ u32 nr_change_groups = 0;
+
+ if (!ctrl->ana_log_buf)
+ return;
+
+ mutex_lock(&ctrl->ana_lock);
+ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+ mutex_unlock(&ctrl->ana_lock);
+}
+
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f1e5c7564cae..72bcd7e5716e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -776,6 +776,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -850,6 +851,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d7695bdbde8d..3ddd24a42043 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1680,6 +1680,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
if (IS_ERR(dev->ctrl.admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
+ dev->ctrl.admin_q = NULL;
return -ENOMEM;
}
if (!blk_get_queue(dev->ctrl.admin_q)) {
@@ -3379,7 +3380,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },
-
+ { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
+ NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index aa6d84d8848e..52bb262d267a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -978,7 +978,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 496d775c6770..cea30e4f5053 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1554,7 +1554,7 @@ static void nvmet_port_release(struct config_item *item)
struct nvmet_port *port = to_nvmet_port(item);
/* Let inflight controllers teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
list_del(&port->global_entry);
kfree(port->ana_state);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b8425fa34300..a8dafe8670f2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1477,7 +1480,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- schedule_work(&ctrl->fatal_err_work);
+ queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
@@ -1617,9 +1620,15 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
}
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ if (!nvmet_wq) {
+ error = -ENOMEM;
+ goto out_free_buffered_work_queue;
+ }
+
error = nvmet_init_discovery();
if (error)
- goto out_free_work_queue;
+ goto out_free_nvmet_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1628,7 +1637,9 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+ destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
@@ -1640,6 +1651,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 22b5108168a6..c43bc5e1c7a2 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod->rqstdatalen = lsreqbuf_len;
iod->hosthandle = hosthandle;
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return 0;
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 54606f1872b4..5c16372f3b53 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
- schedule_work(&rport->ls_work);
+ queue_work(nvmet_wq, &rport->ls_work);
return ret;
}
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
- schedule_work(&rport->ls_work);
+ queue_work(nvmet_wq, &rport->ls_work);
}
return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
- schedule_work(&tport->ls_work);
+ queue_work(nvmet_wq, &tport->ls_work);
return ret;
}
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
- schedule_work(&tport->ls_work);
+ queue_work(nvmet_wq, &tport->ls_work);
}
return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
tgt_rscn->tport = tgtport->private;
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
- schedule_work(&tgt_rscn->work);
+ queue_work(nvmet_wq, &tgt_rscn->work);
}
static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
kref_init(&tfcp_req->ref);
- schedule_work(&tfcp_req->fcp_rcv_work);
+ queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
return 0;
}
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->tio_done_work);
+ queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
if (abortio)
/* leave the reference while the work item is scheduled */
- WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
else {
/*
* as the io has already had the done callback made,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index df7e033dd273..228871d48106 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -292,7 +292,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -352,7 +352,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -382,7 +382,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0285ccc7541f..2553f487c9f2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return BLK_STS_OK;
}
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
return;
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
}
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7143c7fa7464..dbeb0b8c1194 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -365,6 +365,7 @@ struct nvmet_req {
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index f0efb3537989..6220e1dd961a 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -281,7 +281,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
if (req->p.use_workqueue || effects) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
- schedule_work(&req->p.work);
+ queue_work(nvmet_wq, &req->p.work);
} else {
rq->end_io_data = req;
blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f1eedbf493d5..18e082091c82 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1583,7 +1583,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1668,7 +1668,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
}
@@ -1698,7 +1698,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
/**
@@ -1772,7 +1772,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
if (!queue) {
struct nvmet_rdma_port *port = cm_id->context;
- schedule_delayed_work(&port->repair_work, 0);
+ queue_delayed_work(nvmet_wq, &port->repair_work, 0);
break;
}
fallthrough;
@@ -1902,7 +1902,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
nvmet_rdma_disable_port(port);
ret = nvmet_rdma_enable_port(port);
if (ret)
- schedule_delayed_work(&port->repair_work, 5 * HZ);
+ queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}
static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2046,7 +2046,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
}
mutex_unlock(&nvmet_rdma_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
static struct ib_client nvmet_rdma_ib_client = {
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 2b8bab28417b..f592e5f7f5f3 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1251,7 +1251,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
spin_lock(&queue->state_lock);
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
spin_unlock(&queue->state_lock);
}
@@ -1662,7 +1662,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
goto out;
if (sk->sk_state == TCP_LISTEN)
- schedule_work(&port->accept_work);
+ queue_work(nvmet_wq, &port->accept_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1793,7 +1793,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
if (sq->qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
queue->nr_cmds = sq->size * 2;
@@ -1854,12 +1854,12 @@ static void __exit nvmet_tcp_exit(void)
nvmet_unregister_transport(&nvmet_tcp_ops);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
}
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index 761fd870d1db..72c790a3c910 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -386,6 +386,15 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
crashk_res.end - crashk_res.start + 1);
if (ret)
goto out;
+
+ if (crashk_low_res.end) {
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
+ "linux,usable-memory-range",
+ crashk_low_res.start,
+ crashk_low_res.end - crashk_low_res.start + 1);
+ if (ret)
+ goto out;
+ }
}
/* add bootargs */
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index d80160cf34bb..d1187123c4fc 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -170,9 +170,7 @@ static int overlay_notify(struct overlay_changeset *ovcs,
ret = blocking_notifier_call_chain(&overlay_notify_chain,
action, &nd);
- if (ret == NOTIFY_OK || ret == NOTIFY_STOP)
- return 0;
- if (ret) {
+ if (notifier_to_errno(ret)) {
ret = notifier_to_errno(ret);
pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
of_overlay_action_name[action], ret, nd.target);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index c32ae7497392..3028353afece 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -437,11 +437,11 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
/* Checking only first OPP is sufficient */
np = of_get_next_available_child(opp_np, NULL);
+ of_node_put(opp_np);
if (!np) {
dev_err(dev, "OPP table empty\n");
return -EINVAL;
}
- of_node_put(opp_np);
prop = of_find_property(np, "opp-peak-kBps", NULL);
of_node_put(np);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 88e05b9c2e5b..18e32b8ffd5e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -187,8 +187,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 6490323351a5..fba5b16f3272 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -388,7 +388,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
sizeof(pp->msi_msg),
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ ret = dma_mapping_error(pci->dev, pp->msi_data);
+ if (ret) {
dev_err(pci->dev, "Failed to map MSI data\n");
pp->msi_data = 0;
goto err_free_msi;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 8a7a300163e5..b139a2e4af12 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1203,12 +1203,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
goto err_disable_clocks;
}
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- goto err_disable_clocks;
- }
-
/* configure PCIe to RC mode */
writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
@@ -1521,22 +1515,21 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
ret = phy_init(pcie->phy);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
+ if (ret)
goto err_pm_runtime_put;
- }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
- pm_runtime_disable(&pdev->dev);
- goto err_pm_runtime_put;
+ goto err_phy_exit;
}
return 0;
+err_phy_exit:
+ phy_exit(pcie->phy);
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index ff45052cf48d..7cc2c54daad0 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -272,7 +272,6 @@ struct advk_pcie {
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
- int irq;
struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
@@ -1572,26 +1571,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
}
}
-static void advk_pcie_irq_handler(struct irq_desc *desc)
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
- struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 val, mask, status;
+ struct advk_pcie *pcie = arg;
+ u32 status;
- chained_irq_enter(chip, desc);
+ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ if (!(status & PCIE_IRQ_CORE_INT))
+ return IRQ_NONE;
- val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
- mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
- status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
+ advk_pcie_handle_int(pcie);
- if (status & PCIE_IRQ_CORE_INT) {
- advk_pcie_handle_int(pcie);
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
- /* Clear interrupt */
- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
- }
-
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
}
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -1673,7 +1667,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- int ret;
+ int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
if (!bridge)
@@ -1759,9 +1753,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq < 0)
- return pcie->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+ pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
"reset-gpios", 0,
@@ -1818,15 +1820,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
-
bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops;
bridge->map_irq = advk_pcie_map_irq;
ret = pci_host_probe(bridge);
if (ret < 0) {
- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
@@ -1875,9 +1874,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
- /* Remove IRQ handler */
- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
-
/* Remove IRQ domains */
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 2f3f974977a3..5273cb5ede0f 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1008,6 +1008,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
"mediatek,generic-pciecfg");
if (cfg_node) {
pcie->cfg = syscon_node_to_regmap(cfg_node);
+ of_node_put(cfg_node);
if (IS_ERR(pcie->cfg))
return PTR_ERR(pcie->cfg);
}
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 329f930d17aa..fa209ad067bf 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -416,6 +416,7 @@ static void mc_handle_msi(struct irq_desc *desc)
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
ret = generic_handle_domain_irq(msi->dev_domain, bit);
@@ -432,13 +433,8 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 bitpos = data->hwirq;
- unsigned long status;
writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- if (!status)
- writel_relaxed(BIT(PM_MSI_INT_MSI_SHIFT),
- bridge_base_addr + ISTATUS_LOCAL);
}
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 5fb9ce6e536e..d1a200b93b2b 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -264,8 +264,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
/*
* Region 0 is reserved for configuration space and shouldn't
* be used elsewhere per TRM, so leave it out.
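The one-line fix above works because find_first_zero_bit() takes the bitmap size in bits; for a single unsigned long that is simply BITS_PER_LONG, whereas sizeof(map) * BITS_PER_LONG multiplies bytes by bits and lets the search return indexes past the real bitmap. A hedged illustration of the convention (the foo_* name is hypothetical):

#include <linux/bitops.h>

static unsigned long foo_first_free_region(const unsigned long *map)
{
	/* the second argument is a count of bits, not bytes */
	return find_first_zero_bit(map, BITS_PER_LONG);
}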
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 260a06fb78a6..813e0d25e841 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -976,9 +976,11 @@ static bool acpi_pci_power_manageable(struct pci_dev *dev)
static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
- const union acpi_object *obj;
- struct acpi_device *adev;
struct pci_dev *rpdev;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long state;
+ const union acpi_object *obj;
if (!dev->is_hotplug_bridge)
return false;
@@ -987,12 +989,6 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (acpi_pci_power_manageable(dev))
return true;
- /*
- * The ACPI firmware will provide the device-specific properties through
- * _DSD configuration object. Look for the 'HotPlugSupportInD3' property
- * for the root port and if it is set we know the hierarchy behind it
- * supports D3 just fine.
- */
rpdev = pcie_find_root_port(dev);
if (!rpdev)
return false;
@@ -1001,11 +997,34 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (!adev)
return false;
- if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
- ACPI_TYPE_INTEGER, &obj) < 0)
+ /*
+ * If the Root Port cannot signal wakeup signals at all, i.e., it
+ * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
+ * events from low-power states including D3hot and D3cold.
+ */
+ if (!adev->wakeup.flags.valid)
return false;
- return obj->integer.value == 1;
+ /*
+ * If the Root Port cannot wake itself from D3hot or D3cold, we
+ * can't use D3.
+ */
+ status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+ if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
+ return false;
+
+ /*
+ * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
+ * the Port can signal hotplug events while in D3. We assume any
+ * bridges *below* that Root Port can also signal hotplug events
+ * while in D3.
+ */
+ if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ ACPI_TYPE_INTEGER, &obj) &&
+ obj->integer.value == 1)
+ return true;
+
+ return false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a101faf3e88a..2bfff2328cf8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2889,6 +2889,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
},
+ {
+ /*
+ * Downstream device is not accessible after putting a root port
+ * into D3cold and back into D0 on Elo i2.
+ */
+ .ident = "Elo i2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
+ },
+ },
#endif
{ }
};
@@ -5069,18 +5081,18 @@ static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
static void pci_dev_lock(struct pci_dev *dev)
{
- pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
+ pci_cfg_access_lock(dev);
}
/* Return 1 on successful lock, 0 on contention */
int pci_dev_trylock(struct pci_dev *dev)
{
- if (pci_cfg_access_trylock(dev)) {
- if (device_trylock(&dev->dev))
+ if (device_trylock(&dev->dev)) {
+ if (pci_cfg_access_trylock(dev))
return 1;
- pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
return 0;
@@ -5089,8 +5101,8 @@ EXPORT_SYMBOL_GPL(pci_dev_trylock);
void pci_dev_unlock(struct pci_dev *dev)
{
- device_unlock(&dev->dev);
pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
EXPORT_SYMBOL_GPL(pci_dev_unlock);
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 9784fdcf3006..80fe3e83c9f5 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -101,6 +101,11 @@ struct aer_stats {
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
+#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | \
+ PCI_ERR_ROOT_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+
static int pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
@@ -1196,7 +1201,7 @@ static irqreturn_t aer_irq(int irq, void *context)
struct aer_err_source e_src = {};
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
- if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
+ if (!(e_src.status & AER_ERR_STATUS_MASK))
return IRQ_NONE;
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index d13b8d1a780a..4868ec03e32f 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -151,7 +151,7 @@ config TCIC
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
- depends on MIPS_ALCHEMY && PCMCIA
+ depends on MIPS_DB1XXX && PCMCIA
help
	  Enable this driver if you want PCMCIA support on your Alchemy
	  Enable this driver if you want PCMCIA support on your Alchemy
Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 06b04606dd7e..ed69d455ac0e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -4802,7 +4802,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
- goto err_lane_rst;
+ goto err_pcs_ready;
qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
cfg->pcs_misc_tbl_num);
@@ -5382,6 +5382,11 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
.owner = THIS_MODULE,
};
+static void qcom_qmp_reset_control_put(void *data)
+{
+ reset_control_put(data);
+}
+
static
int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
@@ -5454,7 +5459,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
* all phys that don't need this.
*/
snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
if (IS_ERR(qphy->pipe_clk)) {
if (cfg->type == PHY_TYPE_PCIE ||
cfg->type == PHY_TYPE_USB3) {
@@ -5476,6 +5481,10 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
dev_err(dev, "failed to get lane%d reset\n", id);
return PTR_ERR(qphy->lane_rst);
}
+ ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
+ qphy->lane_rst);
+ if (ret)
+ return ret;
}
if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index a3fa03bcd9a3..54064714d73f 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -1236,18 +1236,12 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12);
#define AE12 196
-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
- SIG_EXPR_LIST_PTR(AE12, GPIOY4));
+PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));
#define AF12 197
-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
- SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));
#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1520,9 +1514,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
+FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
@@ -1918,7 +1911,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID),
- ASPEED_PINCTRL_GROUP(FWQSPID),
ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1),
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index cb339299adf9..a2938995c7c1 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -351,6 +351,22 @@ static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
return pinctrl_gpio_direction_output(chip->base + offset);
}
+static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
+ struct device_node *np)
+{
+ struct pinctrl_dev *pctldev = of_pinctrl_get(np);
+
+ of_node_put(np);
+
+ if (!pctldev)
+ return 0;
+
+ gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
+ gc->ngpio);
+
+ return 0;
+}
+
static const struct gpio_chip bcm2835_gpio_chip = {
.label = MODULE_NAME,
.owner = THIS_MODULE,
@@ -365,6 +381,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
.can_sleep = false,
+ .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
};
static const struct gpio_chip bcm2711_gpio_chip = {
@@ -381,6 +398,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
.can_sleep = false,
+ .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
};
static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 246b0e951e1c..8a1706c8bb6e 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -152,6 +152,7 @@ config PINCTRL_MT8195
bool "Mediatek MT8195 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT8365
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
index 79b1fee5a1eb..ddee0db72d26 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
- MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
+ MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5cb018f98800..85a0052bb0e6 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -781,7 +781,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
- if (irq < 0)
+ if (!irq)
continue;
girq->parents[i] = irq;
}
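Both this hunk and the later qcom_smd one rely on the same detail: irq_of_parse_and_map() returns an unsigned virq and signals failure with 0, never with a negative errno, so an `irq < 0` test can never fire. A small sketch of the checking convention, assuming np is a valid device_node (foo_get_virq is hypothetical):

#include <linux/of.h>
#include <linux/of_irq.h>

static int foo_get_virq(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (!virq)	/* 0 means "no mapping", never a negative errno */
		return -EINVAL;

	return virq;
}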
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 543a4991cf70..350e721c4658 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2107,19 +2107,20 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
return false;
}
-static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
- unsigned int pin, u32 arg)
+static int rockchip_pinconf_defer_pin(struct rockchip_pin_bank *bank,
+ unsigned int pin, u32 param, u32 arg)
{
- struct rockchip_pin_output_deferred *cfg;
+ struct rockchip_pin_deferred *cfg;
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
cfg->pin = pin;
+ cfg->param = param;
cfg->arg = arg;
- list_add_tail(&cfg->head, &bank->deferred_output);
+ list_add_tail(&cfg->head, &bank->deferred_pins);
return 0;
}
@@ -2140,6 +2141,25 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
+ if (param == PIN_CONFIG_OUTPUT || param == PIN_CONFIG_INPUT_ENABLE) {
+ /*
+ * Check for gpio driver not being probed yet.
+ * The lock makes sure that either gpio-probe has completed
+ * or the gpio driver hasn't probed yet.
+ */
+ mutex_lock(&bank->deferred_lock);
+ if (!gpio || !gpio->direction_output) {
+ rc = rockchip_pinconf_defer_pin(bank, pin - bank->pin_base, param,
+ arg);
+ mutex_unlock(&bank->deferred_lock);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ mutex_unlock(&bank->deferred_lock);
+ }
+
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
rc = rockchip_set_pull(bank, pin - bank->pin_base,
@@ -2168,27 +2188,21 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc != RK_FUNC_GPIO)
return -EINVAL;
- /*
- * Check for gpio driver not being probed yet.
- * The lock makes sure that either gpio-probe has completed
- * or the gpio driver hasn't probed yet.
- */
- mutex_lock(&bank->deferred_lock);
- if (!gpio || !gpio->direction_output) {
- rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
- mutex_unlock(&bank->deferred_lock);
- if (rc)
- return rc;
-
- break;
- }
- mutex_unlock(&bank->deferred_lock);
-
rc = gpio->direction_output(gpio, pin - bank->pin_base,
arg);
if (rc)
return rc;
break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ rc = rockchip_set_mux(bank, pin - bank->pin_base,
+ RK_FUNC_GPIO);
+ if (rc != RK_FUNC_GPIO)
+ return -EINVAL;
+
+ rc = gpio->direction_input(gpio, pin - bank->pin_base);
+ if (rc)
+ return rc;
+ break;
case PIN_CONFIG_DRIVE_STRENGTH:
/* rk3288 is the first with per-pin drive-strength */
if (!info->ctrl->drv_calc_reg)
@@ -2504,7 +2518,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pdesc++;
}
- INIT_LIST_HEAD(&pin_bank->deferred_output);
+ INIT_LIST_HEAD(&pin_bank->deferred_pins);
mutex_init(&pin_bank->deferred_lock);
}
@@ -2778,7 +2792,7 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev)
{
struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
struct rockchip_pin_bank *bank;
- struct rockchip_pin_output_deferred *cfg;
+ struct rockchip_pin_deferred *cfg;
int i;
of_platform_depopulate(&pdev->dev);
@@ -2787,9 +2801,9 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev)
bank = &info->ctrl->pin_banks[i];
mutex_lock(&bank->deferred_lock);
- while (!list_empty(&bank->deferred_output)) {
- cfg = list_first_entry(&bank->deferred_output,
- struct rockchip_pin_output_deferred, head);
+ while (!list_empty(&bank->deferred_pins)) {
+ cfg = list_first_entry(&bank->deferred_pins,
+ struct rockchip_pin_deferred, head);
list_del(&cfg->head);
kfree(cfg);
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 91f10279d084..98a01a616da6 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -171,7 +171,7 @@ struct rockchip_pin_bank {
u32 toggle_edge_mode;
u32 recalced_mask;
u32 route_mask;
- struct list_head deferred_output;
+ struct list_head deferred_pins;
struct mutex deferred_lock;
};
@@ -247,9 +247,12 @@ struct rockchip_pin_config {
unsigned int nconfigs;
};
-struct rockchip_pin_output_deferred {
+enum pin_config_param;
+
+struct rockchip_pin_deferred {
struct list_head head;
unsigned int pin;
+ enum pin_config_param param;
u32 arg;
};
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index bc17f3131de5..75fc420b6bdf 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -71,12 +71,11 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
/* Fill them. */
for (i = 0; i < num_windows; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- windows->phys = res->start;
- windows->size = resource_size(res);
- windows->virt = devm_ioremap_resource(pfc->dev, res);
+ windows->virt = devm_platform_get_and_ioremap_resource(pdev, i, &res);
if (IS_ERR(windows->virt))
return -ENOMEM;
+ windows->phys = res->start;
+ windows->size = resource_size(res);
windows++;
}
for (i = 0; i < num_irqs; i++)
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index ad6532443a78..a480677dd03d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -629,7 +629,36 @@ enum {
};
static const u16 pinmux_data[] = {
+/* Using GP_2_[2-15] requires disabling I2C in MOD_SEL2 */
+#define GP_2_2_FN GP_2_2_FN, FN_SEL_I2C0_0
+#define GP_2_3_FN GP_2_3_FN, FN_SEL_I2C0_0
+#define GP_2_4_FN GP_2_4_FN, FN_SEL_I2C1_0
+#define GP_2_5_FN GP_2_5_FN, FN_SEL_I2C1_0
+#define GP_2_6_FN GP_2_6_FN, FN_SEL_I2C2_0
+#define GP_2_7_FN GP_2_7_FN, FN_SEL_I2C2_0
+#define GP_2_8_FN GP_2_8_FN, FN_SEL_I2C3_0
+#define GP_2_9_FN GP_2_9_FN, FN_SEL_I2C3_0
+#define GP_2_10_FN GP_2_10_FN, FN_SEL_I2C4_0
+#define GP_2_11_FN GP_2_11_FN, FN_SEL_I2C4_0
+#define GP_2_12_FN GP_2_12_FN, FN_SEL_I2C5_0
+#define GP_2_13_FN GP_2_13_FN, FN_SEL_I2C5_0
+#define GP_2_14_FN GP_2_14_FN, FN_SEL_I2C6_0
+#define GP_2_15_FN GP_2_15_FN, FN_SEL_I2C6_0
PINMUX_DATA_GP_ALL(),
+#undef GP_2_2_FN
+#undef GP_2_3_FN
+#undef GP_2_4_FN
+#undef GP_2_5_FN
+#undef GP_2_6_FN
+#undef GP_2_7_FN
+#undef GP_2_8_FN
+#undef GP_2_9_FN
+#undef GP_2_10_FN
+#undef GP_2_11_FN
+#undef GP_2_12_FN
+#undef GP_2_13_FN
+#undef GP_2_14_FN
+#undef GP_2_15_FN
PINMUX_SINGLE(MMC_D7),
PINMUX_SINGLE(MMC_D6),
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index ef5fb25b6016..849d091205d4 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -865,17 +865,15 @@ static int rzn1_pinctrl_probe(struct platform_device *pdev)
ipctl->mdio_func[0] = -1;
ipctl->mdio_func[1] = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipctl->lev1_protect_phys = (u32)res->start + 0x400;
- ipctl->lev1 = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->lev1 = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ipctl->lev1))
return PTR_ERR(ipctl->lev1);
+ ipctl->lev1_protect_phys = (u32)res->start + 0x400;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ipctl->lev2_protect_phys = (u32)res->start + 0x400;
- ipctl->lev2 = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->lev2 = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(ipctl->lev2))
return PTR_ERR(ipctl->lev2);
+ ipctl->lev2_protect_phys = (u32)res->start + 0x400;
ipctl->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ipctl->clk))
diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
index 2801ca706273..68a5b627fb9b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fc5aa1525d13..ff2a24b0c611 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -189,6 +189,8 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
ec_dev->max_request = sizeof(struct ec_params_hello);
ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
ec_dev->max_passthru = 0;
+ ec_dev->ec = NULL;
+ ec_dev->pd = NULL;
ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
if (!ec_dev->din)
@@ -245,18 +247,16 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
if (IS_ERR(ec_dev->pd)) {
dev_err(ec_dev->dev,
"Failed to create CrOS PD platform device\n");
- platform_device_unregister(ec_dev->ec);
- return PTR_ERR(ec_dev->pd);
+ err = PTR_ERR(ec_dev->pd);
+ goto exit;
}
}
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
err = devm_of_platform_populate(dev);
if (err) {
- platform_device_unregister(ec_dev->pd);
- platform_device_unregister(ec_dev->ec);
dev_err(dev, "Failed to register sub-devices\n");
- return err;
+ goto exit;
}
}
@@ -278,7 +278,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
err = blocking_notifier_chain_register(&ec_dev->event_notifier,
&ec_dev->notifier_ready);
if (err)
- return err;
+ goto exit;
}
dev_info(dev, "Chrome EC device registered\n");
@@ -291,6 +291,10 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
cros_ec_irq_thread(0, ec_dev);
return 0;
+exit:
+ platform_device_unregister(ec_dev->ec);
+ platform_device_unregister(ec_dev->pd);
+ return err;
}
EXPORT_SYMBOL(cros_ec_register);
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index e0bce869c49a..fd33de546aee 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -301,7 +301,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
}
s_cmd->command += ec->cmd_offset;
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, s_cmd);
+ ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 272c89837d74..0dbceee87a4b 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -25,6 +25,9 @@
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
/**
* struct cros_ec_debugfs - EC debugging information.
*
@@ -33,7 +36,6 @@
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
*/
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
struct circ_buf log_buffer;
struct cros_ec_command *read_msg;
struct mutex log_mutex;
- wait_queue_head_t log_wq;
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
buf_space--;
}
- wake_up(&debug_info->log_wq);
+ wake_up(&cros_ec_debugfs_log_wq);
}
mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
mutex_unlock(&debug_info->log_mutex);
- ret = wait_event_interruptible(debug_info->log_wq,
+ ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
if (ret < 0)
return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
struct cros_ec_debugfs *debug_info = file->private_data;
__poll_t mask = 0;
- poll_wait(file, &debug_info->log_wq, wait);
+ poll_wait(file, &cros_ec_debugfs_log_wq, wait);
mutex_lock(&debug_info->log_mutex);
if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
debug_info->log_buffer.tail = 0;
mutex_init(&debug_info->log_mutex);
- init_waitqueue_head(&debug_info->log_wq);
debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
debug_info, &cros_ec_console_log_fops);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index a7404d69b2d3..ed2b4807328d 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -560,22 +560,28 @@ exit:
EXPORT_SYMBOL(cros_ec_query_all);
/**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
* @ec_dev: EC device.
* @msg: Message to write.
*
- * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
- * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
- * successfully and the EC replied with success status.
+ * Call this to send a command to the ChromeOS EC. This should be used instead
+ * of calling the EC's cmd_xfer() callback directly. This function does not
+ * convert EC command execution error codes to Linux error codes. Most
+ * in-kernel users will want to use cros_ec_cmd_xfer_status() instead since
+ * that function implements the conversion.
*
* Return:
- * >=0 - The number of bytes transferred
- * <0 - Linux error code
+ * >0 - EC command was executed successfully. The return value is the number
+ * of bytes returned by the EC (excluding the header).
+ * =0 - EC communication was successful. EC command execution results are
+ * reported in msg->result. The result will be EC_RES_SUCCESS if the
+ * command was executed successfully or report an EC command execution
+ * error.
+ * <0 - EC communication error. Return value is the Linux error code.
*/
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
- int ret, mapped;
+ int ret;
mutex_lock(&ec_dev->lock);
if (ec_dev->proto_version == EC_PROTO_VERSION_UNKNOWN) {
@@ -616,6 +622,32 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
ret = send_command(ec_dev, msg);
mutex_unlock(&ec_dev->lock);
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
+ * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
+ * successfully and the EC replied with success status.
+ *
+ * Return:
+ * >=0 - The number of bytes transferred.
+ * <0 - Linux error code
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int ret, mapped;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret < 0)
+ return ret;
+
mapped = cros_ec_map_error(msg->result);
if (mapped) {
dev_dbg(ec_dev->dev, "Command result (err: %d [%d])\n",
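The kernel-doc added above draws the line between the two exports: cros_ec_cmd_xfer() leaves EC-side failures in msg->result, while cros_ec_cmd_xfer_status() also maps them to Linux error codes. A hedged caller-side sketch, assuming ec_dev is a valid cros_ec_device and using EC_CMD_GET_VERSION purely as an example command:

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>

static int foo_query_ec(struct cros_ec_device *ec_dev)
{
	struct ec_response_get_version *r;
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_VERSION;
	msg->insize = sizeof(*r);

	/*
	 * Preferred for most in-kernel users: a negative return already
	 * covers both transport errors and EC-side failures.
	 */
	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret >= 0) {
		r = (struct ec_response_get_version *)msg->data;
		/* ret bytes of response are valid in *r */
	}

	/*
	 * With cros_ec_cmd_xfer() the caller must inspect msg->result
	 * itself; 0 only means the transfer reached the EC.
	 */
	ret = cros_ec_cmd_xfer(ec_dev, msg);
	if (ret >= 0 && msg->result != EC_RES_SUCCESS)
		ret = -EIO;

	kfree(msg);
	return ret;
}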
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 8ac149173c64..495da331ca2d 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES
config CPU_HWMON
- tristate "Loongson-3 CPU HWMon Driver"
+ bool "Loongson-3 CPU HWMon Driver"
depends on MACH_LOONGSON64
select HWMON
default y
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 386389ffec41..d8c5f9195f85 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -55,55 +55,6 @@ out:
static int nr_packages;
static struct device *cpu_hwmon_dev;
-static SENSOR_DEVICE_ATTR(name, 0444, NULL, NULL, 0);
-
-static struct attribute *cpu_hwmon_attributes[] = {
- &sensor_dev_attr_name.dev_attr.attr,
- NULL
-};
-
-/* Hwmon device attribute group */
-static struct attribute_group cpu_hwmon_attribute_group = {
- .attrs = cpu_hwmon_attributes,
-};
-
-static ssize_t get_cpu_temp(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t cpu_temp_label(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
-
-static const struct attribute *hwmon_cputemp[4][3] = {
- {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- NULL
- }
-};
-
static ssize_t cpu_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -121,24 +72,47 @@ static ssize_t get_cpu_temp(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static int create_sysfs_cputemp_files(struct kobject *kobj)
-{
- int i, ret = 0;
-
- for (i = 0; i < nr_packages; i++)
- ret = sysfs_create_files(kobj, hwmon_cputemp[i]);
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
- return ret;
-}
+static struct attribute *cpu_hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ NULL
+};
-static void remove_sysfs_cputemp_files(struct kobject *kobj)
+static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
- int i;
+ int id = i / 2;
- for (i = 0; i < nr_packages; i++)
- sysfs_remove_files(kobj, hwmon_cputemp[i]);
+ if (id < nr_packages)
+ return attr->mode;
+ return 0;
}
+static struct attribute_group cpu_hwmon_group = {
+ .attrs = cpu_hwmon_attributes,
+ .is_visible = cpu_hwmon_is_visible,
+};
+
+static const struct attribute_group *cpu_hwmon_groups[] = {
+ &cpu_hwmon_group,
+ NULL
+};
+
#define CPU_THERMAL_THRESHOLD 90000
static struct delayed_work thermal_work;
@@ -159,50 +133,31 @@ static void do_thermal_timer(struct work_struct *work)
static int __init loongson_hwmon_init(void)
{
- int ret;
-
pr_info("Loongson Hwmon Enter...\n");
if (cpu_has_csr())
csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
LOONGSON_CSRF_TEMP;
- cpu_hwmon_dev = hwmon_device_register_with_info(NULL, "cpu_hwmon", NULL, NULL, NULL);
- if (IS_ERR(cpu_hwmon_dev)) {
- ret = PTR_ERR(cpu_hwmon_dev);
- pr_err("hwmon_device_register fail!\n");
- goto fail_hwmon_device_register;
- }
-
nr_packages = loongson_sysconf.nr_cpus /
loongson_sysconf.cores_per_package;
- ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
- if (ret) {
- pr_err("fail to create cpu temperature interface!\n");
- goto fail_create_sysfs_cputemp_files;
+ cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
+ NULL, cpu_hwmon_groups);
+ if (IS_ERR(cpu_hwmon_dev)) {
+ pr_err("hwmon_device_register fail!\n");
+ return PTR_ERR(cpu_hwmon_dev);
}
INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
- return ret;
-
-fail_create_sysfs_cputemp_files:
- sysfs_remove_group(&cpu_hwmon_dev->kobj,
- &cpu_hwmon_attribute_group);
- hwmon_device_unregister(cpu_hwmon_dev);
-
-fail_hwmon_device_register:
- return ret;
+ return 0;
}
static void __exit loongson_hwmon_exit(void)
{
cancel_delayed_work_sync(&thermal_work);
- remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
- sysfs_remove_group(&cpu_hwmon_dev->kobj,
- &cpu_hwmon_attribute_group);
hwmon_device_unregister(cpu_hwmon_dev);
}
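The cpu_hwmon rework above replaces hand-rolled sysfs_create_files() calls with a single attribute group whose .is_visible callback hides the sensors that are not populated, registered via hwmon_device_register_with_groups(). A minimal sketch of that pattern with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/sysfs.h>

static int foo_nr_sensors = 1;	/* set from the probed hardware in a real driver */

static ssize_t foo_temp_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int idx = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%d\n", 42000 * idx);	/* placeholder millidegrees */
}

static SENSOR_DEVICE_ATTR(temp1_input, 0444, foo_temp_show, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_input, 0444, foo_temp_show, NULL, 2);

static struct attribute *foo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	NULL
};

static umode_t foo_is_visible(struct kobject *kobj,
			      struct attribute *attr, int i)
{
	/* expose only the attributes backed by a real sensor */
	return i < foo_nr_sensors ? attr->mode : 0;
}

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
	.is_visible = foo_is_visible,
};
__ATTRIBUTE_GROUPS(foo);

static int foo_hwmon_register(struct device *parent)
{
	struct device *hwmon_dev;

	hwmon_dev = hwmon_device_register_with_groups(parent, "foo_hwmon",
						      NULL, foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}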
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 658bab4b7964..ebd15c1d13ec 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
}}
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
@@ -153,6 +154,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
{ }
};
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 41a2a026f156..d7d6782c40c2 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -129,6 +129,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ },
{ }
};
@@ -245,7 +251,7 @@ static bool intel_hid_evaluate_method(acpi_handle handle,
method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
- if (!(intel_hid_dsm_fn_mask & fn_index))
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
goto skip_dsm_eval;
obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid,
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index fb9db7f43895..22378dad4d9f 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -832,17 +832,20 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->regmap_irqc = axp20x->regmap_irqc;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
- if (info->cable.edev == NULL) {
- dev_dbg(dev, "%s is not ready, probe deferred\n",
- AXP288_EXTCON_DEV_NAME);
- return -EPROBE_DEFER;
+ if (IS_ERR(info->cable.edev)) {
+ dev_err_probe(dev, PTR_ERR(info->cable.edev),
+ "extcon_get_extcon_dev(%s) failed\n",
+ AXP288_EXTCON_DEV_NAME);
+ return PTR_ERR(info->cable.edev);
}
if (acpi_dev_present(USB_HOST_EXTCON_HID, NULL, -1)) {
info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_NAME);
- if (info->otg.cable == NULL) {
- dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
- return -EPROBE_DEFER;
+ if (IS_ERR(info->otg.cable)) {
+ dev_err_probe(dev, PTR_ERR(info->otg.cable),
+ "extcon_get_extcon_dev(%s) failed\n",
+ USB_HOST_EXTCON_NAME);
+ return PTR_ERR(info->otg.cable);
}
dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
}
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index c1da217fdb0e..97e8663c08df 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -605,7 +605,6 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
- DMI_MATCH(DMI_BIOS_VERSION, "5.11"),
},
},
{}
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index d67edb760c94..92db79400a6a 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -985,13 +985,10 @@ static int charger_extcon_init(struct charger_manager *cm,
cable->nb.notifier_call = charger_extcon_notifier;
cable->extcon_dev = extcon_get_extcon_dev(cable->extcon_name);
- if (IS_ERR_OR_NULL(cable->extcon_dev)) {
+ if (IS_ERR(cable->extcon_dev)) {
pr_err("Cannot find extcon_dev for %s (cable: %s)\n",
cable->extcon_name, cable->name);
- if (cable->extcon_dev == NULL)
- return -EPROBE_DEFER;
- else
- return PTR_ERR(cable->extcon_dev);
+ return PTR_ERR(cable->extcon_dev);
}
for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) {
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 25207fe2aa68..bfa7a576523d 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -248,10 +248,10 @@ static int max8997_battery_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "couldn't get charger regulator\n");
}
charger->edev = extcon_get_extcon_dev("max8997-muic");
- if (IS_ERR_OR_NULL(charger->edev)) {
- if (!charger->edev)
- return -EPROBE_DEFER;
- dev_info(charger->dev, "couldn't get extcon device\n");
+ if (IS_ERR(charger->edev)) {
+ dev_err_probe(charger->dev, PTR_ERR(charger->edev),
+ "couldn't get extcon device: max8997-muic\n");
+ return PTR_ERR(charger->edev);
}
if (!IS_ERR(charger->reg) && !IS_ERR_OR_NULL(charger->edev)) {
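The three power-supply hunks above all track the same API change: extcon_get_extcon_dev() now returns an ERR_PTR(), including -EPROBE_DEFER while the provider has not probed, instead of NULL, so callers switch from NULL checks to IS_ERR() and can forward the code with dev_err_probe(). A hedged sketch of an updated caller (the device name and the foo_* helper are hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>

static int foo_get_cable(struct device *dev, struct extcon_dev **edev)
{
	*edev = extcon_get_extcon_dev("foo-muic");
	if (IS_ERR(*edev))
		/* forwards -EPROBE_DEFER as well as real errors */
		return dev_err_probe(dev, PTR_ERR(*edev),
				     "failed to get extcon device\n");

	return 0;
}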
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index ea17d446a627..2bd04ecb508c 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -125,6 +125,7 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (err)
return err;
+ duty_ns = min(duty_ns, period_ns);
val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
return lp3943_write_byte(lp3943, reg_duty, val);
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 579a15240e0a..c877de37734d 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -66,7 +66,7 @@ static int raspberrypi_pwm_get_property(struct rpi_firmware *firmware,
u32 reg, u32 *val)
{
struct raspberrypi_pwm_prop msg = {
- .reg = reg
+ .reg = cpu_to_le32(reg),
};
int ret;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bb5a85baa5f5..4d302bca4e0e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2132,10 +2132,13 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
rdev->exclusive = 1;
ret = _regulator_is_enabled(rdev);
- if (ret > 0)
+ if (ret > 0) {
rdev->use_count = 1;
- else
+ regulator->enable_count = 1;
+ } else {
rdev->use_count = 0;
+ regulator->enable_count = 0;
+ }
}
link = device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 0a4fd449c27d..3315994d7e31 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -936,6 +936,8 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
chip->variant_id = DA9121_TYPE_DA9220_DA9132;
regmap = &da9121_2ch_regmap_config;
break;
+ default:
+ return -EINVAL;
}
/* Set these up for of_regulator_match call which may want .of_map_modes */
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 2ce2f5ad9266..5286dd043a4b 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -532,6 +532,7 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
parent = of_get_child_by_name(np, "regulators");
if (!parent) {
dev_err(dev, "regulators node not found\n");
+ of_node_put(np);
return -EINVAL;
}
@@ -561,6 +562,7 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
}
of_node_put(parent);
+ of_node_put(np);
if (ret < 0) {
dev_err(dev, "Error parsing regulator init data: %d\n",
ret);
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index b6287f7e78f4..eb974b9c0b19 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -926,32 +926,31 @@ static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8950_hfsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8950_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
+ /* S5 is managed via SPMI. */
{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_hfsmps, "vdd_s6" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8950_ult_nldo, "vdd_l1_l19" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8950_ult_nldo, "vdd_l2_l23" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8950_ult_nldo, "vdd_l3" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ /* L4 seems not to exist. */
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_nldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16"},
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l19", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l1_l19"},
- { "l20", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l20"},
- { "l21", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l21"},
- { "l22", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l23", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l2_l23"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l5_l6_l7_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ /* L18 seems not to exist. */
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8950_pldo, "vdd_l1_l19" },
+ /* L20 & L21 seem not to exist. */
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8950_pldo, "vdd_l2_l23" },
{}
};
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 1f02f60ad136..41ae7ac27ff6 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -352,7 +352,7 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
return ret;
}
}
-
+ of_node_put(np);
/*
* Register a regulator for each valid regulator-DT-entry that we
* can successfully reach via SCMI and has a valid associated voltage
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 073ae607eef5..3ad8e5929f9b 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -658,6 +658,9 @@ static int imx_rproc_prepare(struct rproc *rproc)
if (!strcmp(it.node->name, "vdev0buffer"))
continue;
+ if (!strcmp(it.node->name, "rsc-table"))
+ continue;
+
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
dev_err(priv->dev, "unable to acquire memory-region\n");
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index d223e438d17c..d9ed37de681a 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1404,9 +1404,9 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->name = node->name;
irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
+ if (!irq) {
dev_err(dev, "required smd interrupt missing\n");
- ret = irq;
+ ret = -EINVAL;
goto put_node;
}
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index e42234a3e2ab..fca034aafd15 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -842,7 +842,7 @@ static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev
err = rpmsg_ctrldev_register_device(rpdev_ctrl);
if (err) {
- kfree(vch);
+ /* vch will be freed in virtio_rpmsg_release_device() */
return ERR_PTR(err);
}
@@ -853,7 +853,7 @@ static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl)
{
if (!rpdev_ctrl)
return;
- kfree(to_virtio_rpmsg_channel(rpdev_ctrl));
+ device_unregister(&rpdev_ctrl->dev);
}
static int rpmsg_probe(struct virtio_device *vdev)
@@ -964,7 +964,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
err = rpmsg_ns_register_device(rpdev_ns);
if (err)
- goto free_vch;
+ /* vch will be freed in virtio_rpmsg_release_device() */
+ goto free_ctrldev;
}
/*
@@ -988,8 +989,6 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
-free_vch:
- kfree(vch);
free_ctrldev:
rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
free_coherent:
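The virtio_rpmsg hunks above stop kfree()ing the channel wrapper once its struct device has been registered: from that point the device refcount owns the memory, teardown goes through device_unregister(), and the embedded release callback is the only place the wrapper may be freed. A hedged generic sketch of that ownership rule (foo_* names hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_channel {
	struct device dev;
	/* channel state would live here */
};

static void foo_release(struct device *dev)
{
	/* the only place the wrapper may be freed once registered */
	kfree(container_of(dev, struct foo_channel, dev));
}

static int foo_add_channel(struct device *parent)
{
	struct foo_channel *ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	int ret;

	if (!ch)
		return -ENOMEM;

	device_initialize(&ch->dev);
	ch->dev.parent = parent;
	ch->dev.release = foo_release;
	dev_set_name(&ch->dev, "foo-chan");

	ret = device_add(&ch->dev);
	if (ret)
		put_device(&ch->dev);	/* drops the ref; foo_release() frees */

	return ret;
}

/* later teardown: device_unregister(&ch->dev); never kfree(ch) directly */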
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index f77bc089eb6b..0aef7df2ea70 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -26,6 +26,15 @@ struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
+ struct timerqueue_head *head = &rtc->timerqueue;
+ struct timerqueue_node *node;
+
+ mutex_lock(&rtc->ops_lock);
+ while ((node = timerqueue_getnext(head)))
+ timerqueue_del(head, node);
+ mutex_unlock(&rtc->ops_lock);
+
+ cancel_work_sync(&rtc->irqwork);
ida_simple_remove(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index ad3add5db4c8..25c6e7d9570f 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -137,28 +137,34 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(rtc->extclk);
if (ret) {
dev_err(dev, "failed to enable EXTCLK\n");
- return ret;
+ goto err_disable_pclk;
}
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -ENODEV;
-
- rtc->rtc_irq = res->start;
+ rtc->rtc_irq = platform_get_irq(pdev, 0);
+ if (rtc->rtc_irq < 0) {
+ ret = rtc->rtc_irq;
+ goto err_disable_extclk;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ if (!res) {
+ ret = -ENODEV;
+ goto err_disable_extclk;
+ }
rtc->rtc_base = devm_ioremap(dev, res->start,
resource_size(res));
- if (!rtc->rtc_base)
- return -ENOMEM;
+ if (!rtc->rtc_base) {
+ ret = -ENOMEM;
+ goto err_disable_extclk;
+ }
rtc->rtc_dev = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
+ if (IS_ERR(rtc->rtc_dev)) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err_disable_extclk;
+ }
rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
@@ -174,9 +180,15 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- return ret;
+ goto err_disable_extclk;
return devm_rtc_register_device(rtc->rtc_dev);
+
+err_disable_extclk:
+ clk_disable_unprepare(rtc->extclk);
+err_disable_pclk:
+ clk_disable_unprepare(rtc->pclk);
+ return ret;
}
static int ftrtc010_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 8abac672f3cb..f3f5a87fe376 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -146,6 +146,17 @@ again:
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ return true;
+#endif
+ return false;
+}
+
/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
@@ -219,7 +230,10 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+ if (apply_amd_register_a_behavior())
+ CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+ else
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 80dc479a6ff0..1d297af80f87 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -269,6 +269,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
rtc->addr_base = res->start;
rtc->data = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 56c58b055dff..43f801107095 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned int buf[5], ctrl2;
+ u8 buf[5];
+ unsigned int ctrl2;
int ret;
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index adec1b14a8de..c551ebf0ac00 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -138,7 +138,7 @@ struct sun6i_rtc_dev {
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
- unsigned long alarm;
+ time64_t alarm;
struct clk_hw hw;
struct clk_hw *int_osc;
@@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
- unsigned long time_now = 0;
- unsigned long time_set = 0;
- unsigned long time_gap = 0;
- int ret = 0;
+ time64_t time_now, time_set;
+ int ret;
ret = sun6i_rtc_gettime(dev, &tm_now);
if (ret < 0) {
@@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
return -EINVAL;
}
- time_gap = time_set - time_now;
-
- if (time_gap > U32_MAX) {
+ if ((time_set - time_now) > U32_MAX) {
dev_err(dev, "Date too far in the future\n");
return -EINVAL;
}
@@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
writel(0, chip->base + SUN6I_ALRM_COUNTER);
usleep_range(100, 300);
- writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+ writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER);
chip->alarm = time_set;
sun6i_rtc_setaie(wkalrm->enabled, chip);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 297fb399363c..620a917cd3a1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1255,7 +1255,7 @@ exit:
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta)
{
struct {
struct chsc_header request;
@@ -1266,7 +1266,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
unsigned int rsvd2[5];
struct chsc_header response;
unsigned int rsvd3[3];
- u64 clock_delta;
+ s64 clock_delta;
unsigned int rsvd4[2];
} *rr;
int rc;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1c79e6c27163..d5623253826f 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3590,10 +3590,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
#endif
if (dcb->target_lun != 0) {
/* Copy settings */
- struct DeviceCtlBlk *p;
- list_for_each_entry(p, &acb->dcb_list, list)
- if (p->target_id == dcb->target_id)
+ struct DeviceCtlBlk *p = NULL, *iter;
+
+ list_for_each_entry(iter, &acb->dcb_list, list)
+ if (iter->target_id == dcb->target_id) {
+ p = iter;
break;
+ }
+
+ if (!p) {
+ kfree(dcb);
+ return NULL;
+ }
+
dprintkdbg(DBG_1,
"device_alloc: <%02i-%i> copy from <%02i-%i>\n",
dcb->target_id, dcb->target_lun,
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 37d06f993b76..1d9be771f3ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
- return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_AGAIN;
+ return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 1756a0ac6f08..558f3f4e1859 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1969,7 +1969,7 @@ EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
*
* Returns: u64 fc world wide name
*/
-u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN],
unsigned int scheme, unsigned int port)
{
u64 wwn;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5d78f7e939a3..56b8a2d6ffe4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9791,7 +9791,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) {
- while (--i > 0)
+ while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@@ -10064,7 +10064,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
- while (--i >= 0)
+ while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 886006ad12a2..5f44a0763f37 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2955,18 +2955,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
- /* Presume the node was released. */
- return;
+ goto out_rsrc_free;
}
out:
- /* Driver is done with the IO. */
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
* on link up processing. For a LOGO / PLOGI initiated by the
@@ -2993,6 +2985,10 @@ out:
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout,
vport->num_disc_nodes);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
lpfc_disc_start(vport);
return;
}
@@ -3009,6 +3005,10 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
+out_rsrc_free:
+ /* Driver is done with the I/O. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -3777,9 +3777,6 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
{
u32 rsp_sig_cap = 0, drv_sig_cap = 0;
u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
- struct lpfc_cgn_info *cp;
- u32 crc;
- u16 sig_freq;
/* Get rsp signal and frequency capabilities. */
rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
@@ -3835,25 +3832,7 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
}
}
- if (!phba->cgn_i)
- return;
-
- /* Update signal frequency in congestion info buffer */
- cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
-
- /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
- * are received by the HBA
- */
- sig_freq = phba->cgn_sig_freq;
-
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
- cp->cgn_warn_freq = cpu_to_le16(sig_freq);
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
- cp->cgn_warn_freq = cpu_to_le16(sig_freq);
- }
- crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
- cp->cgn_info_crc = cpu_to_le32(crc);
+ /* We are NOT recording signal frequency in congestion info buffer */
return;
out_no_support:
@@ -9584,11 +9563,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
/* Take action here for an Alarm event */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
- /* Track of alarm cnt for cgn_info */
- atomic_inc(&phba->cgn_fabric_alarm_cnt);
/* Track of alarm cnt for SYNC_WQE */
atomic_inc(&phba->cgn_sync_alarm_cnt);
}
+ /* Track alarm cnt for cgn_info regardless
+ * of whether CMF is configured for Signals
+ * or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_alarm_cnt);
goto cleanup;
}
break;
@@ -9596,11 +9578,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
/* Take action here for a Warning event */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
- /* Track of warning cnt for cgn_info */
- atomic_inc(&phba->cgn_fabric_warn_cnt);
/* Track of warning cnt for SYNC_WQE */
atomic_inc(&phba->cgn_sync_warn_cnt);
}
+ /* Track warning cnt and freq for cgn_info
+ * regardless of whether CMF is configured for
+ * Signals or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_warn_cnt);
cleanup:
/* Save frequency in ms */
phba->cgn_fpin_frequency =
@@ -9609,14 +9594,10 @@ cleanup:
if (phba->cgn_i) {
cp = (struct lpfc_cgn_info *)
phba->cgn_i->virt;
- if (phba->cgn_reg_fpin &
- LPFC_CGN_FPIN_ALARM)
- cp->cgn_alarm_freq =
- cpu_to_le16(value);
- if (phba->cgn_reg_fpin &
- LPFC_CGN_FPIN_WARN)
- cp->cgn_warn_freq =
- cpu_to_le16(value);
+ cp->cgn_alarm_freq =
+ cpu_to_le16(value);
+ cp->cgn_warn_freq =
+ cpu_to_le16(value);
crc = lpfc_cgn_calc_crc32
(cp,
LPFC_CGN_INFO_SZ,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 7359505e6041..824fc8c08840 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4448,6 +4448,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 16246526e4c1..a2694fb32b5d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5795,21 +5795,8 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
/* Use the frequency found in the last rcv'ed FPIN */
value = phba->cgn_fpin_frequency;
- if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
- cp->cgn_warn_freq = cpu_to_le16(value);
- if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
- cp->cgn_alarm_freq = cpu_to_le16(value);
-
- /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
- * are received by the HBA
- */
- value = phba->cgn_sig_freq;
-
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
- phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
- cp->cgn_warn_freq = cpu_to_le16(value);
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
- cp->cgn_alarm_freq = cpu_to_le16(value);
+ cp->cgn_warn_freq = cpu_to_le16(value);
+ cp->cgn_alarm_freq = cpu_to_le16(value);
lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
LPFC_CGN_CRC32_SEED);
@@ -6493,9 +6480,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
/* Alarm overrides warning, so check that first */
if (cgn_signal->alarm_cnt) {
if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- /* Keep track of alarm cnt for cgn_info */
- atomic_add(cgn_signal->alarm_cnt,
- &phba->cgn_fabric_alarm_cnt);
/* Keep track of alarm cnt for CMF_SYNC_WQE */
atomic_add(cgn_signal->alarm_cnt,
&phba->cgn_sync_alarm_cnt);
@@ -6504,8 +6488,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
/* signal action needs to be taken */
if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- /* Keep track of warning cnt for cgn_info */
- atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
/* Keep track of warning cnt for CMF_SYNC_WQE */
atomic_add(cnt, &phba->cgn_sync_warn_cnt);
}
@@ -15564,34 +15546,7 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
unsigned int temp_idx;
int i;
int j = 0;
- unsigned long rem_nsec, iflags;
- bool log_verbose = false;
- struct lpfc_vport *port_iterator;
-
- /* Don't dump messages if we explicitly set log_verbose for the
- * physical port or any vport.
- */
- if (phba->cfg_log_verbose)
- return;
-
- spin_lock_irqsave(&phba->port_list_lock, iflags);
- list_for_each_entry(port_iterator, &phba->port_list, listentry) {
- if (port_iterator->load_flag & FC_UNLOADING)
- continue;
- if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- if (port_iterator->cfg_log_verbose)
- log_verbose = true;
-
- scsi_host_put(lpfc_shost_from_vport(port_iterator));
-
- if (log_verbose) {
- spin_unlock_irqrestore(&phba->port_list_lock,
- iflags);
- return;
- }
- }
- }
- spin_unlock_irqrestore(&phba->port_list_lock, iflags);
+ unsigned long rem_nsec;
if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
return;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 7d480c798794..a5aafe230c74 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -73,7 +73,7 @@ do { \
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
- if ((mask) & LOG_TRACE_EVENT) \
+ if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \
lpfc_dmp_dbg((vport)->phba); \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
@@ -89,11 +89,11 @@ do { \
(phba)->pport->cfg_log_verbose : \
(phba)->cfg_log_verbose; \
if (((mask) & log_verbose) || (level[1] <= '3')) { \
- if ((mask) & LOG_TRACE_EVENT) \
+ if ((mask) & LOG_TRACE_EVENT && !log_verbose) \
lpfc_dmp_dbg(phba); \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
fmt, phba->brd_no, ##arg); \
- } else if (!(phba)->cfg_log_verbose)\
+ } else if (!log_verbose)\
lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
} \
} while (0)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index fdf5e777bf11..2bd35a7424c2 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -810,7 +810,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nvmet_invalidate_host(phba, ndlp);
if (ndlp->nlp_DID == Fabric_DID) {
- if (vport->port_state <= LPFC_FDISC)
+ if (vport->port_state <= LPFC_FDISC ||
+ vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 66cb66aea2cf..4fb3dc5092f5 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1182,7 +1182,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
- struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ struct nvme_common_command *sqe;
+ struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
@@ -1239,8 +1240,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++;
}
- if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+ sqe = &((struct nvme_fc_cmd_iu *)
+ nCmd->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_async_event)
+ bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+ }
+
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3d9175f1b678..c6944b282e21 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3917,7 +3917,7 @@ lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
else
time = div_u64(time + 500, 1000); /* round it */
- cgs = this_cpu_ptr(phba->cmf_stat);
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->rcv_bytes);
atomic64_add(time, &cgs->rx_latency);
atomic_inc(&cgs->rx_io_cnt);
@@ -3960,7 +3960,7 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
atomic_set(&phba->rx_max_read_cnt, size);
}
- cgs = this_cpu_ptr(phba->cmf_stat);
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->total_bytes);
return 0;
}
@@ -5885,25 +5885,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (!lpfc_cmd)
return ret;
- spin_lock_irqsave(&phba->hbalock, flags);
+ /* Guard against IO completion being called at same time */
+ spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
+
+ spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
ret = FAILED;
- goto out_unlock;
+ goto out_unlock_hba;
}
- /* Guard against IO completion being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
if (!lpfc_cmd->pCmd) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
iocb = &lpfc_cmd->cur_iocbq;
@@ -5911,7 +5911,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
spin_lock(&pring_s4->ring_lock);
}
@@ -5944,8 +5944,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
goto wait_for_cmpl;
}
@@ -5966,15 +5966,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
lpfc_cmd->waitq = NULL;
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
ret = FAILED;
- goto out;
+ goto out_unlock_hba;
}
/* no longer need the lock after this point */
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
@@ -6009,10 +6007,9 @@ wait_for_cmpl:
out_unlock_ring:
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
-out_unlock_buf:
- spin_unlock(&lpfc_cmd->buf_lock);
-out_unlock:
- spin_unlock_irqrestore(&phba->hbalock, flags);
+out_unlock_hba:
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 68d8e55c1205..d8d26cde70b6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -18455,7 +18455,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
case FC_RCTL_ELS_REP: /* extended link services reply */
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
- case FC_RCTL_BA_NOP: /* basic link service NOP */
case FC_RCTL_BA_ABTS: /* basic link service abort */
case FC_RCTL_BA_RMC: /* remove connection */
case FC_RCTL_BA_ACC: /* basic accept */
@@ -18476,6 +18475,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
return lpfc_fc_frame_check(phba, fc_hdr);
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
default:
goto drop;
}
@@ -19288,12 +19288,14 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
if (!lpfc_complete_unsol_iocb(phba,
phba->sli4_hba.els_wq->pring,
iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
+ fc_hdr->fh_type)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
+ }
/* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 56910e94dbf2..7dd6dd74d2bc 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4628,7 +4628,7 @@ static int __init megaraid_init(void)
* major number allocation.
*/
major = register_chrdev(0, "megadev_legacy", &megadev_fops);
- if (!major) {
+ if (major < 0) {
printk(KERN_WARNING
"megaraid: failed to register char device\n");
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index c38e68943205..fafa9fbf3b10 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5381,6 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+ u16 depth;
int sz;
int rc = 0;
@@ -5392,7 +5393,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
goto out;
/* sas iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
if (!sas_iounit_pg1) {
pr_err("%s: failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -5405,16 +5406,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
ioc->name, __FILE__, __LINE__, __func__);
goto out;
}
- ioc->max_wideport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
- MPT3SAS_SAS_QUEUE_DEPTH;
- ioc->max_narrowport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
- MPT3SAS_SAS_QUEUE_DEPTH;
- ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
- sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+ ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+ ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = sas_iounit_pg1->SATAMaxQDepth;
+ ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
+
/* pcie iounit page 1 */
rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
&pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index a4a88323e020..386256369dfc 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1239,7 +1239,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
myrb_unmap(cb);
if (cb->mmio_base) {
- cb->disable_intr(cb->io_base);
+ if (cb->disable_intr)
+ cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)
@@ -3409,9 +3410,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
+ cb->host = shost;
- if (pci_enable_device(pdev))
- goto failure;
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ scsi_host_put(shost);
+ return NULL;
+ }
if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index bffd9a9349e7..9660c4f4de40 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4526,7 +4526,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0;
out_unwind:
- while (--i > 0)
+ while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f5d32d830a9b..ae5eaa4a9283 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3837,6 +3837,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a713babaee0f..de6640ad1943 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3480,7 +3480,6 @@ static int sd_probe(struct device *dev)
out_put:
put_disk(gd);
out_free:
- sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index eafe0db98d54..122d650d0810 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -29,11 +29,9 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
return PTR_ERR(regbase);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto disable_pm;
- }
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9d9770f1db4f..f810b99ef5c5 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -637,12 +637,7 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
return err;
}
- err = ufs_qcom_ice_resume(host);
- if (err)
- return err;
-
- hba->is_sys_suspended = false;
- return 0;
+ return ufs_qcom_ice_resume(host);
}
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
@@ -683,8 +678,11 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
+ /*
+	 * Make sure the write to ref_clk reaches the destination and is
+	 * not stored in a Write Buffer (WB).
+ */
+ readl(host->dev_ref_clk_ctrl_mmio);
/*
* If we call hibern8 exit after this, we need to make sure that
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b55e0a07363f..5c9a31f18b7f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -112,8 +112,13 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
if (!regs)
return -ENOMEM;
- for (pos = 0; pos < len; pos += 4)
+ for (pos = 0; pos < len; pos += 4) {
+ if (offset == 0 &&
+ pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
+ pos <= REG_UIC_ERROR_CODE_DME)
+ continue;
regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+ }
ufshcd_hex_dump(prefix, regs, len);
kfree(regs);
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index f7eaf64293a4..14300896c57f 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -1257,6 +1257,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
int data_seg_len;
+ data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+ & MASK_RSP_UPIU_DATA_SEG_LEN;
+
+ /* If data segment length is zero, rsp_field is not valid */
+ if (!data_seg_len)
+ return;
+
if (unlikely(lrbp->lun != rsp_field->lun)) {
struct scsi_device *sdev;
bool found = false;
@@ -1291,18 +1298,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return;
}
- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
- & MASK_RSP_UPIU_DATA_SEG_LEN;
-
- /* To flush remained rsp_list, we queue the map_work task */
- if (!data_seg_len) {
- if (!ufshpb_is_general_lun(hpb->lun))
- return;
-
- ufshpb_kick_map_work(hpb);
- return;
- }
-
BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 51a82f7803d3..9d16cf925483 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc {
u8 tag;
u8 bus;
u8 target;
- u8 vcpuHint;
- u8 unused[59];
+ u16 vcpuHint;
+ u8 unused[58];
} __packed;
/*
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index 774465c119be..2ac20084e5a5 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -314,6 +314,9 @@ static int bcm_pmb_probe(struct platform_device *pdev)
for (e = table; e->name; e++) {
struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
pd->pmb = pmb;
pd->data = e;
pd->genpd.name = e->name;
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index e53109a5c3da..cabd8870316d 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -630,6 +630,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
static struct platform_driver qcom_llcc_driver = {
.driver = {
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 2df488333be9..cac6b0b7b0b1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -421,6 +421,7 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
}
smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(smp2p->ipc_regmap))
return PTR_ERR(smp2p->ipc_regmap);
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index ef15d014c03a..9df9bba242f3 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -374,6 +374,7 @@ static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
return 0;
host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(host->ipc_regmap))
return PTR_ERR(host->ipc_regmap);
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 494cf2b5bf7b..343ff61ccccb 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -148,12 +148,14 @@ static int __init rockchip_grf_init(void)
return -ENODEV;
if (!match || !match->data) {
pr_err("%s: missing grf data\n", __func__);
+ of_node_put(np);
return -EINVAL;
}
grf_info = match->data;
grf = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
return PTR_ERR(grf);
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8afb3f45d263..a33ec7eaf23d 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -183,6 +183,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
devm_kcalloc(dev, max_id + 1,
sizeof(*pd_provider->data.domains),
GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
pd_provider->data.num_domains = max_id + 1;
pd_provider->data.xlate = ti_sci_pd_xlate;
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index f72d36654ac2..38e7f1a2bb97 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1298,6 +1298,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
/* use generic bandwidth allocation algorithm */
sdw->cdns.bus.compute_params = sdw_compute_params;
+ /* avoid resuming from pm_runtime suspend if it's not required */
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 0ef79d60e88e..f5955826b152 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -97,7 +97,7 @@
#define SWRM_SPECIAL_CMD_ID 0xF
#define MAX_FREQ_NUM 1
-#define TIMEOUT_MS (2 * HZ)
+#define TIMEOUT_MS 100
#define QCOM_SWRM_MAX_RD_LEN 0x1
#define QCOM_SDW_MAX_PORTS 14
#define DEFAULT_CLK_FREQ 9600000
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 2714ba02b176..cda70de38330 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1660,7 +1660,7 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
- .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
};
static const struct of_device_id cqspi_dt_ids[] = {
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 5f05d519fbbd..71376b6df89d 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -731,7 +731,7 @@ static int img_spfi_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index c6a1bb09be05..b721b62118e1 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -133,7 +133,8 @@
#define INT_TF_OVERFLOW (1 << 1)
#define INT_RF_UNDERFLOW (1 << 2)
#define INT_RF_OVERFLOW (1 << 3)
-#define INT_RF_FULL (1 << 4)
+#define INT_RF_FULL (1 << 4)
+#define INT_CS_INACTIVE (1 << 6)
/* Bit fields in ICR, 4bit */
#define ICR_MASK 0x0f
@@ -194,6 +195,10 @@ struct rockchip_spi {
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
bool slave_abort;
+	bool cs_inactive; /* spi slave transmission stops when cs is inactive */
+ bool cs_high_supported; /* native CS supports active-high polarity */
+
+ struct spi_transfer *xfer; /* Store xfer temporarily */
};
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
@@ -343,6 +348,15 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
struct spi_controller *ctlr = dev_id;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+	/* When the CS inactive interrupt arrives, abort the spi slave transfer */
+ if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
+ ctlr->slave_abort(ctlr);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
+ return IRQ_HANDLED;
+ }
+
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
@@ -350,6 +364,7 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
if (!rs->rx_left) {
spi_enable_chip(rs, false);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
spi_finalize_current_transfer(ctlr);
}
@@ -357,14 +372,18 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
}
static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
- struct spi_transfer *xfer)
+ struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
{
rs->tx = xfer->tx_buf;
rs->rx = xfer->rx_buf;
rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
rs->rx_left = xfer->len / rs->n_bytes;
- writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+ if (rs->cs_inactive)
+ writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+ else
+ writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
spi_enable_chip(rs, true);
if (rs->tx_left)
@@ -383,6 +402,9 @@ static void rockchip_spi_dma_rxcb(void *data)
if (state & TXDMA && !rs->slave_abort)
return;
+ if (rs->cs_inactive)
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
}
@@ -423,14 +445,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
atomic_set(&rs->state, 0);
+ rs->tx = xfer->tx_buf;
+ rs->rx = xfer->rx_buf;
+
rxdesc = NULL;
if (xfer->rx_buf) {
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = rs->dma_addr_rx,
.src_addr_width = rs->n_bytes,
- .src_maxburst = rockchip_spi_calc_burst_size(xfer->len /
- rs->n_bytes),
+ .src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
};
dmaengine_slave_config(ctlr->dma_rx, &rxconf);
@@ -474,10 +498,13 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
atomic_or(RXDMA, &rs->state);
- dmaengine_submit(rxdesc);
+ ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
dma_async_issue_pending(ctlr->dma_rx);
}
+ if (rs->cs_inactive)
+ writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+
spi_enable_chip(rs, true);
if (txdesc) {
@@ -584,7 +611,42 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ u32 rx_fifo_left;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ /* Get current dma rx point */
+ if (atomic_read(&rs->state) & RXDMA) {
+ dmaengine_pause(ctlr->dma_rx);
+ status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
+ if (status == DMA_ERROR) {
+ rs->rx = rs->xfer->rx_buf;
+ rs->xfer->len = 0;
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--)
+ readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+ goto out;
+ } else {
+ rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
+ }
+ }
+
+	/* Get the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
+ if (rs->rx) {
+ rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ for (; rx_fifo_left; rx_fifo_left--) {
+ u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+ if (rs->n_bytes == 1)
+ *(u8 *)rs->rx = (u8)rxw;
+ else
+ *(u16 *)rs->rx = (u16)rxw;
+ rs->rx += rs->n_bytes;
+ }
+ rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
+ }
+out:
if (atomic_read(&rs->state) & RXDMA)
dmaengine_terminate_sync(ctlr->dma_rx);
if (atomic_read(&rs->state) & TXDMA)
@@ -626,7 +688,7 @@ static int rockchip_spi_transfer_one(
}
rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
-
+ rs->xfer = xfer;
use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
@@ -636,7 +698,7 @@ static int rockchip_spi_transfer_one(
if (use_dma)
return rockchip_spi_prepare_dma(rs, ctlr, xfer);
- return rockchip_spi_prepare_irq(rs, xfer);
+ return rockchip_spi_prepare_irq(rs, ctlr, xfer);
}
static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
@@ -653,6 +715,34 @@ static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
return xfer->len / bytes_per_word >= rs->fifo_len;
}
+static int rockchip_spi_setup(struct spi_device *spi)
+{
+ struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
+ u32 cr0;
+
+ if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(rs->dev);
+
+ cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
+ cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
+ if (spi->mode & SPI_CS_HIGH && spi->chip_select <= 1)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
+ else if (spi->chip_select <= 1)
+ cr0 &= ~(BIT(spi->chip_select) << CR0_SOI_OFFSET);
+
+ writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ pm_runtime_put(rs->dev);
+
+ return 0;
+}
+
static int rockchip_spi_probe(struct platform_device *pdev)
{
int ret;
@@ -780,6 +870,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
+ ctlr->setup = rockchip_spi_setup;
ctlr->set_cs = rockchip_spi_set_cs;
ctlr->transfer_one = rockchip_spi_transfer_one;
ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
@@ -814,9 +905,15 @@ static int rockchip_spi_probe(struct platform_device *pdev)
switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
case ROCKCHIP_SPI_VER2_TYPE2:
+ rs->cs_high_supported = true;
ctlr->mode_bits |= SPI_CS_HIGH;
+ if (ctlr->can_dma && slave_mode)
+ rs->cs_inactive = true;
+ else
+ rs->cs_inactive = false;
break;
default:
+ rs->cs_inactive = false;
break;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index d16ed88802d3..d575c935e9f0 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1107,14 +1107,11 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
+ cfg.dst_addr = port_addr + RSPI_SPDR;
+ cfg.src_addr = port_addr + RSPI_SPDR;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV) {
- cfg.dst_addr = port_addr;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else {
- cfg.src_addr = port_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- }
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
@@ -1145,12 +1142,12 @@ static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
}
ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
- res->start + RSPI_SPDR);
+ res->start);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
- res->start + RSPI_SPDR);
+ res->start);
if (!ctlr->dma_rx) {
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index ffdc55f87e82..dd38cb8ffbc2 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -308,7 +308,8 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
if (!op->data.nbytes)
goto wait_nobusy;
- if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
+ if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
+ qspi->fmode == CCR_FMODE_APM)
goto out;
reinit_completion(&qspi->data_completion);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index e06aafe169e0..081da1fd3fd7 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -448,6 +448,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
struct dma_async_tx_descriptor *tx;
int ret;
+ unsigned long time_left;
tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
if (!tx) {
@@ -467,9 +468,9 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
}
dma_async_issue_pending(chan);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ time_left = wait_for_completion_timeout(&qspi->transfer_complete,
msecs_to_jiffies(len));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_sync(chan);
dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
return -ETIMEDOUT;
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index 8a75f6642c78..0c41d1e0204f 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev,
goto err_device;
return cd;
err_device:
- device_unregister(&cd->client->dev);
+ put_device(&cd->client->dev);
err_kthread:
kthread_stop(cd->qthread);
err_reset:
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index b589cf6b1d03..e19b91e7a72e 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -599,8 +599,8 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
break;
}
if (!data) {
- dev_err(dai->dev, "%s:%s DATA connection missing\n",
- dai->name, module->name);
+ dev_err(dai->dev, "%s DATA connection missing\n",
+ dai->name);
mutex_unlock(&codec->lock);
return -ENODEV;
}
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
index 340efb57fd18..e63b777d4266 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -74,7 +74,7 @@ static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
no_chroma = 1;
for (j = 0, tmp_w = 0; j < num_tile_cols - 1; j++) {
tmp_w += pps->column_width_minus1[j] + 1;
- *p++ = pps->column_width_minus1[j + 1];
+ *p++ = pps->column_width_minus1[j] + 1;
*p++ = h;
if (i == 0 && h == 1 && ctb_size == 16)
no_chroma = 1;
@@ -194,13 +194,8 @@ static void set_params(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &g2_max_cu_qpd_depth, 0);
}
- if (pps->flags & V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT) {
- hantro_reg_write(vpu, &g2_cb_qp_offset, pps->pps_cb_qp_offset);
- hantro_reg_write(vpu, &g2_cr_qp_offset, pps->pps_cr_qp_offset);
- } else {
- hantro_reg_write(vpu, &g2_cb_qp_offset, 0);
- hantro_reg_write(vpu, &g2_cr_qp_offset, 0);
- }
+ hantro_reg_write(vpu, &g2_cb_qp_offset, pps->pps_cb_qp_offset);
+ hantro_reg_write(vpu, &g2_cr_qp_offset, pps->pps_cr_qp_offset);
hantro_reg_write(vpu, &g2_filt_offset_beta, pps->pps_beta_offset_div2);
hantro_reg_write(vpu, &g2_filt_offset_tc, pps->pps_tc_offset_div2);
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 0b4d2491be3b..228629fb3cdf 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -354,8 +354,6 @@ u16 hantro_h264_get_ref_nbr(struct hantro_ctx *ctx, unsigned int dpb_idx)
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
return 0;
- if (dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return dpb->pic_num;
return dpb->frame_num;
}
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index bcb0bdff4a9a..629bf40a5e5c 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -647,8 +647,12 @@ static int hantro_buf_prepare(struct vb2_buffer *vb)
* (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
* it to buffer length).
*/
- if (V4L2_TYPE_IS_CAPTURE(vq->type))
- vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ if (ctx->is_encoder)
+ vb2_set_plane_payload(vb, 0, 0);
+ else
+ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ }
return 0;
}
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 951e19231da2..22b4bf9e9ef4 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -112,6 +112,7 @@ struct rkvdec_h264_run {
const struct v4l2_ctrl_h264_sps *sps;
const struct v4l2_ctrl_h264_pps *pps;
const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+ int ref_buf_idx[V4L2_H264_NUM_DPB_ENTRIES];
};
struct rkvdec_h264_ctx {
@@ -661,8 +662,8 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx,
WRITE_PPS(0xff, PROFILE_IDC);
WRITE_PPS(1, CONSTRAINT_SET3_FLAG);
WRITE_PPS(sps->chroma_format_idc, CHROMA_FORMAT_IDC);
- WRITE_PPS(sps->bit_depth_luma_minus8 + 8, BIT_DEPTH_LUMA);
- WRITE_PPS(sps->bit_depth_chroma_minus8 + 8, BIT_DEPTH_CHROMA);
+ WRITE_PPS(sps->bit_depth_luma_minus8, BIT_DEPTH_LUMA);
+ WRITE_PPS(sps->bit_depth_chroma_minus8, BIT_DEPTH_CHROMA);
WRITE_PPS(0, QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG);
WRITE_PPS(sps->log2_max_frame_num_minus4, LOG2_MAX_FRAME_NUM_MINUS4);
WRITE_PPS(sps->max_num_ref_frames, MAX_NUM_REF_FRAMES);
@@ -725,6 +726,26 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx,
}
}
+static void lookup_ref_buf_idx(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ int buf_idx = -1;
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ buf_idx = vb2_find_timestamp(cap_q,
+ dpb[i].reference_ts, 0);
+
+ run->ref_buf_idx[i] = buf_idx;
+ }
+}
+
static void assemble_hw_rps(struct rkvdec_ctx *ctx,
struct rkvdec_h264_run *run)
{
@@ -762,7 +783,7 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx,
for (j = 0; j < RKVDEC_NUM_REFLIST; j++) {
for (i = 0; i < h264_ctx->reflists.num_valid; i++) {
- u8 dpb_valid = 0;
+ bool dpb_valid = run->ref_buf_idx[i] >= 0;
u8 idx = 0;
switch (j) {
@@ -779,8 +800,6 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx,
if (idx >= ARRAY_SIZE(dec_params->dpb))
continue;
- dpb_valid = !!(dpb[idx].flags &
- V4L2_H264_DPB_ENTRY_FLAG_ACTIVE);
set_ps_field(hw_rps, DPB_INFO(i, j),
idx | dpb_valid << 4);
@@ -859,13 +878,8 @@ get_ref_buf(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run,
unsigned int dpb_idx)
{
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
- const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb;
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
- int buf_idx = -1;
-
- if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ int buf_idx = run->ref_buf_idx[dpb_idx];
/*
* If a DPB entry is unused or invalid, address of current destination
@@ -1102,6 +1116,7 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
assemble_hw_scaling_list(ctx, &run);
assemble_hw_pps(ctx, &run);
+ lookup_ref_buf_idx(ctx, &run);
assemble_hw_rps(ctx, &run);
config_registers(ctx, &run);
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 3f3f96488d74..4fd4a2907da7 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -967,7 +967,6 @@ static const char * const rkvdec_clk_names[] = {
static int rkvdec_probe(struct platform_device *pdev)
{
struct rkvdec_dev *rkvdec;
- struct resource *res;
unsigned int i;
int ret, irq;
@@ -999,8 +998,7 @@ static int rkvdec_probe(struct platform_device *pdev)
*/
clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rkvdec->regs = devm_ioremap_resource(&pdev->dev, res);
+ rkvdec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rkvdec->regs))
return PTR_ERR(rkvdec->regs);
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 46fe62c7c32c..af13079a6d2c 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -179,7 +179,11 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- rtw_alloc_hwxmits(padapter);
+ if (rtw_alloc_hwxmits(padapter)) {
+ res = _FAIL;
+ goto exit;
+ }
+
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -1516,7 +1520,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+int rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1524,22 +1528,17 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
pxmitpriv->hwxmits = kzalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry, GFP_KERNEL);
+ if (!pxmitpriv->hwxmits)
+ return -ENOMEM;
hwxmits = pxmitpriv->hwxmits;
- if (pxmitpriv->hwxmit_entry == 5) {
- hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
- hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
- hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
- hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
- } else if (pxmitpriv->hwxmit_entry == 4) {
- hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
- hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
- hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
- hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- } else {
- }
+ hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
+ hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
+ hwxmits[2].sta_queue = &pxmitpriv->be_pending;
+ hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+
+ return 0;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index 5f6e2402e5c4..762a2fa3bd17 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -345,7 +345,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+int rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 0eccce57c63a..ca376f7efd42 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -465,12 +465,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep = kmalloc(wep_total_len, GFP_KERNEL);
+ wep_total_len = wep_key_len + sizeof(*pwep);
+ pwep = kzalloc(wep_total_len, GFP_KERNEL);
if (!pwep)
goto exit;
- memset(pwep, 0, wep_total_len);
pwep->KeyLength = wep_key_len;
pwep->Length = wep_total_len;
if (wep_key_len == 13) {
@@ -1249,9 +1248,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
sec_len = *(pos++); len -= 1;
- if (sec_len > 0 && sec_len <= len) {
+ if (sec_len > 0 &&
+ sec_len <= len &&
+ sec_len <= 32) {
ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
ssid_index++;
}
pos += sec_len;
@@ -2050,99 +2051,6 @@ static int rtw_wx_get_nick(struct net_device *dev,
return 0;
}
-static int rtw_wx_read32(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter;
- struct iw_point *p;
- u16 len;
- u32 addr;
- u32 data32;
- u32 bytes;
- u8 *ptmp;
- int ret;
-
- padapter = (struct adapter *)rtw_netdev_priv(dev);
- p = &wrqu->data;
- len = p->length;
- ptmp = kmalloc(len, GFP_KERNEL);
- if (!ptmp)
- return -ENOMEM;
-
- if (copy_from_user(ptmp, p->pointer, len)) {
- kfree(ptmp);
- return -EFAULT;
- }
-
- bytes = 0;
- addr = 0;
- sscanf(ptmp, "%d,%x", &bytes, &addr);
-
- switch (bytes) {
- case 1:
- data32 = rtw_read8(padapter, addr);
- sprintf(extra, "0x%02X", data32);
- break;
- case 2:
- data32 = rtw_read16(padapter, addr);
- sprintf(extra, "0x%04X", data32);
- break;
- case 4:
- data32 = rtw_read32(padapter, addr);
- sprintf(extra, "0x%08X", data32);
- break;
- default:
- DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
- ret = -EINVAL;
- goto err_free_ptmp;
- }
- DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
-
- kfree(ptmp);
- return 0;
-
-err_free_ptmp:
- kfree(ptmp);
- return ret;
-}
-
-static int rtw_wx_write32(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- u32 addr;
- u32 data32;
- u32 bytes;
-
- bytes = 0;
- addr = 0;
- data32 = 0;
- sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
-
- switch (bytes) {
- case 1:
- rtw_write8(padapter, addr, (u8)data32);
- DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%02X\n", __func__, addr, (u8)data32);
- break;
- case 2:
- rtw_write16(padapter, addr, (u16)data32);
- DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%04X\n", __func__, addr, (u16)data32);
- break;
- case 4:
- rtw_write32(padapter, addr, data32);
- DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%08X\n", __func__, addr, data32);
- break;
- default:
- DBG_88E(KERN_INFO "%s: usage> write [bytes],[address(hex)],[data(hex)]\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int rtw_wx_read_rf(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -6577,8 +6485,8 @@ static const struct iw_priv_args rtw_private_args[] = {
};
static iw_handler rtw_private_handler[] = {
-rtw_wx_write32, /* 0x00 */
-rtw_wx_read32, /* 0x01 */
+ NULL, /* 0x00 */
+ NULL, /* 0x01 */
rtw_drvext_hdl, /* 0x02 */
rtw_mp_ioctl_hdl, /* 0x03 */
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 503d33be71d9..ea8bc27fce49 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1a193f900779..2b06706a7071 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 9502f6aa5306..bc033849fcea 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -332,7 +332,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
r8712_free_evt_priv(&padapter->evtpriv);
r8712_DeInitSwLeds(padapter);
r8712_free_mlme_priv(&padapter->mlmepriv);
- r8712_free_io_queue(padapter);
_free_xmit_priv(&padapter->xmitpriv);
_r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index cae04272deff..6db2493e6d3a 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -265,6 +265,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
{
+ r8712_free_io_queue(padapter);
}
void rtl871x_intf_stop(struct _adapter *padapter)
@@ -302,9 +303,6 @@ void r871x_dev_unload(struct _adapter *padapter)
rtl8712_hal_deinit(padapter);
}
- /*s6.*/
- if (padapter->dvobj_deinit)
- padapter->dvobj_deinit(padapter);
padapter->bup = false;
}
}
@@ -538,13 +536,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
} else {
AutoloadFail = false;
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
+ if ((!AutoloadFail) ||
+ ((mac[0] == 0xff) && (mac[1] == 0xff) &&
(mac[2] == 0xff) && (mac[3] == 0xff) &&
(mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
- (mac[4] == 0x00) && (mac[5] == 0x00)) ||
- (!AutoloadFail)) {
+ (mac[4] == 0x00) && (mac[5] == 0x00))) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;
@@ -607,6 +605,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
/* Stop driver mlme relation timer */
r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
+ if (padapter->dvobj_deinit)
+ padapter->dvobj_deinit(padapter);
r8712_free_drv_sw(padapter);
free_netdev(pnetdev);
diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c
index e64845e6adf3..af9966d03979 100644
--- a/drivers/staging/rtl8712/usb_ops.c
+++ b/drivers/staging/rtl8712/usb_ops.c
@@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 1;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u8)(le32_to_cpu(data) & 0x0ff);
}
@@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 2;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u16)(le32_to_cpu(data) & 0xffff);
}
@@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 4;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return le32_to_cpu(data);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index cf79bec916c5..952c3e14d1b3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -749,7 +749,9 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
}
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ spin_unlock_bh(&pmlmepriv->lock);
del_timer_sync(&pmlmepriv->scan_to_timer);
+ spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
}
@@ -1236,8 +1238,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+ spin_unlock_bh(&pmlmepriv->lock);
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
+ spin_lock_bh(&pmlmepriv->lock);
} else {
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
}
@@ -1543,7 +1547,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1571,7 +1575,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
}
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
}
/*
@@ -1584,11 +1588,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- spin_lock_bh(&pmlmepriv->lock);
+ spin_lock_irq(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- spin_unlock_bh(&pmlmepriv->lock);
+ spin_unlock_irq(&pmlmepriv->lock);
rtw_indicate_scan_done(adapter, true);
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 44bb380e7390..fa866acef5bb 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -850,7 +850,6 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size;
- attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 0ca5ec14d3db..1e8e9dd3f482 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -20,6 +20,7 @@
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/pagemap.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -1660,17 +1661,37 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
unsigned long last)
{
- XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
struct page *page;
+ unsigned long dpi;
u32 pages_freed = 0;
- xas_lock(&xas);
- xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
- xas_store(&xas, NULL);
+ first = first * udev->data_pages_per_blk;
+ last = (last + 1) * udev->data_pages_per_blk - 1;
+ xa_for_each_range(&udev->data_pages, dpi, page, first, last) {
+ xa_erase(&udev->data_pages, dpi);
+ /*
+ * While reaching here there may be page faults occurring on
+ * the to-be-released pages. A race condition may occur if
+ * unmap_mapping_range() is called before page faults on these
+ * pages have completed; a valid but stale map is created.
+ *
+ * If another command subsequently runs and needs to extend
+ * dbi_thresh, it may reuse the slot corresponding to the
+ * previous page in data_bitmap. Though we will allocate a new
+ * page for the slot in data_area, no page fault will happen
+ * because we have a valid map. Therefore the command's data
+ * will be lost.
+ *
+ * We lock and unlock pages that are to be released to ensure
+ * all page faults have completed. This way
+ * unmap_mapping_range() can ensure stale maps are cleanly
+ * removed.
+ */
+ lock_page(page);
+ unlock_page(page);
__free_page(page);
pages_freed++;
}
- xas_unlock(&xas);
atomic_sub(pages_freed, &global_page_count);
@@ -1822,6 +1843,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
get_page(page);
+ lock_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
@@ -1863,6 +1885,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
struct page *page;
unsigned long offset;
void *addr;
+ vm_fault_t ret = 0;
int mi = tcmu_find_mem_index(vmf->vma);
if (mi < 0)
@@ -1887,10 +1910,11 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
page = tcmu_try_get_data_page(udev, dpi);
if (!page)
return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_LOCKED;
}
vmf->page = page;
- return 0;
+ return ret;
}
static const struct vm_operations_struct tcmu_vm_ops = {
@@ -3153,12 +3177,22 @@ static void find_free_blocks(void)
udev->dbi_max = block;
}
+ /*
+ * Release the block pages.
+ *
+ * Also note that since tcmu_vma_fault() gets an extra page
+ * refcount, tcmu_blocks_release() won't free pages if pages
+ * are mapped. This means it is safe to call
+ * tcmu_blocks_release() before unmap_mapping_range() which
+ * drops the refcount of any pages it unmaps and thus releases
+ * them.
+ */
+ pages_freed = tcmu_blocks_release(udev, start, end - 1);
+
/* Here will truncate the data area from off */
off = udev->data_off + (loff_t)start * udev->data_blk_size;
unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
- /* Release the block pages */
- pages_freed = tcmu_blocks_release(udev, start, end - 1);
mutex_unlock(&udev->cmdr_lock);
total_pages_freed += pages_freed;
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
index 1ec57d9ecf53..e9bef5c3414b 100644
--- a/drivers/thermal/broadcom/bcm2711_thermal.c
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -38,7 +38,6 @@ static int bcm2711_get_temp(void *data, int *temp)
int offset = thermal_zone_get_offset(priv->thermal);
u32 val;
int ret;
- long t;
ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val);
if (ret)
@@ -50,9 +49,7 @@ static int bcm2711_get_temp(void *data, int *temp)
val &= AVS_RO_TEMP_STATUS_DATA_MSK;
/* Convert a HW code to a temperature reading (millidegree celsius) */
- t = slope * val + offset;
-
- *temp = t < 0 ? 0 : t;
+ *temp = slope * val + offset;
return 0;
}
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 475ce2900771..85ab9edd580c 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -60,6 +60,9 @@ static int sr_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
sr_thermal->regs = (void __iomem *)devm_memremap(&pdev->dev, res->start,
resource_size(res),
MEMREMAP_WB);
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 4310cb342a9f..d38a80adec73 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -358,21 +358,28 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct thermal_cooling_device *cdev;
struct device *dev = df->dev.parent;
struct devfreq_cooling_device *dfc;
+ struct thermal_cooling_device_ops *ops;
char *name;
int err, num_opps;
- dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
- if (!dfc)
+ ops = kmemdup(&devfreq_cooling_ops, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
return ERR_PTR(-ENOMEM);
+ dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
+ if (!dfc) {
+ err = -ENOMEM;
+ goto free_ops;
+ }
+
dfc->devfreq = df;
dfc->em_pd = em_pd_get(dev);
if (dfc->em_pd) {
- devfreq_cooling_ops.get_requested_power =
+ ops->get_requested_power =
devfreq_cooling_get_requested_power;
- devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
- devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
+ ops->state2power = devfreq_cooling_state2power;
+ ops->power2state = devfreq_cooling_power2state;
dfc->power_ops = dfc_power;
@@ -407,8 +414,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
if (!name)
goto remove_qos_req;
- cdev = thermal_of_cooling_device_register(np, name, dfc,
- &devfreq_cooling_ops);
+ cdev = thermal_of_cooling_device_register(np, name, dfc, ops);
kfree(name);
if (IS_ERR(cdev)) {
@@ -429,6 +435,8 @@ free_table:
kfree(dfc->freq_table);
free_dfc:
kfree(dfc);
+free_ops:
+ kfree(ops);
return ERR_PTR(err);
}
@@ -510,11 +518,13 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_em_register);
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
struct devfreq_cooling_device *dfc;
+ const struct thermal_cooling_device_ops *ops;
struct device *dev;
if (IS_ERR_OR_NULL(cdev))
return;
+ ops = cdev->ops;
dfc = cdev->devdata;
dev = dfc->devfreq->dev.parent;
@@ -525,5 +535,6 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
kfree(dfc->freq_table);
kfree(dfc);
+ kfree(ops);
}
EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 4d2cf3dc6c9b..32eb7d514530 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -151,8 +151,8 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor) {
of_node_put(child);
- of_node_put(sensor_np);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
ret = thermal_zone_of_get_sensor_id(child,
@@ -207,7 +207,9 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
}
}
+put_node:
of_node_put(sensor_np);
+ of_node_put(np);
return ret;
}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 13891745a971..867c8aa92b3a 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -945,6 +945,7 @@ __thermal_cooling_device_register(struct device_node *np,
return cdev;
out_kfree_type:
+ thermal_cooling_device_destroy_sysfs(cdev);
kfree(cdev->type);
put_device(&cdev->device);
cdev = NULL;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 2897a77d44c3..b805b6939794 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -851,7 +851,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
static void tb_tunnel_dp(struct tb *tb)
{
- int available_up, available_down, ret;
+ int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out;
struct tb_tunnel *tunnel;
@@ -897,6 +897,20 @@ static void tb_tunnel_dp(struct tb *tb)
}
/*
+ * This is only applicable to links that are not bonded (so
+ * when Thunderbolt 1 hardware is involved somewhere in the
+ * topology). For these try to share the DP bandwidth between
+ * the two lanes.
+ */
+ link_nr = 1;
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel)) {
+ link_nr = 0;
+ break;
+ }
+ }
+
+ /*
* DP stream needs the domain to be active so runtime resume
* both ends of the tunnel.
*
@@ -927,7 +941,8 @@ static void tb_tunnel_dp(struct tb *tb)
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
- tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
+ available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim;
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 1f69bab236ee..66b6e665e96f 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -1348,7 +1348,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1394,7 +1394,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1444,7 +1444,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1509,7 +1509,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1627,7 +1627,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -2009,7 +2009,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2245,7 +2245,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2282,7 +2282,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index bb5cc480fc9a..bd98c719bf55 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -843,6 +843,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @link_nr: Preferred lane adapter when the link is not bonded
* @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
* if not limited)
* @max_down: Maximum available downstream bandwidth for the DP tunnel
@@ -854,8 +855,8 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out, int max_up,
- int max_down)
+ struct tb_port *out, int link_nr,
+ int max_up, int max_down)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -879,21 +880,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
paths = tunnel->paths;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
- 1, "Video");
+ link_nr, "Video");
if (!path)
goto err_free;
tb_dp_init_video_path(path);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
- TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+ TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
- TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+ TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index eea14e24f7e0..a92027431697 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -69,8 +69,8 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out, int max_up,
- int max_down);
+ struct tb_port *out, int link_nr,
+ int max_up, int max_down);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index d24af649a8bb..0e32920af10d 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -61,13 +61,13 @@ static void do_rw_io(struct goldfish_tty *qtty,
spin_lock_irqsave(&qtty->lock, irq_flags);
gf_write_ptr((void *)address, base + GOLDFISH_TTY_REG_DATA_PTR,
base + GOLDFISH_TTY_REG_DATA_PTR_HIGH);
- __raw_writel(count, base + GOLDFISH_TTY_REG_DATA_LEN);
+ gf_iowrite32(count, base + GOLDFISH_TTY_REG_DATA_LEN);
if (is_write)
- __raw_writel(GOLDFISH_TTY_CMD_WRITE_BUFFER,
+ gf_iowrite32(GOLDFISH_TTY_CMD_WRITE_BUFFER,
base + GOLDFISH_TTY_REG_CMD);
else
- __raw_writel(GOLDFISH_TTY_CMD_READ_BUFFER,
+ gf_iowrite32(GOLDFISH_TTY_CMD_READ_BUFFER,
base + GOLDFISH_TTY_REG_CMD);
spin_unlock_irqrestore(&qtty->lock, irq_flags);
@@ -142,7 +142,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
unsigned char *buf;
u32 count;
- count = __raw_readl(base + GOLDFISH_TTY_REG_BYTES_READY);
+ count = gf_ioread32(base + GOLDFISH_TTY_REG_BYTES_READY);
if (count == 0)
return IRQ_NONE;
@@ -159,7 +159,7 @@ static int goldfish_tty_activate(struct tty_port *port, struct tty_struct *tty)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty,
port);
- __raw_writel(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
+ gf_iowrite32(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
return 0;
}
@@ -167,7 +167,7 @@ static void goldfish_tty_shutdown(struct tty_port *port)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty,
port);
- __raw_writel(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
+ gf_iowrite32(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
}
static int goldfish_tty_open(struct tty_struct *tty, struct file *filp)
@@ -202,7 +202,7 @@ static unsigned int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
{
struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
void __iomem *base = qtty->base;
- return __raw_readl(base + GOLDFISH_TTY_REG_BYTES_READY);
+ return gf_ioread32(base + GOLDFISH_TTY_REG_BYTES_READY);
}
static void goldfish_tty_console_write(struct console *co, const char *b,
@@ -357,7 +357,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
* on Ranchu emulator (qemu2) returns 1 here and
* driver will use physical addresses.
*/
- qtty->version = __raw_readl(base + GOLDFISH_TTY_REG_VERSION);
+ qtty->version = gf_ioread32(base + GOLDFISH_TTY_REG_VERSION);
/*
* Goldfish TTY device on Ranchu emulator (qemu2)
@@ -376,7 +376,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
}
}
- __raw_writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_REG_CMD);
+ gf_iowrite32(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_REG_CMD);
ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED,
"goldfish_tty", qtty);
@@ -407,6 +407,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
err_tty_register_device_failed:
free_irq(irq, qtty);
err_dec_line_count:
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
@@ -427,7 +428,8 @@ static int goldfish_tty_remove(struct platform_device *pdev)
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
- free_irq(qtty->irq, pdev);
+ free_irq(qtty->irq, qtty);
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
@@ -438,7 +440,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
static void gf_early_console_putchar(struct uart_port *port, int ch)
{
- __raw_writel(ch, port->membase);
+ gf_iowrite32(ch, port->membase);
}
static void gf_early_write(struct console *con, const char *s, unsigned int n)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 1767503de744..6734ef22c304 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -443,6 +443,25 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
return modembits;
}
+static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
+ unsigned long len)
+{
+ char *prefix;
+
+ if (!fname) {
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len,
+ true);
+ return;
+ }
+
+ prefix = kasprintf(GFP_ATOMIC, "%s: ", fname);
+ if (!prefix)
+ return;
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
+ true);
+ kfree(prefix);
+}
+
/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
@@ -507,7 +526,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
else
pr_cont("(F)");
- print_hex_dump_bytes("", DUMP_PREFIX_NONE, data, dlen);
+ gsm_hex_dump_bytes(NULL, data, dlen);
}
@@ -689,10 +708,8 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
}
if (debug & 4)
- print_hex_dump_bytes("gsm_data_kick: ",
- DUMP_PREFIX_OFFSET,
- gsm->txframe, len);
- if (gsmld_output(gsm, gsm->txframe, len) < 0)
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
+ if (gsmld_output(gsm, gsm->txframe, len) <= 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
gsm->tx_bytes -= msg->len;
@@ -2371,10 +2388,8 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return -ENOSPC;
}
if (debug & 4)
- print_hex_dump_bytes("gsmld_output: ", DUMP_PREFIX_OFFSET,
- data, len);
- gsm->tty->ops->write(gsm->tty, data, len);
- return len;
+ gsm_hex_dump_bytes(__func__, data, len);
+ return gsm->tty->ops->write(gsm->tty, data, len);
}
/**
@@ -2449,8 +2464,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char flags = TTY_NORMAL;
if (debug & 4)
- print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
- cp, count);
+ gsm_hex_dump_bytes(__func__, cp, count);
for (; count; count--, cp++) {
if (fp)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index de5b45de5040..891036bd9f89 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2012,6 +2012,35 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return ldata->read_tail != canon_head;
}
+/*
+ * If we finished a read at the exact location of an
+ * EOF (special EOL character that's a __DISABLED_CHAR)
+ * in the stream, silently eat the EOF.
+ */
+static void canon_skip_eof(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t tail, canon_head;
+
+ canon_head = smp_load_acquire(&ldata->canon_head);
+ tail = ldata->read_tail;
+
+ // No data?
+ if (tail == canon_head)
+ return;
+
+ // See if the tail position is EOF in the circular buffer
+ tail &= (N_TTY_BUF_SIZE - 1);
+ if (!test_bit(tail, ldata->read_flags))
+ return;
+ if (read_buf(ldata, tail) != __DISABLED_CHAR)
+ return;
+
+ // Clear the EOL bit, skip the EOF char.
+ clear_bit(tail, ldata->read_flags);
+ smp_store_release(&ldata->read_tail, ldata->read_tail + 1);
+}
+
/**
* job_control - check job control
* @tty: tty
@@ -2081,7 +2110,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
*/
if (*cookie) {
if (ldata->icanon && !L_EXTPROC(tty)) {
- if (canon_copy_from_read_buf(tty, &kb, &nr))
+ /*
+ * If we have filled the user buffer, see
+ * if we should skip an EOF character before
+ * releasing the lock and returning done.
+ */
+ if (!nr)
+ canon_skip_eof(tty);
+ else if (canon_copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
} else {
if (copy_from_read_buf(tty, &kb, &nr))
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index c2cecc6f47db..179bb1375636 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -429,6 +429,8 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
memset(&port, 0, sizeof(port));
port.port.private_data = vuart;
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 251f0018ae8c..dba5950b8d0e 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -200,12 +200,12 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!pdata)
return -EINVAL;
- /* Hardware do not support same RTS level on send and receive */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
if (rs485->flags & SER_RS485_ENABLED) {
+		/* Hardware does not support the same RTS level on send and receive */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
} else {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 2285ef947755..df9731f73746 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1535,6 +1535,8 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (em485) {
unsigned char lsr = serial_in(p, UART_LSR);
+ p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
/*
* To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3d40306971b8..0e908061b5d7 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1620,13 +1620,6 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
container_of(port, struct uart_amba_port, port);
unsigned int cr;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
- mctrl &= ~TIOCM_RTS;
- else
- mctrl |= TIOCM_RTS;
- }
-
cr = pl011_read(uap, REG_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
@@ -1850,14 +1843,8 @@ static int pl011_startup(struct uart_port *port)
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
- } else {
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
cr |= UART011_CR_TXE;
- }
pl011_write(cr, uap, REG_CR);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d6d3db9c3b1f..db07d6a5d764 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1247,7 +1247,7 @@ static int cpm_uart_init_port(struct device_node *np,
}
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-#ifdef CONFIG_CONSOLE_POLL
+#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_CPM_CONSOLE)
if (!udbg_port)
#endif
udbg_putc = NULL;
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index c7f81aa1ce91..5fea9bf86e85 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -309,6 +309,8 @@ static void digicolor_uart_set_termios(struct uart_port *port,
case CS8:
default:
config |= UA_CONFIG_CHAR_LEN;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 03a2fe9f4c9a..02b375ba2f07 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1501,7 +1501,7 @@ static int icom_probe(struct pci_dev *dev,
retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
if (retval) {
dev_err(&dev->dev, "PCI Config read FAILED\n");
- return retval;
+ goto probe_exit0;
}
pci_write_config_dword(dev, PCI_COMMAND,
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index efee3935917f..62e6c1af1344 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -253,6 +253,14 @@ static const char *meson_uart_type(struct uart_port *port)
return (port->type == PORT_MESON) ? "meson_uart" : NULL;
}
+/*
+ * This function is called only from probe() using a temporary io mapping
+ * in order to perform a reset before setting up the device. Since the
+ * temporarily mapped region was successfully requested, there can be no
+ * console on this port at this time. Hence it is not necessary for this
+ * function to acquire the port->lock. (Since there is no console on this
+ * port at this time, the port->lock is not initialized yet.)
+ */
static void meson_uart_reset(struct uart_port *port)
{
u32 val;
@@ -267,9 +275,12 @@ static void meson_uart_reset(struct uart_port *port)
static int meson_uart_startup(struct uart_port *port)
{
+ unsigned long flags;
u32 val;
int ret = 0;
+ spin_lock_irqsave(&port->lock, flags);
+
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
@@ -285,6 +296,8 @@ static int meson_uart_startup(struct uart_port *port)
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
+ spin_unlock_irqrestore(&port->lock, flags);
+
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 489d19274f9a..03ff63438e77 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1588,6 +1588,7 @@ static inline struct uart_port *msm_get_port_from_line(unsigned int line)
static void __msm_console_write(struct uart_port *port, const char *s,
unsigned int count, bool is_uartdm)
{
+ unsigned long flags;
int i;
int num_newlines = 0;
bool replaced = false;
@@ -1605,6 +1606,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
+ local_irq_save(flags);
+
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
@@ -1650,6 +1653,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (locked)
spin_unlock(&port->lock);
+
+ local_irq_restore(flags);
}
static void msm_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 91f1eb0058d7..9a6611cfc18e 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -731,6 +731,7 @@ static int owl_uart_probe(struct platform_device *pdev)
owl_port->port.uartclk = clk_get_rate(owl_port->clk);
if (owl_port->port.uartclk == 0) {
dev_err(&pdev->dev, "clock rate is zero\n");
+ clk_disable_unprepare(owl_port->clk);
return -EINVAL;
}
owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index f0351e6f0ef6..1e65933f6cce 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -624,22 +624,6 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
return 0;
}
-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
-{
- int ret = 0;
- struct uart_port *port = &priv->port;
-
- if (port->x_char) {
- dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
- __func__, port->x_char, jiffies);
- buf[0] = port->x_char;
- port->x_char = 0;
- ret = 1;
- }
-
- return ret;
-}
-
static int dma_push_rx(struct eg20t_port *priv, int size)
{
int room;
@@ -889,9 +873,10 @@ static unsigned int handle_tx(struct eg20t_port *priv)
fifo_size = max(priv->fifo_size, 1);
tx_empty = 1;
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+ if (port->x_char) {
+ pch_uart_hal_write(priv, &port->x_char, 1);
port->icount.tx++;
+ port->x_char = 0;
tx_empty = 0;
fifo_size--;
}
@@ -946,9 +931,11 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
}
fifo_size = max(priv->fifo_size, 1);
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+
+ if (port->x_char) {
+ pch_uart_hal_write(priv, &port->x_char, 1);
port->icount.tx++;
+ port->x_char = 0;
fifo_size--;
}
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index d550d8fa2fab..a8fe1c3ebcd9 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -262,6 +262,8 @@ static void rda_uart_set_termios(struct uart_port *port,
fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS7;
break;
default:
ctrl |= RDA_UART_DBITS_8;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 697b6a002a16..4ddcc985621a 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -446,6 +446,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
+ del_timer_sync(&sport->timer);
+
spin_lock_irqsave(&sport->port.lock, flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
@@ -476,8 +478,6 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
UTSR1_TO_SM(UTSR1_ROR);
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index eb15423f935a..4d3ad4c6c60f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -144,6 +144,11 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ set &= ~TIOCM_RTS;
+ clear &= ~TIOCM_RTS;
+ }
+
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
@@ -157,23 +162,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
static void uart_port_dtr_rts(struct uart_port *uport, int raise)
{
- int rs485_on = uport->rs485_config &&
- (uport->rs485.flags & SER_RS485_ENABLED);
- int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND);
-
- if (raise) {
- if (rs485_on && RTS_after_send) {
- uart_set_mctrl(uport, TIOCM_DTR);
- uart_clear_mctrl(uport, TIOCM_RTS);
- } else {
- uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- }
- } else {
- unsigned int clear = TIOCM_DTR;
-
- clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0;
- uart_clear_mctrl(uport, clear);
- }
+ if (raise)
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
/*
@@ -1089,11 +1081,6 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
goto out;
if (!tty_io_error(tty)) {
- if (uport->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
uart_update_mctrl(uport, set, clear);
ret = 0;
}
@@ -2408,6 +2395,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
*/
spin_lock_irqsave(&port->lock, flags);
port->mctrl &= TIOCM_DTR;
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
+ port->mctrl |= TIOCM_RTS;
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index aaca4fe38486..1f8362d5e3b9 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -644,6 +644,8 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
case CS6: /* not supported */
case CS8:
cval |= TXX9_SILCR_UMODE_8BIT;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 89ee43061d3a..c5c0f39cb1c7 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2390,8 +2390,12 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
int best_clk = -1;
unsigned long flags;
- if ((termios->c_cflag & CSIZE) == CS7)
+ if ((termios->c_cflag & CSIZE) == CS7) {
smr_val |= SCSMR_CHR;
+ } else {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_cflag & PARENB)
smr_val |= SCSMR_PE;
if (termios->c_cflag & PARODD)
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 0ac0371f943b..4b0fa91e9f9a 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -666,12 +666,16 @@ static void sifive_serial_set_termios(struct uart_port *port,
int rate;
char nstop;
- if ((termios->c_cflag & CSIZE) != CS8)
+ if ((termios->c_cflag & CSIZE) != CS8) {
dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_iflag & (INPCK | PARMRK))
dev_err_once(ssp->port.dev, "parity checking not supported\n");
if (termios->c_iflag & BRKINT)
dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
+ termios->c_iflag &= ~(INPCK|PARMRK|BRKINT);
/* Set number of stop bits */
nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
@@ -998,7 +1002,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
/* Set up clock divider */
ssp->clkin_rate = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
- ssp->port.uartclk = ssp->baud_rate * 16;
+ ssp->port.uartclk = ssp->clkin_rate;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 87e480cc8206..5a45633aaea8 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -535,10 +535,14 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
/* set character length */
if ((cflag & CSIZE) == CS7) {
ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
+ cflag |= PARENB;
} else {
ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
ASC_CTL_MODE_8BIT;
+ cflag &= ~CSIZE;
+ cflag |= CS8;
}
+ termios->c_cflag = cflag;
/* set stop bit */
ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 810a1b0b6520..10e9f983de62 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -807,13 +807,22 @@ static void stm32_usart_set_termios(struct uart_port *port,
* CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
* M0 and M1 already cleared by cr1 initialization.
*/
- if (bits == 9)
+ if (bits == 9) {
cr1 |= USART_CR1_M0;
- else if ((bits == 7) && cfg->has_7bits_data)
+ } else if ((bits == 7) && cfg->has_7bits_data) {
cr1 |= USART_CR1_M1;
- else if (bits != 8)
+ } else if (bits != 8) {
dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
, bits);
+ cflag &= ~CSIZE;
+ cflag |= CS8;
+ termios->c_cflag = cflag;
+ bits = 8;
+ if (cflag & PARENB) {
+ bits++;
+ cr1 |= USART_CR1_M0;
+ }
+ }
if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
(stm32_port->fifoen &&
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 25c558e65ece..9bc2a9265277 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1746,6 +1746,8 @@ static int hdlcdev_init(struct slgt_info *info)
*/
static void hdlcdev_exit(struct slgt_info *info)
{
+ if (!info->netdev)
+ return;
unregister_hdlc_device(info->netdev);
free_netdev(info->netdev);
info->netdev = NULL;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c911196ac893..6b445ece8339 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -232,8 +232,10 @@ static void showacpu(void *dummy)
unsigned long flags;
/* Idle CPUs have no interesting backtrace. */
- if (idle_cpu(smp_processor_id()))
+ if (idle_cpu(smp_processor_id())) {
+ pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id());
return;
+ }
raw_spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
@@ -260,10 +262,13 @@ static void sysrq_handle_showallcpus(int key)
if (in_hardirq())
regs = get_irq_regs();
- if (regs) {
- pr_info("CPU%d:\n", smp_processor_id());
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ if (regs)
show_regs(regs);
- }
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
schedule_work(&sysrq_showallcpus);
}
}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 6c7e65b1d9a1..6127f84b92b1 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -174,7 +174,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
*/
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
- p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
+ GFP_ATOMIC | __GFP_NOWARN);
if (p == NULL)
return NULL;
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index e45c3d6e1536..794e413800ae 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1941,13 +1941,16 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
}
if (enqd_len + trb_buff_len >= full_len) {
- if (need_zero_pkt)
- zero_len_trb = !zero_len_trb;
-
- field &= ~TRB_CHAIN;
- field |= TRB_IOC;
- more_trbs_coming = false;
- preq->td.last_trb = ring->enqueue;
+ if (need_zero_pkt && !zero_len_trb) {
+ zero_len_trb = true;
+ } else {
+ zero_len_trb = false;
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ need_zero_pkt = false;
+ preq->td.last_trb = ring->enqueue;
+ }
}
/* Only set interrupt on short packet for OUT endpoints. */
@@ -1962,7 +1965,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
- cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
+ cdnsp_queue_trb(pdev, ring, more_trbs_coming,
lower_32_bits(send_addr),
upper_32_bits(send_addr),
length_field,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index d630cccd2e6e..5af810cd8a58 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -616,10 +616,10 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
- .freeze = check_root_hub_suspended,
+ .freeze = hcd_pci_suspend,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
- .thaw = NULL,
+ .thaw = hcd_pci_resume,
.poweroff = hcd_pci_suspend,
.poweroff_noirq = hcd_pci_suspend_noirq,
.restore_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index dd3c288fa952..9e28d715fa3b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2816,6 +2816,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
{
int retval;
struct usb_device *rhdev;
+ struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2976,13 +2977,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_hcd_driver_start;
}
+ /* starting here, usbcore will pay attention to the shared HCD roothub */
+ shared_hcd = hcd->shared_hcd;
+ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+ retval = register_root_hub(shared_hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
+
+ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+ usb_hcd_poll_rh_status(shared_hcd);
+ }
+
/* starting here, usbcore will pay attention to this root hub */
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ if (!HCD_DEFER_RH_REGISTER(hcd)) {
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+ }
return retval;
@@ -3020,6 +3034,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
+ bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -3030,6 +3045,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
+ rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -3039,7 +3055,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ if (rh_registered)
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 97b44a68668a..f99a65a64588 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -510,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* DELL USB GEN2 */
+ { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
+
/* VCOM device */
{ USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0909b088a284..e1cebf581a4a 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4544,7 +4544,6 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
WARN_ON(hsotg->driver);
- driver->driver.bus = NULL;
hsotg->driver = driver;
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 657dbd50faf1..82322696b903 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5194,7 +5194,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -EINVAL;
- goto error1;
+ goto error2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 6c82493f7df9..903e9dd76f38 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -454,13 +454,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
* This device property is for kernel internal use only and
* is expected to be set by the glue code.
*/
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
- edev = extcon_get_extcon_dev(name);
- if (!edev)
- return ERR_PTR(-EPROBE_DEFER);
-
- return edev;
- }
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
+ return extcon_get_extcon_dev(name);
/*
* Try to get an extcon device from the USB PHY controller's "port"
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index f08b2178fd32..9c8887615701 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -256,7 +256,7 @@ static void dwc3_pci_resume_work(struct work_struct *work)
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0a19494589c6..11b3259e02ce 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1955,10 +1955,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
struct dwc3_request *req;
- struct dwc3_request *tmp;
struct dwc3 *dwc = dep->dwc;
- list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
+ while (!list_empty(&dep->cancelled_list)) {
+ req = next_request(&dep->cancelled_list);
dwc3_gadget_ep_skip_trbs(dep, req);
switch (req->status) {
case DWC3_REQUEST_STATUS_DISCONNECTED:
@@ -1975,6 +1975,12 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
dwc3_gadget_giveback(dep, req, -ECONNRESET);
break;
}
+ /*
+		 * The endpoint is disabled; let dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
}
}
@@ -3258,15 +3264,21 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event, int status)
{
struct dwc3_request *req;
- struct dwc3_request *tmp;
- list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
+ while (!list_empty(&dep->started_list)) {
int ret;
+ req = next_request(&dep->started_list);
ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
req, status);
if (ret)
break;
+ /*
+		 * The endpoint is disabled; let dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
}
}
@@ -3305,14 +3317,14 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
- if (!dep->endpoint.desc)
- return no_started_trb;
-
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
goto out;
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list) &&
(list_empty(&dep->pending_list) || status == -EXDEV))
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 02f70c5c65fc..adc44a2685b5 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -122,8 +122,6 @@ struct ffs_ep {
struct usb_endpoint_descriptor *descs[3];
u8 num;
-
- int status; /* P: epfile->mutex */
};
struct ffs_epfile {
@@ -227,6 +225,9 @@ struct ffs_io_data {
bool use_sg;
struct ffs_data *ffs;
+
+ int status;
+ struct completion done;
};
struct ffs_desc_helper {
@@ -707,12 +708,15 @@ static const struct file_operations ffs_ep0_operations = {
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
+ struct ffs_io_data *io_data = req->context;
+
ENTER();
- if (req->context) {
- struct ffs_ep *ep = _ep->driver_data;
- ep->status = req->status ? req->status : req->actual;
- complete(req->context);
- }
+ if (req->status)
+ io_data->status = req->status;
+ else
+ io_data->status = req->actual;
+
+ complete(&io_data->done);
}
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
@@ -1050,7 +1054,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
} else if (!io_data->aio) {
- DECLARE_COMPLETION_ONSTACK(done);
bool interrupted = false;
req = ep->req;
@@ -1066,7 +1069,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
io_data->buf = data;
- req->context = &done;
+ init_completion(&io_data->done);
+ req->context = io_data;
req->complete = ffs_epfile_io_complete;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
@@ -1075,7 +1079,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
- if (wait_for_completion_interruptible(&done)) {
+ if (wait_for_completion_interruptible(&io_data->done)) {
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ if (epfile->ep != ep) {
+ ret = -ESHUTDOWN;
+ goto error_lock;
+ }
/*
* To avoid race condition with ffs_epfile_io_complete,
* dequeue the request first then check
@@ -1083,17 +1092,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* condition with req->complete callback.
*/
usb_ep_dequeue(ep->ep, req);
- wait_for_completion(&done);
- interrupted = ep->status < 0;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ wait_for_completion(&io_data->done);
+ interrupted = io_data->status < 0;
}
if (interrupted)
ret = -EINTR;
- else if (io_data->read && ep->status > 0)
- ret = __ffs_epfile_read_data(epfile, data, ep->status,
+ else if (io_data->read && io_data->status > 0)
+ ret = __ffs_epfile_read_data(epfile, data, io_data->status,
&io_data->data);
else
- ret = ep->status;
+ ret = io_data->status;
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index d15a54f6c24b..ef253599dcf9 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -774,9 +774,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
- if (get_ether_addr(dev_addr, net->dev_addr))
+ if (get_ether_addr(dev_addr, net->dev_addr)) {
+ net->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
+ } else {
+ net->addr_assign_type = NET_ADDR_SET;
+ }
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
"using random %s ethernet address\n", "host");
@@ -833,6 +837,9 @@ struct net_device *gether_setup_name_default(const char *netname)
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
+ /* by default we always have a random MAC address */
+ net->addr_assign_type = NET_ADDR_RANDOM;
+
skb_queue_head_init(&dev->rx_frames);
/* network device setup */
@@ -869,7 +876,6 @@ int gether_register_netdev(struct net_device *net)
dev = netdev_priv(net);
g = dev->gadget;
- net->addr_assign_type = NET_ADDR_RANDOM;
eth_hw_addr_set(net, dev->dev_mac);
status = register_netdev(net);
@@ -910,6 +916,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+ net->addr_assign_type = NET_ADDR_SET;
return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index d86c3a36441e..3427ce37a5c5 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -145,6 +145,7 @@ enum dev_state {
STATE_DEV_INVALID = 0,
STATE_DEV_OPENED,
STATE_DEV_INITIALIZED,
+ STATE_DEV_REGISTERING,
STATE_DEV_RUNNING,
STATE_DEV_CLOSED,
STATE_DEV_FAILED
@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
ret = -EINVAL;
goto out_unlock;
}
+ dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
ret = usb_gadget_probe_driver(&dev->driver);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index a25d01c89564..865de8db998a 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3014,6 +3014,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
}
udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
if (!udc->isp1301_i2c_client) {
return -EPROBE_DEFER;
}
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 8835f6bd528e..8c7f0991c21b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1541,10 +1541,12 @@ static int isp116x_remove(struct platform_device *pdev)
iounmap(isp116x->data_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
iounmap(isp116x->addr_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 4300326b3730..6be6c5878d08 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3909,8 +3909,10 @@ static int oxu_bus_suspend(struct usb_hcd *hcd)
}
}
+ spin_unlock_irq(&oxu->lock);
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
+ spin_lock_irq(&oxu->lock);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cb8b481a9499..7d13ddff6b33 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
@@ -268,6 +269,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index d1d9a7d5da17..af88f4fe00d2 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -251,6 +251,8 @@ static const struct reg_field isp1760_hc_reg_fields[] = {
[HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 2, 2),
[HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 1, 1),
[HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL, 0, 0),
+ /* Make sure the array is sized properly during compilation */
+ [HC_FIELD_MAX] = {},
};
static const struct reg_field isp1763_hc_reg_fields[] = {
@@ -321,6 +323,8 @@ static const struct reg_field isp1763_hc_reg_fields[] = {
[HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 2, 2),
[HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 1, 1),
[HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 0, 0),
+ /* Make sure the array is sized properly during compilation */
+ [HC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_hc_volatile_ranges[] = {
@@ -405,6 +409,8 @@ static const struct reg_field isp1761_dc_reg_fields[] = {
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_DC_CHIPID, 16, 31),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP176x_DC_CHIPID, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP176x_DC_SCRATCH, 0, 15),
+ /* Make sure the array is sized properly during compilation */
+ [DC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_dc_volatile_ranges[] = {
@@ -458,6 +464,8 @@ static const struct reg_field isp1763_dc_reg_fields[] = {
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_DC_CHIPID_HIGH, 0, 15),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP1763_DC_CHIPID_LOW, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP1763_DC_SCRATCH, 0, 15),
+ /* Make sure the array is sized properly during compilation */
+ [DC_FIELD_MAX] = {},
};
static const struct regmap_config isp1763_dc_regmap_conf = {
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index f086960fe2b5..bd1de5c4c434 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -363,6 +363,7 @@ static int omap2430_probe(struct platform_device *pdev)
control_node = of_parse_phandle(np, "ctrl-module", 0);
if (control_node) {
control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
ret = -EINVAL;
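
The omap2430 hunk above drops the node reference taken by of_parse_phandle() once the lookup is done. A minimal sketch of that lookup-and-put pattern, with hypothetical driver names and the usual OF helpers assumed, could look like:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Illustrative only; "example" names are made up, not part of the patch. */
static int example_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *ctrl_np;
	struct platform_device *ctrl_pdev;

	ctrl_np = of_parse_phandle(np, "ctrl-module", 0);	/* takes a node reference */
	if (!ctrl_np)
		return -ENODEV;

	ctrl_pdev = of_find_device_by_node(ctrl_np);
	of_node_put(ctrl_np);					/* drop it once the lookup is done */
	if (!ctrl_pdev)
		return -EINVAL;

	/* ... use ctrl_pdev ... */
	return 0;
}
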
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index ee0863c6553e..6e6ef8c0bc7e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -95,8 +95,8 @@ static int omap_otg_probe(struct platform_device *pdev)
return -ENODEV;
extcon = extcon_get_extcon_dev(config->extcon);
- if (!extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(extcon))
+ return PTR_ERR(extcon);
otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
if (!otg_dev)
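
Both this hunk and the later fusb302 one rely on extcon_get_extcon_dev() reporting failures through ERR_PTR values (including -EPROBE_DEFER) rather than NULL in this tree. A small, hypothetical consumer of that convention:

#include <linux/err.h>
#include <linux/extcon.h>

/* Sketch only; assumes extcon_get_extcon_dev() returns ERR_PTR on failure. */
static int example_get_extcon(const char *name, struct extcon_dev **out)
{
	struct extcon_dev *edev = extcon_get_extcon_dev(name);

	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* propagates -EPROBE_DEFER as well */

	*out = edev;
	return 0;
}
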
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a7b3c15957ba..feba2a8d1233 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -166,6 +166,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
@@ -204,6 +205,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 52cbc353051f..9a6f742ad3ab 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -212,6 +212,7 @@
//
// Definitions for other product IDs
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
+#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4)
#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 152ad882657d..ed1e50d83cca 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
+#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
@@ -1137,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ .driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -1977,6 +1981,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1d878d05a658..3506c47e1eef 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -421,6 +421,9 @@ static int pl2303_detect_type(struct usb_serial *serial)
bcdUSB = le16_to_cpu(desc->bcdUSB);
switch (bcdUSB) {
+ case 0x101:
+ /* USB 1.0.1? Let's assume they meant 1.1... */
+ fallthrough;
case 0x110:
switch (bcdDevice) {
case 0x300:
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 05cec81dcd3f..38ddfedef629 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -174,24 +174,25 @@ static void rio_karma_destructor(void *extra)
static int rio_karma_init(struct us_data *us)
{
- int ret = 0;
struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
if (!data)
- goto out;
+ return -ENOMEM;
data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
if (!data->recv) {
kfree(data);
- goto out;
+ return -ENOMEM;
}
us->extra = data;
us->extra_destructor = rio_karma_destructor;
- ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
- data->in_storage = (ret == 0);
-out:
- return ret;
+ if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
+ return -EIO;
+
+ data->in_storage = 1;
+
+ return 0;
}
static struct scsi_host_template karma_host_template;
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index c8340de0ed49..d2aaf294b649 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -131,8 +131,11 @@ typec_switch_register(struct device *parent,
sw->dev.class = &typec_mux_class;
sw->dev.type = &typec_switch_dev_type;
sw->dev.driver_data = desc->drvdata;
- dev_set_name(&sw->dev, "%s-switch",
- desc->name ? desc->name : dev_name(parent));
+ ret = dev_set_name(&sw->dev, "%s-switch", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&sw->dev);
+ return ERR_PTR(ret);
+ }
ret = device_add(&sw->dev);
if (ret) {
@@ -338,8 +341,11 @@ typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
mux->dev.class = &typec_mux_class;
mux->dev.type = &typec_mux_dev_type;
mux->dev.driver_data = desc->drvdata;
- dev_set_name(&mux->dev, "%s-mux",
- desc->name ? desc->name : dev_name(parent));
+ ret = dev_set_name(&mux->dev, "%s-mux", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&mux->dev);
+ return ERR_PTR(ret);
+ }
ret = device_add(&mux->dev);
if (ret) {
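
The typec mux/switch hunks above check the dev_set_name() return value and drop the half-initialized device with put_device() on failure. A rough sketch of that register path, using made-up "example" names and assuming the device's release callback frees it:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

static void example_release(struct device *dev)
{
	kfree(dev);
}

static struct device *example_register(struct device *parent, const char *name)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	device_initialize(dev);
	dev->parent = parent;
	dev->release = example_release;

	ret = dev_set_name(dev, "%s-switch", name);	/* dev_set_name() can fail */
	if (ret)
		goto err_put;

	ret = device_add(dev);
	if (ret)
		goto err_put;

	return dev;

err_put:
	put_device(dev);	/* frees the device via example_release() */
	return ERR_PTR(ret);
}
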
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 72f9001b0792..96c55eaf3f80 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1708,8 +1708,8 @@ static int fusb302_probe(struct i2c_client *client,
*/
if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
chip->extcon = extcon_get_extcon_dev(name);
- if (!chip->extcon)
- return -EPROBE_DEFER;
+ if (IS_ERR(chip->extcon))
+ return PTR_ERR(chip->extcon);
}
chip->vbus = devm_regulator_get(chip->dev, "vbus");
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index d8d3892e5a69..3c6d452e3bf4 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -393,7 +393,6 @@ static int stub_probe(struct usb_device *udev)
err_port:
dev_set_drvdata(&udev->dev, NULL);
- usb_put_dev(udev);
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
@@ -408,6 +407,7 @@ call_put_busid_priv:
put_busid_priv(busid_priv);
sdev_free:
+ usb_put_dev(udev);
stub_device_free(sdev);
return rc;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 325c22008e53..5dd41e8215e0 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -138,7 +138,9 @@ static int tweak_set_configuration_cmd(struct urb *urb)
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
+ usb_lock_device(sdev->udev);
err = usb_set_configuration(sdev->udev, config);
+ usb_unlock_device(sdev->udev);
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
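
The stub_rx change serializes the configuration update with the USB device lock, since usb_set_configuration() requires the caller to hold it. A trimmed, hypothetical wrapper showing just the locking:

#include <linux/usb.h>

/* Sketch only: take the device lock around usb_set_configuration(). */
static int example_set_config(struct usb_device *udev, int config)
{
	int err;

	usb_lock_device(udev);
	err = usb_set_configuration(udev, config);
	usb_unlock_device(udev);

	return err;
}
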
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 003530b19b4e..4fe8aa13ac68 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -505,7 +505,6 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
}
ifcvf_mgmt_dev->adapter = adapter;
- pci_set_drvdata(pdev, ifcvf_mgmt_dev);
vf = &adapter->vf;
vf->dev_type = get_dev_type(pdev);
@@ -620,6 +619,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
+
return 0;
err:
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 12bf3d16a40f..86571498c1c2 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -558,14 +558,19 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
goto mdev_err;
}
err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
- if (!err)
- err = genlmsg_reply(msg, info);
+ if (err)
+ goto mdev_err;
+
+ err = genlmsg_reply(msg, info);
+ put_device(dev);
+ mutex_unlock(&vdpa_dev_mutex);
+ return err;
+
mdev_err:
put_device(dev);
err:
mutex_unlock(&vdpa_dev_mutex);
- if (err)
- nlmsg_free(msg);
+ nlmsg_free(msg);
return err;
}
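
The vdpa hunk above restructures the handler so nlmsg_free() only runs on paths where the buffer was never handed to genlmsg_reply(), which takes over the skb whether or not it succeeds. A bare-bones sketch of that ownership rule in a hypothetical generic-netlink doit handler (family, command number and attributes are placeholders):

#include <net/genetlink.h>
#include <net/netlink.h>

static struct genl_family example_family;	/* placeholder; a real driver registers this */

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &example_family, 0, 0 /* cmd */);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_free;
	}
	/* ... nla_put_*() attributes would go here ... */
	genlmsg_end(msg, hdr);

	/* genlmsg_reply() consumes msg on success and on failure alike */
	return genlmsg_reply(msg, info);

err_free:
	nlmsg_free(msg);	/* free only while we still own the buffer */
	return err;
}
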
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 41b0cd17fcba..2faf3bd1c3ba 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -353,11 +353,14 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ bool old_ready;
spin_lock(&vdpasim->lock);
+ old_ready = vq->ready;
vq->ready = ready;
- if (vq->ready)
+ if (vq->ready && !old_ready) {
vdpasim_queue_ready(vdpasim, idx);
+ }
spin_unlock(&vdpasim->lock);
}
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 8c3de5a76282..9270398caf15 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1336,9 +1336,9 @@ static int vduse_create_dev(struct vduse_dev_config *config,
dev->minor = ret;
dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
- dev->dev = device_create(vduse_class, NULL,
- MKDEV(MAJOR(vduse_major), dev->minor),
- dev, "%s", config->name);
+ dev->dev = device_create_with_groups(vduse_class, NULL,
+ MKDEV(MAJOR(vduse_major), dev->minor),
+ dev, vduse_dev_groups, "%s", config->name);
if (IS_ERR(dev->dev)) {
ret = PTR_ERR(dev->dev);
goto err_dev;
@@ -1585,7 +1585,6 @@ static int vduse_init(void)
return PTR_ERR(vduse_class);
vduse_class->devnode = vduse_devnode;
- vduse_class->dev_groups = vduse_dev_groups;
ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
if (ret)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 792ab5f23647..297b5db47454 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1450,13 +1450,9 @@ err:
return ERR_PTR(r);
}
-static struct ptr_ring *get_tap_ptr_ring(int fd)
+static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@@ -1465,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
goto out;
ring = NULL;
out:
- fput(file);
return ring;
}
@@ -1552,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
- if (index == VHOST_NET_VQ_RX)
- nvq->rx_ring = get_tap_ptr_ring(fd);
+ if (index == VHOST_NET_VQ_RX) {
+ if (sock)
+ nvq->rx_ring = get_tap_ptr_ring(sock->file);
+ else
+ nvq->rx_ring = NULL;
+ }
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index d62f05d056b7..299a99532618 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
return;
irq = ops->get_vq_irq(vdpa, qid);
+ if (irq < 0)
+ return;
+
irq_bypass_unregister_producer(&vq->call_ctx.producer);
- if (!vq->call_ctx.ctx || irq < 0)
+ if (!vq->call_ctx.ctx)
return;
vq->call_ctx.producer.token = vq->call_ctx.ctx;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 14e2043d7685..eab55accf381 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
int (*copy)(const struct vringh *vrh,
void *dst, const void *src, size_t len))
{
- int err, count = 0, up_next, desc_max;
+ int err, count = 0, indirect_count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
struct vringh_range range = { -1ULL, 0 }, slowrange;
bool slow = false;
@@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i,
continue;
}
- if (count++ == vrh->vring.num) {
+ if (up_next == -1)
+ count++;
+ else
+ indirect_count++;
+
+ if (count > vrh->vring.num || indirect_count > desc_max) {
vringh_bad("Descriptor loop in %p", descs);
err = -ELOOP;
goto fail;
@@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
i = return_from_indirect(vrh, &up_next,
&descs, &desc_max);
slow = false;
+ indirect_count = 0;
} else
break;
}
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 40496e9e9b43..f304163e87e9 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -46,6 +46,7 @@
#include <linux/slab.h>
#include <linux/font.h>
#include <linux/crc32.h>
+#include <linux/fb.h>
#include <asm/io.h>
@@ -392,7 +393,9 @@ static int __init sticonsole_init(void)
for (i = 0; i < MAX_NR_CONSOLES; i++)
font_data[i] = STI_DEF_FONT;
- pr_info("sticon: Initializing STI text console.\n");
+ pr_info("sticon: Initializing STI text console on %s at [%s]\n",
+ sticon_sti->sti_data->inq_outptr.dev_name,
+ sticon_sti->pa_path);
console_lock();
err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1,
PAGE0->mem_cons.cl_class != CL_DUPLEX);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index f869b723494f..6a947ff96d6e 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -30,10 +30,11 @@
#include <asm/pdc.h>
#include <asm/cacheflush.h>
#include <asm/grfioctl.h>
+#include <asm/fb.h>
#include "../fbdev/sticore.h"
-#define STI_DRIVERVERSION "Version 0.9b"
+#define STI_DRIVERVERSION "Version 0.9c"
static struct sti_struct *default_sti __read_mostly;
@@ -502,7 +503,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (!fbfont)
return NULL;
- pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
+ pr_info(" using %ux%u framebuffer font %s\n",
fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
@@ -946,6 +947,7 @@ out_err:
static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
{
+ pr_info(" located at [%s]\n", sti->pa_path);
if (strcmp (path, default_sti_path) == 0)
default_sti = sti;
}
@@ -957,7 +959,6 @@ static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
*/
static int __init sticore_pa_init(struct parisc_device *dev)
{
- char pa_path[21];
struct sti_struct *sti = NULL;
int hpa = dev->hpa.start;
@@ -970,8 +971,8 @@ static int __init sticore_pa_init(struct parisc_device *dev)
if (!sti)
return 1;
- print_pa_hwpath(dev, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pa_hwpath(dev, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
return 0;
}
@@ -1007,9 +1008,8 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
sti = sti_try_rom_generic(rom_base, fb_base, pd);
if (sti) {
- char pa_path[30];
- print_pci_hwpath(pd, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pci_hwpath(pd, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
}
if (!sti) {
@@ -1127,6 +1127,22 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
return ret;
}
+/* check if given fb_info is the primary device */
+int fb_is_primary_device(struct fb_info *info)
+{
+ struct sti_struct *sti;
+
+ sti = sti_get_rom(0);
+
+ /* if no built-in graphics card found, allow any fb driver as default */
+ if (!sti)
+ return true;
+
+ /* return true if it's the default built-in framebuffer driver */
+ return (sti->info == info);
+}
+EXPORT_SYMBOL(fb_is_primary_device);
+
MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9ec969e136bf..8080116aea84 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -758,12 +758,15 @@ static int clcdfb_of_vram_setup(struct clcd_fb *fb)
return -ENODEV;
fb->fb.screen_base = of_iomap(memory, 0);
- if (!fb->fb.screen_base)
+ if (!fb->fb.screen_base) {
+ of_node_put(memory);
return -ENOMEM;
+ }
fb->fb.fix.smem_start = of_translate_address(memory,
of_get_address(memory, 0, &size, NULL));
fb->fb.fix.smem_len = size;
+ of_node_put(memory);
return 0;
}
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index a53c1f6906f0..a25b63b56223 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -3265,6 +3265,9 @@ static void fbcon_register_existing_fbs(struct work_struct *work)
console_lock();
+ deferred_takeover = false;
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
for_each_registered_fb(i)
fbcon_fb_registered(registered_fb[i]);
@@ -3282,8 +3285,6 @@ static int fbcon_output_notifier(struct notifier_block *nb,
pr_info("fbcon: Taking over console\n");
dummycon_unregister_output_notifier(&fbcon_output_nb);
- deferred_takeover = false;
- logo_shown = FBCON_LOGO_DONTSHOW;
/* We may get called in atomic context */
schedule_work(&fbcon_deferred_takeover_work);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index da434546ef9d..dedfb363d6bd 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1436,10 +1436,7 @@ fb_release(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
- struct fb_info * const info = file_fb_info(file);
-
- if (!info)
- return -ENODEV;
+ struct fb_info * const info = file->private_data;
lock_fb_info(info);
if (info->fbops->fb_release)
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 65dae05fff8e..ce699396d6ba 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
{
if (!info)
return;
+
+ if (WARN_ON(refcount_read(&info->count)))
+ return;
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index c8e0ea27caf1..58c304a3b7c4 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1009,7 +1009,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
- resource_size_t pot_start, pot_end;
phys_addr_t paddr;
int ret;
@@ -1060,23 +1059,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
- if (gen2vm) {
- pot_start = 0;
- pot_end = -1;
- } else {
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size) {
- pr_err("Resource not available or (0x%lx < 0x%lx)\n",
- (unsigned long) pci_resource_len(pdev, 0),
- (unsigned long) screen_fb_size);
- goto err1;
- }
-
- pot_end = pci_resource_end(pdev, 0);
- pot_start = pot_end - screen_fb_size + 1;
- }
-
- ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
+ ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1,
screen_fb_size, 0x100000, true);
if (ret != 0) {
pr_err("Unable to allocate framebuffer memory\n");
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 4279e13a3b58..9421d14d0eb0 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -650,6 +650,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
for (i = 0; i < 8; i++) {
ret = pxa3xx_gcu_add_buffer(dev, priv);
if (ret) {
+ pxa3xx_gcu_free_buffers(dev, priv);
dev_err(dev, "failed to allocate DMA memory\n");
goto err_disable_clk;
}
@@ -666,15 +667,15 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
SHARED_SIZE, irq);
return 0;
-err_free_dma:
- dma_free_coherent(dev, SHARED_SIZE,
- priv->shared, priv->shared_phys);
+err_disable_clk:
+ clk_disable_unprepare(priv->clk);
err_misc_deregister:
misc_deregister(&priv->misc_dev);
-err_disable_clk:
- clk_disable_unprepare(priv->clk);
+err_free_dma:
+ dma_free_coherent(dev, SHARED_SIZE,
+ priv->shared, priv->shared_phys);
return ret;
}
@@ -687,6 +688,7 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev)
pxa3xx_gcu_wait_idle(priv);
misc_deregister(&priv->misc_dev);
dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
+ clk_disable_unprepare(priv->clk);
pxa3xx_gcu_free_buffers(dev, priv);
return 0;
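
The pxa3xx-gcu hunk reorders the error labels so teardown runs in the reverse order of setup. A generic sketch of that unwind structure, with a hypothetical private struct and probe steps standing in for the driver's own:

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>

struct example_priv {
	void *shared;
	dma_addr_t shared_phys;
	struct miscdevice misc_dev;
	struct clk *clk;
};

static int example_probe(struct platform_device *pdev, struct example_priv *priv,
			 size_t shared_size)
{
	struct device *dev = &pdev->dev;
	int ret;

	priv->shared = dma_alloc_coherent(dev, shared_size,
					  &priv->shared_phys, GFP_KERNEL);
	if (!priv->shared)
		return -ENOMEM;

	ret = misc_register(&priv->misc_dev);
	if (ret)
		goto err_free_dma;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_misc_deregister;

	return 0;

	/* labels undo the steps in the reverse order of the setup above */
err_misc_deregister:
	misc_deregister(&priv->misc_dev);
err_free_dma:
	dma_free_coherent(dev, shared_size, priv->shared, priv->shared_phys);
	return ret;
}
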
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index c338f7848ae2..0ebdd28a0b81 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -370,6 +370,9 @@ struct sti_struct {
/* pointer to all internal data */
struct sti_all_data *sti_data;
+
+ /* pa_path of this device */
+ char pa_path[24];
};
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 265865610edc..002f265d8db5 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1317,11 +1317,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
goto out_err3;
}
+ /* save for primary gfx device detection & unregister_framebuffer() */
+ sti->info = info;
if (register_framebuffer(&fb->info) < 0)
goto out_err4;
- sti->info = info; /* save for unregister_framebuffer() */
-
fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index e25e8de5ff67..929d4775cb4b 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -490,11 +490,12 @@ static int vesafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
- /* vesafb_destroy takes care of info cleanup */
- unregister_framebuffer(info);
if (((struct vesafb_par *)(info->par))->region)
release_region(0x3c0, 32);
+ /* vesafb_destroy takes care of info cleanup */
+ unregister_framebuffer(info);
+
return 0;
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 56128b9c46eb..1dd396d4bebb 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -688,6 +688,7 @@ static int vm_cmdline_set(const char *device,
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
if (err) {
+ put_device(&vm_cmdline_parent);
pr_err("Failed to register parent device!\n");
return err;
}
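
The virtio_mmio hunk follows the device_register() rule that a failed registration is released with put_device() rather than freed or ignored. A minimal sketch with a hypothetical static device, mirroring the cmdline parent used here:

#include <linux/device.h>

static void example_release(struct device *dev)
{
	/* nothing dynamic to free for this static device */
}

static struct device example_parent = {
	.init_name = "example-parent",
	.release = example_release,
};

static int example_register_parent(void)
{
	int err = device_register(&example_parent);

	if (err) {
		/*
		 * device_register() may have partially initialized the device;
		 * drop the reference instead of freeing it directly.
		 */
		put_device(&example_parent);
		return err;
	}
	return 0;
}
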
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index b35bb2d57f62..1e890ef17687 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev)
if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
if (vp_dev->msix_enabled) {
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index e11ed748e661..9ab66e44738e 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -340,6 +340,7 @@ err_map_notify:
err_map_isr:
pci_iounmap(pci_dev, mdev->common);
err_map_common:
+ pci_release_selected_regions(pci_dev, mdev->modern_bars);
return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index ae7f9357bb87..46c2a4bd9ebe 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -227,7 +227,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_noidle(dev);
pm_runtime_disable(&pdev->dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index a730ecbf78cd..4820af929a82 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -48,7 +48,7 @@
/* internal variables */
enum tco_reg_layout {
- sp5100, sb800, efch
+ sp5100, sb800, efch, efch_mmio
};
struct sp5100_tco {
@@ -86,6 +86,10 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
dev->revision < 0x40) {
return sp5100;
} else if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+ sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+ return efch_mmio;
+ } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
dev->revision >= 0x41) ||
(dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
@@ -201,6 +205,8 @@ static void tco_timer_enable(struct sp5100_tco *tco)
~EFCH_PM_WATCHDOG_DISABLE,
EFCH_PM_DECODEEN_SECOND_RES);
break;
+ default:
+ break;
}
}
@@ -215,14 +221,195 @@ static u32 sp5100_tco_read_pm_reg32(u8 index)
return val;
}
+static u32 sp5100_tco_request_region(struct device *dev,
+ u32 mmio_addr,
+ const char *dev_name)
+{
+ if (!devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
+ dev_name)) {
+ dev_dbg(dev, "MMIO address 0x%08x already in use\n", mmio_addr);
+ return 0;
+ }
+
+ return mmio_addr;
+}
+
+static u32 sp5100_tco_prepare_base(struct sp5100_tco *tco,
+ u32 mmio_addr,
+ u32 alt_mmio_addr,
+ const char *dev_name)
+{
+ struct device *dev = tco->wdd.parent;
+
+ dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n", mmio_addr);
+
+ if (!mmio_addr && !alt_mmio_addr)
+ return -ENODEV;
+
+ /* Check for MMIO address and alternate MMIO address conflicts */
+ if (mmio_addr)
+ mmio_addr = sp5100_tco_request_region(dev, mmio_addr, dev_name);
+
+ if (!mmio_addr && alt_mmio_addr)
+ mmio_addr = sp5100_tco_request_region(dev, alt_mmio_addr, dev_name);
+
+ if (!mmio_addr) {
+ dev_err(dev, "Failed to reserve MMIO or alternate MMIO region\n");
+ return -EBUSY;
+ }
+
+ tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+ if (!tco->tcobase) {
+ dev_err(dev, "MMIO address 0x%08x failed mapping\n", mmio_addr);
+ devm_release_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
+ return -ENOMEM;
+ }
+
+ dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
+
+ return 0;
+}
+
+static int sp5100_tco_timer_init(struct sp5100_tco *tco)
+{
+ struct watchdog_device *wdd = &tco->wdd;
+ struct device *dev = wdd->parent;
+ u32 val;
+
+ val = readl(SP5100_WDT_CONTROL(tco->tcobase));
+ if (val & SP5100_WDT_DISABLED) {
+ dev_err(dev, "Watchdog hardware is disabled\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Save WatchDogFired status, because WatchDogFired flag is
+ * cleared here.
+ */
+ if (val & SP5100_WDT_FIRED)
+ wdd->bootstatus = WDIOF_CARDRESET;
+
+ /* Set watchdog action to reset the system */
+ val &= ~SP5100_WDT_ACTION_RESET;
+ writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+
+ /* Set a reasonable heartbeat before we stop the timer */
+ tco_timer_set_timeout(wdd, wdd->timeout);
+
+ /*
+ * Stop the TCO before we change anything so we don't race with
+ * a zeroed timer.
+ */
+ tco_timer_stop(wdd);
+
+ return 0;
+}
+
+static u8 efch_read_pm_reg8(void __iomem *addr, u8 index)
+{
+ return readb(addr + index);
+}
+
+static void efch_update_pm_reg8(void __iomem *addr, u8 index, u8 reset, u8 set)
+{
+ u8 val;
+
+ val = readb(addr + index);
+ val &= reset;
+ val |= set;
+ writeb(val, addr + index);
+}
+
+static void tco_timer_enable_mmio(void __iomem *addr)
+{
+ efch_update_pm_reg8(addr, EFCH_PM_DECODEEN3,
+ ~EFCH_PM_WATCHDOG_DISABLE,
+ EFCH_PM_DECODEEN_SECOND_RES);
+}
+
+static int sp5100_tco_setupdevice_mmio(struct device *dev,
+ struct watchdog_device *wdd)
+{
+ struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
+ const char *dev_name = SB800_DEVNAME;
+ u32 mmio_addr = 0, alt_mmio_addr = 0;
+ struct resource *res;
+ void __iomem *addr;
+ int ret;
+ u32 val;
+
+ res = request_mem_region_muxed(EFCH_PM_ACPI_MMIO_PM_ADDR,
+ EFCH_PM_ACPI_MMIO_PM_SIZE,
+ "sp5100_tco");
+
+ if (!res) {
+ dev_err(dev,
+ "Memory region 0x%08x already in use\n",
+ EFCH_PM_ACPI_MMIO_PM_ADDR);
+ return -EBUSY;
+ }
+
+ addr = ioremap(EFCH_PM_ACPI_MMIO_PM_ADDR, EFCH_PM_ACPI_MMIO_PM_SIZE);
+ if (!addr) {
+ dev_err(dev, "Address mapping failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * EFCH_PM_DECODEEN_WDT_TMREN is dual purpose. This bitfield
+ * enables sp5100_tco register MMIO space decoding. The bitfield
+ * also starts the timer operation. Enable if not already enabled.
+ */
+ val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+ if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+ efch_update_pm_reg8(addr, EFCH_PM_DECODEEN, 0xff,
+ EFCH_PM_DECODEEN_WDT_TMREN);
+ }
+
+ /* Error if the timer could not be enabled */
+ val = efch_read_pm_reg8(addr, EFCH_PM_DECODEEN);
+ if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+ dev_err(dev, "Failed to enable the timer\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ mmio_addr = EFCH_PM_WDT_ADDR;
+
+ /* Determine alternate MMIO base address */
+ val = efch_read_pm_reg8(addr, EFCH_PM_ISACONTROL);
+ if (val & EFCH_PM_ISACONTROL_MMIOEN)
+ alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+ EFCH_PM_ACPI_MMIO_WDT_OFFSET;
+
+ ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+ if (!ret) {
+ tco_timer_enable_mmio(addr);
+ ret = sp5100_tco_timer_init(tco);
+ }
+
+out:
+ if (addr)
+ iounmap(addr);
+
+ release_resource(res);
+
+ return ret;
+}
+
static int sp5100_tco_setupdevice(struct device *dev,
struct watchdog_device *wdd)
{
struct sp5100_tco *tco = watchdog_get_drvdata(wdd);
const char *dev_name;
u32 mmio_addr = 0, val;
+ u32 alt_mmio_addr = 0;
int ret;
+ if (tco->tco_reg_layout == efch_mmio)
+ return sp5100_tco_setupdevice_mmio(dev, wdd);
+
/* Request the IO ports used by this driver */
if (!request_muxed_region(SP5100_IO_PM_INDEX_REG,
SP5100_PM_IOPORTS_SIZE, "sp5100_tco")) {
@@ -239,138 +426,55 @@ static int sp5100_tco_setupdevice(struct device *dev,
dev_name = SP5100_DEVNAME;
mmio_addr = sp5100_tco_read_pm_reg32(SP5100_PM_WATCHDOG_BASE) &
0xfffffff8;
+
+ /*
+ * Secondly, find the watchdog timer MMIO address
+ * from SBResource_MMIO register.
+ */
+
+ /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_SB_RESOURCE_MMIO_BASE,
+ &val);
+
+ /* Verify MMIO is enabled and using bar0 */
+ if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+ alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
break;
case sb800:
dev_name = SB800_DEVNAME;
mmio_addr = sp5100_tco_read_pm_reg32(SB800_PM_WATCHDOG_BASE) &
0xfffffff8;
+
+ /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+ val = sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
+
+ /* Verify MMIO is enabled and using bar0 */
+ if ((val & SB800_ACPI_MMIO_MASK) == SB800_ACPI_MMIO_DECODE_EN)
+ alt_mmio_addr = (val & ~0xfff) + SB800_PM_WDT_MMIO_OFFSET;
break;
case efch:
dev_name = SB800_DEVNAME;
- /*
- * On Family 17h devices, the EFCH_PM_DECODEEN_WDT_TMREN bit of
- * EFCH_PM_DECODEEN not only enables the EFCH_PM_WDT_ADDR memory
- * region, it also enables the watchdog itself.
- */
- if (boot_cpu_data.x86 == 0x17) {
- val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
- if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
- sp5100_tco_update_pm_reg8(EFCH_PM_DECODEEN, 0xff,
- EFCH_PM_DECODEEN_WDT_TMREN);
- }
- }
val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
if (val & EFCH_PM_DECODEEN_WDT_TMREN)
mmio_addr = EFCH_PM_WDT_ADDR;
+
+ val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
+ if (val & EFCH_PM_ISACONTROL_MMIOEN)
+ alt_mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
+ EFCH_PM_ACPI_MMIO_WDT_OFFSET;
break;
default:
return -ENODEV;
}
- /* Check MMIO address conflict */
- if (!mmio_addr ||
- !devm_request_mem_region(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE,
- dev_name)) {
- if (mmio_addr)
- dev_dbg(dev, "MMIO address 0x%08x already in use\n",
- mmio_addr);
- switch (tco->tco_reg_layout) {
- case sp5100:
- /*
- * Secondly, Find the watchdog timer MMIO address
- * from SBResource_MMIO register.
- */
- /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
- pci_read_config_dword(sp5100_tco_pci,
- SP5100_SB_RESOURCE_MMIO_BASE,
- &mmio_addr);
- if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
- SB800_ACPI_MMIO_SEL)) !=
- SB800_ACPI_MMIO_DECODE_EN) {
- ret = -ENODEV;
- goto unreg_region;
- }
- mmio_addr &= ~0xFFF;
- mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
- break;
- case sb800:
- /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
- mmio_addr =
- sp5100_tco_read_pm_reg32(SB800_PM_ACPI_MMIO_EN);
- if ((mmio_addr & (SB800_ACPI_MMIO_DECODE_EN |
- SB800_ACPI_MMIO_SEL)) !=
- SB800_ACPI_MMIO_DECODE_EN) {
- ret = -ENODEV;
- goto unreg_region;
- }
- mmio_addr &= ~0xFFF;
- mmio_addr += SB800_PM_WDT_MMIO_OFFSET;
- break;
- case efch:
- val = sp5100_tco_read_pm_reg8(EFCH_PM_ISACONTROL);
- if (!(val & EFCH_PM_ISACONTROL_MMIOEN)) {
- ret = -ENODEV;
- goto unreg_region;
- }
- mmio_addr = EFCH_PM_ACPI_MMIO_ADDR +
- EFCH_PM_ACPI_MMIO_WDT_OFFSET;
- break;
- }
- dev_dbg(dev, "Got 0x%08x from SBResource_MMIO register\n",
- mmio_addr);
- if (!devm_request_mem_region(dev, mmio_addr,
- SP5100_WDT_MEM_MAP_SIZE,
- dev_name)) {
- dev_dbg(dev, "MMIO address 0x%08x already in use\n",
- mmio_addr);
- ret = -EBUSY;
- goto unreg_region;
- }
- }
-
- tco->tcobase = devm_ioremap(dev, mmio_addr, SP5100_WDT_MEM_MAP_SIZE);
- if (!tco->tcobase) {
- dev_err(dev, "failed to get tcobase address\n");
- ret = -ENOMEM;
- goto unreg_region;
- }
-
- dev_info(dev, "Using 0x%08x for watchdog MMIO address\n", mmio_addr);
-
- /* Setup the watchdog timer */
- tco_timer_enable(tco);
-
- val = readl(SP5100_WDT_CONTROL(tco->tcobase));
- if (val & SP5100_WDT_DISABLED) {
- dev_err(dev, "Watchdog hardware is disabled\n");
- ret = -ENODEV;
- goto unreg_region;
+ ret = sp5100_tco_prepare_base(tco, mmio_addr, alt_mmio_addr, dev_name);
+ if (!ret) {
+ /* Setup the watchdog timer */
+ tco_timer_enable(tco);
+ ret = sp5100_tco_timer_init(tco);
}
- /*
- * Save WatchDogFired status, because WatchDogFired flag is
- * cleared here.
- */
- if (val & SP5100_WDT_FIRED)
- wdd->bootstatus = WDIOF_CARDRESET;
- /* Set watchdog action to reset the system */
- val &= ~SP5100_WDT_ACTION_RESET;
- writel(val, SP5100_WDT_CONTROL(tco->tcobase));
-
- /* Set a reasonable heartbeat before we stop the timer */
- tco_timer_set_timeout(wdd, wdd->timeout);
-
- /*
- * Stop the TCO before we change anything so we don't race with
- * a zeroed timer.
- */
- tco_timer_stop(wdd);
-
- release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
-
- return 0;
-
-unreg_region:
release_region(SP5100_IO_PM_INDEX_REG, SP5100_PM_IOPORTS_SIZE);
return ret;
}
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index adf015aa4126..6a0986d2c94b 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -58,6 +58,7 @@
#define SB800_PM_WATCHDOG_SECOND_RES GENMASK(1, 0)
#define SB800_ACPI_MMIO_DECODE_EN BIT(0)
#define SB800_ACPI_MMIO_SEL BIT(1)
+#define SB800_ACPI_MMIO_MASK GENMASK(1, 0)
#define SB800_PM_WDT_MMIO_OFFSET 0xB00
@@ -82,4 +83,10 @@
#define EFCH_PM_ISACONTROL_MMIOEN BIT(1)
#define EFCH_PM_ACPI_MMIO_ADDR 0xfed80000
+#define EFCH_PM_ACPI_MMIO_PM_OFFSET 0x00000300
#define EFCH_PM_ACPI_MMIO_WDT_OFFSET 0x00000b00
+
+#define EFCH_PM_ACPI_MMIO_PM_ADDR (EFCH_PM_ACPI_MMIO_ADDR + \
+ EFCH_PM_ACPI_MMIO_PM_OFFSET)
+#define EFCH_PM_ACPI_MMIO_PM_SIZE 8
+#define AMD_ZEN_SMBUS_PCI_REV 0x51
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index c137ad2bd5c3..0ea554c7cda5 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -125,13 +125,16 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
ret = of_property_read_u32_index(np, "syscon", 1, &reg);
if (ret < 0) {
dev_err(dev, "no offset in syscon\n");
+ of_node_put(syscon_np);
return ret;
}
/* allocate memory for watchdog struct */
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt)
+ if (!wdt) {
+ of_node_put(syscon_np);
return -ENOMEM;
+ }
/* set regmap and offset to know where to write */
wdt->feed_offset = reg;
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 195c8c004b69..4fac8148a8e6 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -462,6 +462,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return ret;
watchdog_set_nowayout(&wdat->wdd, nowayout);
+ watchdog_stop_on_reboot(&wdat->wdd);
return devm_watchdog_register_device(dev, &wdat->wdd);
}
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 34742c6e189e..f17c4c03db30 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0;
}
-EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
struct remap_pfn {
struct mm_struct *mm;